# THIS FILE IS A PART OF VCStudio

# PYTHON 3

import os
import datetime
import json

from studio import checklist
from studio import story
from studio import schedule

from settings import settings
from settings import talk

def iftime(string):
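    # A few illustrative (not exhaustive) examples of what this validator is
    # expected to accept and reject, assuming the "hh:mm:ss" 24-hour format
    # that the checks below implement:
    #
    #   iftime("16:41:49")  ->  True
    #   iftime("25:00:00")  ->  False   (hour > 23)
    #   iftime("12:61:00")  ->  False   (minute > 59)
    #   iftime("1:2:3")     ->  False   (wrong length)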

    if len(string) != len("00:00:00"):
        return False

    if len(string.split(":")) != 3:
        return False

    try:
        for n, i in enumerate(string.split(":")):

            if len(i) != 2:
                return False

            i = int(i)

            if n == 0 and i > 23:
                return False

            if i > 59:
                return False

    except:
        # Non-numeric parts are not a valid time either.
        return False

    return True

def ifdate(string):
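    # Illustrative examples, assuming the "yyyy/mm/dd" format checked below:
    #
    #   ifdate("1997/07/30")  ->  True
    #   ifdate("30/07/1997")  ->  False   (old Blender-Organizer order)
    #   ifdate("1997-07-30")  ->  False   (wrong separator)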

    if len(string) != len("1997/07/30"):
        return False

    new_date_format = "%Y/%m/%d"

    try:
        datetime.datetime.strptime(string, new_date_format)
        ret = True
    except:
        ret = False

    return ret

def if_days_a_week(string):

    try:
        string = int(string)
        if string <= 7 and string >= 1:
            return True
        else:
            return False
    except Exception as e:
        print("fail", e)
        return False

def get_legacy(project_location):

    # This function will return analytics data about a project. This particular
    # function is designed to read old Blender-Organizer projects. It's a first
    # step of conversion, and it's used to display basic analytics in the
    # project-manager.

    name_tmp = project_location[project_location.rfind("/")+1:]

    data = {
        "name"       : name_tmp,     # Name of the project (typed properly)
        "director"   : "",           # Name of the project's director.
        "status"     : "",           # Project's comment / type
        "donework"   : 0.0,          # Percentage of Assets and Scenes done
        "fraction"   : 0.0,          # Project's completion percentage
        "checklist"  : 0.0,          # Project's main checklist percentage
        "startdate"  : "0000/00/00", # Date of the start of the project
        "deadline"   : "0000/00/00", # Date when the project's deadline is
        "duration"   : 0,            # Amount of days between startdate and deadline
        "timepassed" : 0.0,          # Percentage of how much time has passed
        "dayspassed" : 0,            # Amount of days since the startdate
        "needed"     : 0,            # Needed % per day
        "star"       : 0,            # If the star (daily requirement) is reached
        "chr_factor" : 1,            # Importance factor for Characters
        "veh_factor" : 1,            # Importance factor for Vehicles
        "loc_factor" : 1,            # Importance factor for Locations
        "obj_factor" : 1,            # Importance factor for Objects (Other)
        "rnd_factor" : 4,            # Importance factor for Scenes (Renders)
        "chr"        : 0.0,          # Percentage of Characters done
        "veh"        : 0.0,          # Percentage of Vehicles done
        "loc"        : 0.0,          # Percentage of Locations done
        "obj"        : 0.0,          # Percentage of Objects (Other) done
        "rnd"        : 0.0,          # Percentage of Scenes (Renders) done
        "dates"      : {}            # Per date, detailed data about the project
    }

    # For the future we will have to use some kind of network system, where
    # different people contribute to one project. For example director, writer,
    # animator. For this I want to introduce a USERNAME thing. I will add them
    # later to tasks and schedulings.

    Username = settings.read("Username")
    if not Username:
        Username = "Blender-Organizer User"

    # Okay let's get the name, director and status from the old file. Funny that
    # it still survived from so far back. In the Organizer 1.0 you had to manually
    # type in the number of assets that had to be done in each category.

    # And so for this task a file called "project.data" was created. Here is
    # an example:

    # Project :Moria's Race
    # Status :Short Action Driving Film
    # Director :J.Y.Amihud
    # Character:1
    # Locations:1
    # Objects :1
    # Vehicles :1
    # Scenes :4

    # So that survived up to the last Blender-Organizer. But the last 5 lines
    # were no longer utilized. I was using it mainly for the first 3 things.

    # Tho from version 4.85 of Blender-Organizer those last lines were used
    # to control the influence factor. Basically the number stored will multiply
    # the number of times a given category is counted in the final percentage.

    # For example animating scenes should take more than half the time of the
    # project. More than 50% of the project then should be in the scenes. But the
    # previous / primitive algorithm was giving each category an even 20%. So
    # of course I fixed it. LOL.
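
    # To make the weighting above concrete, here is a small worked example with
    # made-up numbers (not taken from any real project). With the default factors
    # chr=1, veh=1, loc=1, obj=1 and rnd=4, a project that is
    #
    #   chr: 0.50   veh: 1.00   loc: 0.25   obj: 0.75   rnd: 0.40
    #
    # done gets the scene percentage counted 4 times:
    #
    #   (0.50 + 1.00 + 0.25 + 0.75 + 0.40*4) / (1+1+1+1+4) = 4.1 / 8 = 0.5125
    #
    # which is how the astlist / donework averaging further down behaves.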

    # I think for VCStudio I'm gonna make a more unified file format. That will
    # unite all "project.data", "percentage_hystory.data", "history.data" and
    # "schedule.data".

    # I'm going to still have the main checklist separate tho. The checklist
    # format is quite good.

    projectdata = open(project_location+"/project.data")
    projectdata = projectdata.read()
    projectdata = projectdata.split("\n")

    for line in projectdata:

        if line.startswith("Project"):
            data["name"] = line[line.find(":")+1:]

        elif line.startswith("Status"):
            data["status"] = line[line.find(":")+1:]

        elif line.startswith("Director"):
            data["director"] = line[line.find(":")+1:]

        # Next up some integer conversions. So...

        elif line.startswith("Character"):
            try:
                data["chr_factor"] = int(line[line.find(":")+1:])
            except:
                data["chr_factor"] = 1

        elif line.startswith("Vehicles"):
            try:
                data["veh_factor"] = int(line[line.find(":")+1:])
            except:
                data["veh_factor"] = 1

        elif line.startswith("Locations"):
            try:
                data["loc_factor"] = int(line[line.find(":")+1:])
            except:
                data["loc_factor"] = 1

        elif line.startswith("Objects"):
            try:
                data["obj_factor"] = int(line[line.find(":")+1:])
            except:
                data["obj_factor"] = 1

        elif line.startswith("Scenes"):
            try:
                data["rnd_factor"] = int(line[line.find(":")+1:])
            except:
                data["rnd_factor"] = 1

    # Okay this first file was easy. Let's now parse the main checklist and
    # get 5 more values. Funny thing is that for the old checklists and new
    # checklists you can use the same function to read them ( the main check-
    # list data ). But old checklists had something else in there that was
    # making them a bit different from VCStudio checklists. It's the STR and FIN
    # variables in the beginning.

    # See, in the Organizer 1.0 there was no scheduling system as there was in
    # Blender-Organizer 4.9. The scheduling was done per asset and not per
    # action in the checklist. So STR and FIN were 2 dates between which you
    # should have been working on the asset.

    # But in the main checklists ("project.progress") those 2 survived to the
    # late Blender-Organizer as a project startdate and deadline.

    # One more rub that I have is that the date format was 00/00/0000 and not the
    # way better 0000/00/00 which makes sorting easy. So yeah...

    old_date_format = "%d/%m/%Y"
    new_date_format = "%Y/%m/%d"
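
    # A quick sketch of the conversion these two formats drive (the date here is
    # just an example value, not read from any file):
    #
    #   datetime.datetime.strptime("30/07/1997", old_date_format)
    #       -> datetime.datetime(1997, 7, 30, 0, 0)
    #   datetime.datetime.strftime(datetime.datetime(1997, 7, 30), new_date_format)
    #       -> "1997/07/30"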

    projectdata = open(project_location+"/project.progress")
    projectdata = projectdata.read()
    projectdata = projectdata.split("\n")

    startdate = datetime.datetime.today()
    deadline = datetime.datetime.today()

    try:
        for line in projectdata:
            if line.startswith("STR"):
                startdate = datetime.datetime.strptime(line[4:], old_date_format)
                data["startdate"] = datetime.datetime.strftime(startdate, new_date_format)

            elif line.startswith("FIN"):
                deadline = datetime.datetime.strptime(line[4:], old_date_format)
                data["deadline"] = datetime.datetime.strftime(deadline, new_date_format)
    except:
        data["startdate"] = datetime.datetime.strftime(datetime.datetime.today(), new_date_format)
        data["deadline"] = datetime.datetime.strftime(datetime.datetime.today()+datetime.timedelta(days=30), new_date_format)

    # So we've got the dates. Let's calculate the time percentage, I guess.

    delta = deadline - startdate
    data["duration"] = int(delta.days)

    delta = datetime.datetime.today() - startdate
    data["dayspassed"] = int(delta.days)

    data["timepassed"] = data["dayspassed"] / data["duration"]
    if data["timepassed"] > 1.0:
        data["timepassed"] = 1.0

    # Now let's launch the main checklist and get the data from there. I mean
    # the percentage. Btw it's just such a hard thing that I needed a separate
    # function for it.

    try:
        projectdata = checklist.get_list(project_location+"/project.progress")
        data["checklist"] = projectdata["fraction"]
    except:
        pass

    # NEXT THING. As I love to type it into the place where people read me while
    # I'm working. We've got data from 2 files. Now we need to get data from ALL
    # the project.

    # First we are going to get data about the assets. Because it's relatively easy
    # compared to the story. For which you need to parse a crazy complicated .bos
    # file. Which is a complex database in its own right.

    # So let's go and quickly get data about the assets.

    asstfols = ["chr", "veh", "loc", "obj"]
    astlist = []

    for n , f in enumerate(asstfols):

        flist = []

        if len(os.listdir(project_location+"/dev/"+f)) > 0:
            for asset in os.listdir(project_location+"/dev/"+f):

                if asset+".blend" in os.listdir(project_location+"/ast/"+f):
                    flist.append(1.0)

                else:
                    try:
                        fcheck = checklist.get_list(project_location+"/dev/"+f+"/"+asset+"/asset.progress")
                        flist.append(fcheck["fraction"])
                    except:
                        flist.append(0.0)

            # The multiplication thing that I was talking about earlier.

            multiply = data[f+"_factor"]
            for m in range(multiply):
                astlist.append(sum(flist)/len(flist))

            data[f] = sum(flist)/len(flist)

    # For the next step I need to have the story parsed and read. But it's going
    # to be so hard that I will need to write a separate function for it.

    data["rnd"] = story.get_legacy(project_location)["fraction"]

    # After all of it we need to get the final project percentage.
    multiply = data["rnd_factor"]
    for m in range(multiply):
        astlist.append(data["rnd"])

    try:
        data["donework"] = sum(astlist) / len(astlist)
    except:
        data["donework"] = 0.0

    data["fraction"] = (data["donework"] + data["checklist"]) / 2

    # The next section of this data gathering will be about history, scheduling and
    # similar things. I'm gonna create a dictionary of dates. And input data about
    # those dates into the dates.

    # One thing that I have to make sure about is that some dates could be out-
    # side of the range between startdate and deadline. So I have to read the
    # files and add them dynamically.

    # Let's start by reading the scheduling.

    sdata = open(project_location+"/schedule.data")
    sdata = sdata.read()
    sdata = sdata.split("\n")

    # Good that schedules already have the right date format. Altho I think of
    # actually using a datetime object as a date. Or is it too much? Let me think.
    # I do think loudly. And since I have nobody to talk to I will type it here.
    # If you couldn't tell already. Let's write it as a string for now. Because
    # it's easier to work with strings. Maybe in the future I will write it as a
    # datetime object. If I'll find benefit in this.

    for date in sdata:

        # Let's separate the string into 3 components. Basically scheduling is a
        # list of tasks in various checklists throughout the project. Example:

        # 2020/11/30 /dev/obj/Morias_Bike/asset.progress Rigging=:> Make Bones
        # 2021/05/01 project.progress Marketing=:> Release

        # The first entry is a date formatted yyyy/mm/dd. Then comes the path of the
        # checklist location, relative to the project_location. Then a url type
        # thing for within the checklist, separating entries with "=:>".

        # Also I have to note that in the old Blender-Organizer the date 1997/07/30,
        # which is my birthday, will make the task be there every day. It will have
        # its own Green color. Usually when you make a new project there is a list
        # of tasks created for this date.

        if date:
            d = date[:date.find(" ")] # Date
            f = date[date.find(" ")+1:date.replace(" ", ".", 1).find(" ")] # File
            t = date[date.replace(" ", ".", 1).find(" ")+1:].split("=:>") # Task
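
            # Running the first example line above through these slices (purely
            # illustrative, nothing here is read from a real file) gives:
            #
            #   d  ->  "2020/11/30"
            #   f  ->  "/dev/obj/Morias_Bike/asset.progress"
            #   t  ->  ["Rigging", " Make Bones"]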

            if d not in data["dates"]:
                data["dates"][d] = {}

            # Now in order to decide how to input this data into the data["dates"]
            # we need to look at history first. Because first of all it has
            # similar syntax. But on the other hand it's a bit more complex.

            # 2020/07/05 03:06:34 /dev/chr/Civilian_Child_2/asset.progress Rigging=:> Face Rig=:> Mouth area [V]
            # 2020/07/05 03:06:35 /dev/chr/Civilian_Child_2/asset.progress Rigging=:> Ready to make AST [V]
            # 2020/07/05 03:06:37 /dev/chr/Civilian_Child_2/Civilian_Child_2.blend [Openned]
            # 2020/07/05 03:07:56 /dev/chr/Civilian_Child_2/autolink.data [Updated]
            # 2020/07/05 03:08:08 pln/main.bos [Edited]
            # 2020/07/05 03:08:13 /rnd/Scene_2a5jfq6126fe/1/Cc1_IK_Test.blend [Openned]
            # 2020/07/05 03:08:25 /rnd/Scene_2a5jfq6126fe/1/Cc1_IK_Test.blend [Linked]
            # 2020/07/05 03:08:25 /rnd/Scene_2a5jfq6126fe/1/Cc1_IK_Test.blend [Openned]

            # Also history has a feature missing in the schedules. But there is
            # a rub. Schedules you can rearrange by moving them around. And you
            # can simply move a task to a different day if you so desire.

            # In the history, apart from the date you also have time up to the second,
            # which is a bit more precise in terms of when exactly you did a
            # given task. And I would like scheduling to have the same type of
            # precision. Maybe even introduce a notification system if a certain
            # task is due. But it makes the moving of tasks highly complicated
            # to visualize.

            # Maybe a timeline could be a cool idea. Like in the VSE of Blender.
            # A place where you could put specific points on a timeline. And time
            # to the next point will be the length of the part.

            # You could move the dots so to change the timing of parts. And maybe
            # select them to type in precise values. Could work.

            # Let's separate the schedule into 3 types. Asset, Scene, Main.

            # If it's a scene we are talking about. IK it's a bit shitty. Because
            # I will need to parse this as well. But who cares.

            if f.startswith("/rnd"):
                ty = "scenes"
                url = f[:f.rfind("/")].replace("/rnd", "", 1)
                url = url[:url.rfind("/")]
                fn = f.replace("/rnd", "", 1).replace(url, "")

            elif f.startswith("/dev"):
                ty = "assets"
                url = f[:f.rfind("/")].replace("/dev", "", 1)
                fn = f.replace("/dev", "", 1).replace(url, "")

            else:
                ty = "files"
                url = ""
                fn = f
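
            # To illustrate the three branches above with the example paths from
            # the history snippet (again, just sample values):
            #
            #   "/rnd/Scene_2a5jfq6126fe/1/Cc1_IK_Test.blend"
            #       ->  ty = "scenes", url = "/Scene_2a5jfq6126fe", fn = "/1/Cc1_IK_Test.blend"
            #   "/dev/chr/Civilian_Child_2/asset.progress"
            #       ->  ty = "assets", url = "/chr/Civilian_Child_2", fn = "/asset.progress"
            #   "pln/main.bos"
            #       ->  ty = "files",  url = "",  fn = "pln/main.bos"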

            if not ty in data["dates"][d]:
                data["dates"][d][ty] = {}

            if not url in data["dates"][d][ty]:
                data["dates"][d][ty][url] = []

            data["dates"][d][ty][url].append([
                "00:00:00",
                "schedule",
                fn,
                t,
                Username
            ])

    # Okay I don't really know what exactly I just did. But it seems like
    # a good starting point. Up to a working version of VCStudio I can edit and
    # change these all I want. Okay I guess it's time to parse another, way
    # harder file. The history. LOL.

    # First let's deal with the percentage history file. It's kind of a funny
    # one as well. The funny part is that it has no consistency at all. I will
    # need to convert it as well. It has a stupid %y-%m-%d format. Which is WTF.

    history_percentage_format = "%y-%m-%d"

    hdata = open(project_location+"/percentage_hystory.data") # IK spelling LOL
    hdata = hdata.read()
    hdata = hdata.split("\n")

    for date in hdata:
        if date.startswith("DATE"):

            # An example of the formatting is actually quite amazing. It's going
            # to be relatively simple. Example:

            # DATE 20-06-26 42.64%

            # As you can see it's the word DATE, then that date, then the fraction
            # as a percentage. Separated by a spacebar.

            try:
                t, d, f = date.split(" ")

                # Converting the d into a normal date

                d = datetime.datetime.strptime(d, history_percentage_format)
                d = datetime.datetime.strftime(d, new_date_format)

                # Converting the f into a fraction

                f = float(f.replace("%", "")) / 100
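
                # For the example line above, "DATE 20-06-26 42.64%" would come
                # out of this block (illustratively) as:
                #
                #   t  ->  "DATE"
                #   d  ->  "2020/06/26"
                #   f  ->  0.4264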

            except:
                continue

            # I just don't want to deal with people who are editing the file
            # manually and then say I did something wrong when the program crashed.

            if d not in data["dates"]:
                data["dates"][d] = {}

            data["dates"][d]["fractions"] = {
                "project"  : f,   # The fraction we received from the file
                "checklist": 0.0, # Placeholder for the checklist fraction
                "chr"      : 0.0, # \
                "veh"      : 0.0, # |
                "loc"      : 0.0, # > - Placeholders for categories
                "obj"      : 0.0, # |
                "rnd"      : 0.0  # /
            }

    # Okay this file was parsed. Which was relatively simple. NOW. Let's
    # do something a bit harder. Or is it as simple as the other? IDK.
    # History file. The example was a bit earlier.

    hdata = open(project_location+"/history.data")
    hdata = hdata.read()
    hdata = hdata.split("\n")

    for line in hdata:

        if not line:
            continue

        # So basically every line is quite simple on the left side. But becomes
        # quite complex on the right side. So let's get the left side things.

        date = line[:line.find(" ")]
        time = line[line.find(" ")+1:line.replace(" ", ".", 1).find(" ")]
        path = line[line.replace(" ", ".", 1).find(" ")+1:line.replace(" ", ".", 2).find(" ")]
        done = line[line.replace(" ", ".", 2).find(" ")+1:]

        # I made a mistake allowing paths to have spaces in them. And I need to
        # make sure we have the whole path and not only up to a point.

        while done[0] != "[" and ".progress" not in path and "[" in done:

            transporting = done[:done.find(" ")]

            path = path+" "+transporting
            done = done.replace(transporting+" ", "")

        # "date" will be our standard date, yyyy/mm/dd. Then "time" is our time
        # from the current day, 24 hour system, hh:mm:ss. "path" is the file to
        # which the change was done. We will need to parse this one. Because it
        # contains the full path to, let's say, a blend file. While we want to
        # know which asset for example was accessed. Then "done" is the rest of
        # the string. Basically it tells what was actually done with a given file.
        # We will need to parse this one as well somehow. And it scares me a lot.
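
        # Running the first history example from above through the left-side
        # slicing (the line is the sample quoted earlier, not live data):
        #
        #   line = "2020/07/05 03:06:34 /dev/chr/Civilian_Child_2/asset.progress Rigging=:> Face Rig=:> Mouth area [V]"
        #   date  ->  "2020/07/05"
        #   time  ->  "03:06:34"
        #   path  ->  "/dev/chr/Civilian_Child_2/asset.progress"
        #   done  ->  "Rigging=:> Face Rig=:> Mouth area [V]"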

        # One thing that we can use it for is restoring the old scheduling list.
        # Because in a lazy attempt to let the user know that a given task is
        # finished I would just delete the task from the schedule file entirely. But
        # in the history file you can find instances of [Scheduled] with the full
        # path of a task and the date to which it was scheduled. So yeah.

        # Let's get to work.

        f = path # Copied from previous

        if f.startswith("/rnd"):
            ty = "scenes"
            url = f[:f.rfind("/")].replace("/rnd", "", 1)
            url = url[:url.rfind("/")]
            fn = f.replace("/rnd", "", 1).replace(url, "")

        elif f.startswith("/dev"):
            ty = "assets"
            url = f[:f.rfind("/")].replace("/dev", "", 1)
            fn = f.replace("/dev", "", 1).replace(url, "")

        elif f.startswith("/ast") and ".blend" in f:
            ty = "assets"
            url = f[:f.rfind(".")].replace("/ast", "", 1)
            fn = "[asset_blend]"

        else:
            ty = "files"
            url = ""
            fn = f

        # Now in order to parse the "done" variable we need to define the operations
        # that we are not looking for. Simple (Trivial) Operations.

        simple_operations = [
            "[Edited]",
            "[Openned]",
            "[Linked]",
            "[Edited]",
            "[Updated]",
            "[Added]",
            "[Added Asset]"
        ]

        # Basically if "done" is not on that list, it's something to do with
        # checklists. And then we have either [V], [ ] or [Scheduled].

        if done not in simple_operations:
            if "[Scheduled]" in done:

                # These are the missing scheduling data I was talking about.
                # Let's parse this data from the back. The one at the back
                # should be the DATE. Then a spacebar. Then the [Scheduled], then
                # another spacebar, then the task.

                missingdate = done[done.rfind(" ")+1:]
                missingtask = done[:done.rfind(" [Scheduled]")].split("=:>")
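
                # A hypothetical history entry of this shape, e.g.
                #
                #   done = "Rigging=:> Make Bones [Scheduled] 2020/12/01"
                #
                # would split (illustratively) into:
                #
                #   missingdate  ->  "2020/12/01"
                #   missingtask  ->  ["Rigging", " Make Bones"]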

                # Let's add them into the data. I don't know what it will be.
                # Let's see.

                if not missingdate in data["dates"]:
                    data["dates"][missingdate] = {}

                if not ty in data["dates"][missingdate]:
                    data["dates"][missingdate][ty] = {}

                if not url in data["dates"][missingdate][ty]:
                    data["dates"][missingdate][ty][url] = []

                data["dates"][missingdate][ty][url].append([
                    "00:00:00",
                    "schedule",
                    fn,
                    missingtask,
                    Username
                ])

            # Or else it's a checklist task, whether checked or unchecked.

            else:
                if "[V]" in done:
                    done = done.replace(" [V]", "").split("=:>")
                    check = "[Checked]"
                else:
                    done = done.replace(" [ ]", "").split("=:>")
                    check = "[Un-Checked]"

                # Putting the thing into the data

                if not date in data["dates"]:
                    data["dates"][date] = {}

                if not ty in data["dates"][date]:
                    data["dates"][date][ty] = {}

                if not url in data["dates"][date][ty]:
                    data["dates"][date][ty][url] = []

                data["dates"][date][ty][url].append([
                    time,
                    "history",
                    fn,
                    check,
                    done,
                    Username
                ])

                #print(data["dates"][date][ty][url])

        # Now let's add all the others.

        else:
            if not date in data["dates"]:
                data["dates"][date] = {}

            if not ty in data["dates"][date]:
                data["dates"][date][ty] = {}

            if not url in data["dates"][date][ty]:
                data["dates"][date][ty][url] = []

            data["dates"][date][ty][url].append([
                time,
                "history",
                fn,
                done,
                Username
            ])

            #print(ty, url, fn, done)

    #for i in sorted(data["dates"]):
    #    print(i)
    #    print()
    #    print(data["dates"][i])
    #    print()
    #    print()

    #data_save(project_location, data)

    return data

def save(project, data):

    # This function will save the analytics data.

    try:
        os.mkdir(project+'/set/')
    except:
        pass

    with open(project+'/set/analytics.json', 'w') as fp:
        json.dump(data, fp, sort_keys=True, indent=4)
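
    # A typical call (sketch, assuming a valid VCStudio project folder) would be
    # something like:
    #
    #   save(project_location, get_legacy(project_location))
    #
    # which writes the gathered analytics to <project>/set/analytics.json.
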
def load(project_location):

    # This is a simple load analytics function.

    name_tmp = project_location[project_location.rfind("/")+1:]

    data = {
        "name"       : name_tmp,     # Name of the project (typed properly)
        "director"   : "",           # Name of the project's director.
        "status"     : "",           # Project's comment / type
        "donework"   : 0.0,          # Percentage of Assets and Scenes done
        "fraction"   : 0.0,          # Project's completion percentage
        "checklist"  : 0.0,          # Project's main checklist percentage
        "startdate"  : "0000/00/00", # Date of the start of the project
        "deadline"   : "0000/00/00", # Date when the project's deadline is
        "duration"   : 0,            # Amount of days between startdate and deadline
        "timepassed" : 0.0,          # Percentage of how much time has passed
        "dayspassed" : 0,            # Amount of days since the startdate
        "chr_factor" : 1,            # Importance factor for Characters
        "needed"     : 0,            # Needed % to do today
        "star"       : 0,            # Did you reach the needed amount
        "veh_factor" : 1,            # Importance factor for Vehicles
        "loc_factor" : 1,            # Importance factor for Locations
        "obj_factor" : 1,            # Importance factor for Objects (Other)
        "rnd_factor" : 4,            # Importance factor for Scenes (Renders)
        "chr"        : 0.0,          # Percentage of Characters done
        "veh"        : 0.0,          # Percentage of Vehicles done
        "loc"        : 0.0,          # Percentage of Locations done
        "obj"        : 0.0,          # Percentage of Objects (Other) done
        "rnd"        : 0.0,          # Percentage of Scenes (Renders) done
        "dates"      : {}            # Per date, detailed data about the project
    }

    try:
        with open(project_location+'/set/analytics.json') as json_file:
            data = json.load(json_file)
    except:
        pass

    try:
        projectdata = checklist.get_list(project_location+"/set/project.progress")
        data["checklist"] = projectdata["fraction"]
    except:
        make = open(project_location+"/set/project.progress", "w")
        make.write("[ ] Story\n")
        make.write("[ ] "+talk.text("chr")+"\n")
        make.write("[ ] "+talk.text("veh")+"\n")
        make.write("[ ] "+talk.text("loc")+"\n")
        make.write("[ ] "+talk.text("obj")+"\n")
        make.write("[ ] Animation\n")
        make.write("[ ] Rendering")
        make.close()

    # Let's see if the dates are fine. Or if they are even dates.

    new_date_format = "%Y/%m/%d"
    if not ifdate(data["startdate"]) or not ifdate(data["deadline"]):
        data["startdate"] = datetime.datetime.strftime(datetime.datetime.today(), new_date_format)
        data["deadline"] = datetime.datetime.strftime(datetime.datetime.today()+datetime.timedelta(days=30), new_date_format)

    # So we've got the dates. Let's calculate the time percentage, I guess.

    startdate = datetime.datetime.strptime(data["startdate"], new_date_format)
    deadline = datetime.datetime.strptime(data["deadline"], new_date_format)

    delta = deadline - startdate
    data["duration"] = int(delta.days)

    delta = datetime.datetime.today() - startdate
    data["dayspassed"] = int(delta.days)

    data["timepassed"] = data["dayspassed"] / data["duration"]
    if data["timepassed"] > 1.0:
        data["timepassed"] = 1.0

    # LET'S FILTER THE SCHEDULES

    data["dates"] = schedule.filter(project_location, data["dates"])

    # NEXT THING. As I love to type it into the place where people read me while
    # I'm working. We've got data from 2 files. Now we need to get data from ALL
    # the project.

    # First we are going to get data about the assets. Because it's relatively easy
    # compared to the story. For which you need to parse a crazy complicated .bos
    # file. Which is a complex database in its own right.

    # So let's go and quickly get data about the assets.

    asstfols = ["chr", "veh", "loc", "obj"]
    astlist = []

    for n , f in enumerate(asstfols):

        flist = []

        if len(os.listdir(project_location+"/dev/"+f)) > 0:
            for asset in os.listdir(project_location+"/dev/"+f):

                if asset+".blend" in os.listdir(project_location+"/ast/"+f):
                    flist.append(1.0)

                else:
                    try:
                        fcheck = checklist.get_list(project_location+"/dev/"+f+"/"+asset+"/asset.progress")
                        flist.append(fcheck["fraction"])
                    except:
                        flist.append(0.0)

            # The multiplication thing that I was talking about earlier.

            multiply = data[f+"_factor"]
            for m in range(multiply):
                astlist.append(sum(flist)/len(flist))

            data[f] = sum(flist)/len(flist)

    # For the next step I need to have the story parsed and read. But it's going
    # to be so hard that I will need to write a separate function for it.

    data["rnd"] = story.load(project_location)["fraction"]

    # After all of it we need to get the final project percentage.
    multiply = data["rnd_factor"]
    for m in range(multiply):
        astlist.append(data["rnd"])

    try:
        data["donework"] = sum(astlist) / len(astlist)
    except:
        data["donework"] = 0.0

    # I decided to remove the main checklist from the main fraction. This way the fraction
    # reflects the overall project better. I will keep the old code in a comment for a while.
    data["fraction"] = data["donework"] #(data["donework"] + data["checklist"]) / 2

    # Let's record it for today.
    today = datetime.datetime.strftime(datetime.datetime.today(), new_date_format)
    if today not in data["dates"]:
        data["dates"][today] = {}
    data["dates"][today]["fractions"] = {
        "project"  : data["fraction"],
        "checklist": data["checklist"],
        "chr"      : data["chr"],
        "veh"      : data["veh"],
        "loc"      : data["loc"],
        "obj"      : data["obj"],
        "rnd"      : data["rnd"]
    }

    # Needed % per day, given the remaining time and the configured work days per week.
    try:
        alldates = list(reversed(data["dates"].keys()))
        prevdate = alldates[alldates.index(today)+1]
        prev_frac = data["dates"][prevdate].get("fractions", {}).get("project", data["fraction"])
        data["needed"] = ( 1 - prev_frac ) / (( data["duration"] - data["dayspassed"] )/7*data.get("days_a_week", 7))
    except:
        data["needed"] = 0
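
    # A small worked example with made-up numbers: if yesterday the project was at
    # prev_frac = 0.40, duration = 100 days, dayspassed = 40 and "days_a_week" is
    # the default 7, then the remaining working days are (100 - 40)/7*7 = 60 and
    #
    #   needed = (1 - 0.40) / 60 = 0.01
    #
    # i.e. about 1% of the whole project per day to still hit the deadline.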

    prev_star = data.get("star", 0)
    try:
        alldates = list(reversed(data["dates"].keys()))
        prevdate = alldates[alldates.index(today)+1]
        prev_frac = data["dates"][prevdate].get("fractions", {}).get("project", data["fraction"])
        data["star"] = (data["fraction"] - prev_frac) / data["needed"]
    except:
        data["star"] = 0
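
    # Continuing the illustrative numbers from above: if today's fraction moved
    # from 0.40 to 0.425 while needed = 0.01, then
    #
    #   star = (0.425 - 0.40) / 0.01 = 2.5
    #
    # and crossing the 2.0 mark is what triggers the alert below.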

    if data["star"] >= 2 and not prev_star >= 2:
        talk.alert("⭐ Today's Requirement is Finished!")

    return data