# THIS SOFTWARE IS A PART OF FASTLBRY PROJECT
# THIS SPECIFIC FILE IS UNDER GNU GPLv3 or later
import os
import json
from subprocess import *
import urllib.request
import urllib.parse
from urllib import request, parse
# Icon names available to the Default theme, derived from its *.png files.
# NOTE(review): the path is hard-coded to one developer's machine; guard the
# listing so importing this module elsewhere does not crash at import time.
ICONS_DIR = "/home/vcs/Software/VCStudio/settings/themes/Default/icons"
try:
    preicons = os.listdir(ICONS_DIR)
except OSError:
    # Directory missing on this machine: expose empty lists instead of failing.
    preicons = []
# Icon names with the ".png" suffix removed.
icons = [name.replace(".png", "") for name in preicons]
################################################################################
# Markdown (the .md file format) is an easy way to give simple text documents
# a bit of flair. Stuff like links, images and quotes are supported, as well
# as bold and italic characters.
def lbrynet(method="", params=None):
    """Call the local LBRY SDK over its JSON-RPC HTTP API.

    method -- the SDK method name (e.g. "resolve", "get").
    params -- dict of parameters for that method (default: empty).
    Returns the "result" part of the SDK's JSON reply (or the whole reply
    if it has no "result" key), or False on any failure.
    """
    # Avoid the shared-mutable-default pitfall: build a fresh dict per call.
    if params is None:
        params = {}
    try:
        data = {"method": method,
                "params": params}
        # The SDK port can differ per user; read it from the daemon settings
        # if that file exists, otherwise fall back to the default port.
        sdk_url = "http://localhost:5279"
        try:
            with open(os.path.expanduser(
                    '~/.local/share/lbry/lbrynet/daemon_settings.yml')) as settings:
                for line in settings:
                    if line.startswith("api:"):
                        # strip() drops the trailing newline that would
                        # otherwise corrupt the URL.
                        sdk_url = "http://" + line[5:].strip()
        except OSError:
            # No settings file: keep the default URL (best-effort).
            pass
        payload = json.dumps(data).encode('utf-8')
        req = request.Request(sdk_url, data=payload)
        resp = json.loads(request.urlopen(req).read())
        # Unwrap {"result": ...} when present; otherwise return as-is.
        if isinstance(resp, dict):
            return resp.get("result", resp)
        return resp
    except Exception as e:
        # Any failure (SDK not running, bad reply, ...) reports False.
        print("Failed", e)
        return False
def odysee_get(link):
    """Turn a lbry:// link into a locally hosted /lbry/ path.

    Non-lbry links are returned unchanged.  For lbry:// links the function
    consults the "lbry.json" hosting cache; uncached, free content is
    downloaded via the SDK and recorded there.  Paid content and any SDK
    failure fall back to returning the original link.
    """
    if not link.startswith("lbry://"):
        return link
    with open("lbry.json") as json_file:
        lbry = json.load(json_file)
    # Cache key: "lbry://..." becomes "/lbry/..." and "#" claim ids use ":".
    newlink = link.replace("lbry://", "/lbry/").replace("#", ":")
    if newlink not in lbry["hosting"]:
        out = lbrynet("resolve", {"urls": [link]})
        # lbrynet returns False on failure; guard before indexing.
        if not isinstance(out, dict) or link not in out:
            return link
        out = out[link]
        try:
            price = out["value"]["fee"]["amount"]
        except (KeyError, TypeError):
            price = 0
        if price != 0:
            # Paid content: do not download, keep the original link.
            return link
        got = lbrynet("get", {"uri": link, "save_file": True})
        dpath = got.get("download_path") if isinstance(got, dict) else None
        if not dpath:
            return link
        # Only record and persist the cache entry once the download
        # actually produced a path (the original mutated the dict first).
        lbry["hosting"][newlink] = dpath
        with open("lbry.json", 'w') as f:
            json.dump(lbry, f, indent=4)
    return newlink + "\n"
def Open(md):
    """Parse markdown text into a flat list of typed blocks.

    Each entry of the returned list is one of:
      ["text" / "text_i" / "text_b" / "text_c", string]  -- styled text runs
      ["text_cm", string]                                -- fenced code block
      [int, string]                                      -- heading (hash count)
      ["link", tooltip, url]                             -- [tooltip](url)
      ["image", tooltip, url]                            -- ![tooltip](url)
      ["image_link", imageurl, url]                      -- [![t](img)](url)
    """
    # Prepend two blank lines so the parser always has leading context.
    md = "\n\n" + md
    md = md.split("\n")

    # Stage 1: line-level parsing (mentions, code fences, rules, headings).
    tree = []
    skip = 0
    for n, line in enumerate(md):
        if skip > n:
            # This line was swallowed by an earlier code fence.
            continue
        # Turn @mentions into markdown links pointing at /account/<name>.
        line2 = line
        line = ""
        for word in line2.split(" "):
            if word.startswith("@"):
                word = '[' + word + '](/account/' + word.replace("@", "") + ')'
            line = line + " " + word
        line = line[1:]
        ty = "text"
        te = line
        if line.startswith("```"):
            # Fenced code block: swallow every line up to the closing fence.
            # Code blocks suppress all other markdown until closed.
            code = ""
            for l in md[n + 1:]:
                if not l.startswith("```"):
                    code = code + l + "\n"
                else:
                    skip = n + code.count("\n") + 2
                    break
            tree.append(["text_cm", code + "\n"])
            te = ""
        elif line.startswith("---"):
            # Horizontal rule. NOTE(review): this literal was mangled in the
            # original file; '<hr>' is a reconstruction — TODO confirm upstream.
            te = '<hr>'
        elif line.startswith("#"):
            # Heading: the hash count becomes an integer block type.  Counts
            # '#' anywhere in the line, not just leading — preserved quirk.
            ty = line.count("#")
        tree.append([ty, te + "\n"])

    # Stage 2: character-level parsing of inline markdown inside each block:
    # links, images, clickable images and the *, **, ` style toggles.
    newtree = []
    for block in tree:
        if block[0] == "text_cm":
            # Code blocks are opaque: no inline parsing inside them.
            newtree.append(block)
            continue
        part = ""
        skip = 0
        for n, l in enumerate(block[-1]):
            if skip > n:
                # Consumed by an inline construct parsed below.
                continue
            part = part + l
            if part.endswith("[!["):
                # Clickable image: [![tooltip](imageurl)](url)
                newtree.append([block[0], part[:-3]])
                tooltip = ""
                imageurl = ""
                url = ""
                t = False   # seen the "]" closing the tooltip
                iu = False  # seen the ")" closing the image url
                skip = n
                for le in block[-1][n:]:  # letters in the rest of the text
                    skip = skip + 1
                    if le == "]":
                        t = True
                    elif le == ")" and t and not iu:
                        iu = True
                    elif le == ")" and t and iu:
                        break
                    elif not t:
                        tooltip = tooltip + le
                    elif t and not iu:
                        imageurl = imageurl + le
                    else:
                        url = url + le
                tooltip = tooltip[tooltip.find("[") + 1:]
                imageurl = imageurl[imageurl.find("(") + 1:]
                url = url[url.find("(") + 1:]
                newtree.append(["image_link", imageurl, url])
                part = ""
            elif part.endswith("!["):
                # Plain image: ![tooltip](url)
                newtree.append([block[0], part[:-2]])
                tooltip = ""
                url = ""
                t = False
                skip = n
                for le in block[-1][n:]:
                    skip = skip + 1
                    if le == "]":
                        t = True
                    elif le == ")" and t:
                        break
                    elif not t:
                        tooltip = tooltip + le
                    else:
                        url = url + le
                tooltip = tooltip[tooltip.find("[") + 1:]
                url = url[url.find("(") + 1:]
                newtree.append(["image", tooltip, url])
                part = ""
            elif part.endswith("[") and not block[-1][n:].startswith('[!['):
                # Plain link: [tooltip](url).  The extra check keeps this
                # branch from firing on the "[" that opens a clickable image.
                newtree.append([block[0], part[:-1]])
                tooltip = ""
                url = ""
                t = False
                skip = n
                for le in block[-1][n:]:
                    skip = skip + 1
                    if le == "]":
                        t = True
                    elif le == ")" and t:
                        break
                    elif not t:
                        tooltip = tooltip + le
                    else:
                        url = url + le
                tooltip = tooltip[tooltip.find("[") + 1:]
                url = url[url.find("(") + 1:]
                newtree.append(["link", tooltip, url])
                part = ""
            elif part.endswith("**") and not block[-1][n + 2:].startswith('*'):
                # ** toggles bold on/off: flush the run, flip the style.
                newtree.append([block[0], part[:-2]])
                if block[0] == "text":
                    block[0] = "text_b"
                else:
                    block[0] = "text"
                part = ""
            elif part.endswith("*") and not block[-1][n + 1:].startswith('*'):
                # Single * toggles italic (skipped when part of a ** pair).
                newtree.append([block[0], part[:-1]])
                if block[0] == "text":
                    block[0] = "text_i"
                else:
                    block[0] = "text"
                part = ""
            elif part.endswith("`"):
                # ` toggles inline code.
                newtree.append([block[0], part[:-1]])
                if block[0] == "text":
                    block[0] = "text_c"
                else:
                    block[0] = "text"
                part = ""
        # Flush whatever is left of the block.
        newtree.append([block[0], part])
    return newtree
def search_convert(s):
    """Convert a chapter name into a URL anchor slug.

    Such links are used (e.g. on notabug.org) to jump to chapters:
    example.com/file.md#chapter-name loads the file and skips to the
    "Chapter Name" chapter.  "Chapter Name" -> "chapter-name".
    """
    # Characters that are dropped outright from the slug.
    forbidden = " ./\|[]{}()?!@#$%^&*`~:;'\"=,<>"
    # Lowercase first, map spaces to dashes, then filter the rest.
    slug = s.lower().replace(" ", "-")
    return "".join(ch for ch in slug if ch not in forbidden)
def convert(filename, isfile=True, fullpath=False):
    """Render a markdown file (or raw string) to an HTML fragment.

    filename -- path to a markdown file, or the markdown text itself.
    isfile   -- when True, filename is a path to read; otherwise raw text.
    fullpath -- when True, site-relative link targets are absolutized with
                the configured domain (needs the project's Set module).

    NOTE(review): many HTML literals in this function were stripped from the
    original file; the markup below is a reconstruction — TODO confirm each
    tag against the upstream source.
    """
    if fullpath:
        # Site-local configuration; only needed to absolutize links.
        from modules import Set
        config = Set.Load()
        domain = config.get("domain", "")
    textReturn = ""
    if isfile:
        with open(filename) as f:
            text = f.read()
    else:
        text = filename
    md = Open(text)

    close_div = False
    for n, i in enumerate(md):
        close_not_here = False
        # Quote blocks: lines starting with ">" (or the escaped "&gt;" —
        # reconstructed, TODO confirm) open a quote <div>; consecutive
        # quote lines share one div, closed after the last of them.
        if i[-1].startswith(">") or i[-1].startswith("&gt;") and not close_div:
            i[-1] = '<div class="quote">' + i[-1].replace(">", "").replace("&gt;", "")
            close_div = True
        elif close_div:
            i[-1] = i[-1].replace(">", "").replace("&gt;", "")
        try:
            NEXT = md[n + 1]
        except IndexError:
            NEXT = ["text", ""]
        if NEXT[-1].startswith(">") or NEXT[-1].startswith("&gt;"):
            # The next block continues the quote: don't close the div yet.
            close_not_here = True
        if close_div and "\n" in i[-1] and not close_not_here:
            i[-1] = i[-1] + "</div>"
            close_div = False

        if type(i[0]) == str and i[0].startswith("text") and not i[0] == "text_cm":
            tag = ""
            ctag = ""
            # Inline styles: text_i -> <i>, text_b -> <b>, text_c -> <code>.
            for f in ["i", "b", "c"]:
                if f in i[0]:
                    # The original discarded this replace() result, which
                    # would emit a bogus <c> tag; keep the intended <code>.
                    f = f.replace("c", "code").replace("q", "blockquote")
                    tag = "<" + f + ">"
                    ctag = "</" + f + ">"
            # Bare lbry:// and http(s) lines become links to themselves.
            if i[-1].startswith("lbry://"):
                tag = '<a href="' + i[-1].strip() + '">'
                ctag = "</a>"
            if i[-1].startswith("http"):
                tag = '<a href="' + i[-1].strip() + '">'
                ctag = "</a>"
            textReturn = textReturn + tag + i[-1] + ctag
        elif i[0] == "text_cm":
            # Fenced code block, rendered verbatim.
            tag = "<pre><code>"
            ctag = "</code></pre>"
            textReturn = textReturn + tag + i[-1] + ctag
        elif type(i[0]) == int:
            # Heading: i[0] is the hash count; give it an anchor id built
            # with search_convert() so #chapter-name links work.
            level = max(1, min(i[0], 6))
            title = i[-1].replace("#", "").strip()
            textReturn = (textReturn + '\n<h' + str(level) + ' id="'
                          + search_convert(title) + '">' + title
                          + '</h' + str(level) + '>\n')
        elif i[0] == "link":
            if not i[-1]:
                # Empty target: fall back to a site search for the label.
                i[-1] = "/search?text=" + urllib.parse.quote_plus(i[1])
            if fullpath and i[-1].startswith("/"):
                i[-1] = "https://" + domain + i[-1]
            textReturn = textReturn + '<a href="' + i[-1] + '">' + i[1] + '</a>'

    # Keep single line breaks visible in HTML.
    # NOTE(review): reconstructed markup — TODO confirm.
    textReturn = textReturn.replace("\n", "\n<br> ")
    # Embed the raw markdown in a trailing HTML comment, neutralizing "-->"
    # so the comment cannot be closed early (pentest guard — this replace
    # reconstructs exactly from the mangled original).
    textReturn = textReturn + "\n\n<!--" + text.replace("-->", "( somebody was trying to pentest the site here with exiting the comment early. Good job )") + "\n\n"
    textReturn = textReturn + " -->\n\n"
    return textReturn