Compare commits
294 commits
Author | SHA1 | Date | |
---|---|---|---|
|
78d5c8c6fa | ||
|
caca92a6bc | ||
|
9ef1b7800b | ||
|
ea3315d1d6 | ||
|
68132c65a9 | ||
|
57e017ec8f | ||
|
42db3782ec | ||
|
9d93799d86 | ||
|
d93f463386 | ||
|
77988c1682 | ||
|
c79e07c9fa | ||
|
e454cdb4c9 | ||
|
98a10d1269 | ||
|
4f6748ae83 | ||
|
c1b2117df5 | ||
|
c4207338c8 | ||
|
5a01983203 | ||
|
ee8eb83d07 | ||
|
8d0f762067 | ||
|
8fa1482d18 | ||
|
00ae404642 | ||
|
230bfe4a41 | ||
|
df33fb9263 | ||
|
d72be1d920 | ||
|
7d12a90139 | ||
|
e1689a2a6c | ||
|
a8a6347d52 | ||
|
bdee1b4092 | ||
|
0d0d39380c | ||
|
7ff1a009da | ||
|
e3a332c7e1 | ||
|
33ee6e4b94 | ||
|
f6cde976a6 | ||
|
17944fa46a | ||
|
3c18ae8de2 | ||
|
84790720ff | ||
|
23690731af | ||
|
75628d8530 | ||
|
6e819b20f6 | ||
|
da0b6e5b79 | ||
|
28791f317b | ||
|
070287716b | ||
|
41054e77a6 | ||
|
6944e17f43 | ||
|
a224fe44c2 | ||
|
fa9dc35123 | ||
|
3a0882230a | ||
|
fbd683e094 | ||
|
2f15c920d4 | ||
|
01f6448e72 | ||
|
8fb1e2ead0 | ||
|
3b84db382c | ||
|
4fe6840a4e | ||
|
69e6fb51d1 | ||
|
f17110ab7f | ||
|
768743a200 | ||
|
7c652b22a1 | ||
|
a0fb4e579e | ||
|
519e1e4648 | ||
|
087f20c133 | ||
|
21e6603a26 | ||
|
ca41f5052e | ||
|
d739a98beb | ||
|
7c7ceed333 | ||
|
198473b62b | ||
|
d53d0a1d52 | ||
|
55577201a4 | ||
|
cacd21f840 | ||
|
9799b0a732 | ||
|
e985a60761 | ||
|
4b4cee9fcc | ||
|
acca80dc70 | ||
|
053c618458 | ||
|
2cf3526c74 | ||
|
d7e194cb5c | ||
|
9a8b1922fe | ||
|
12d627bd35 | ||
|
9397a40c9f | ||
|
6a4093f3f8 | ||
|
bac2969295 | ||
|
5dfd8dee1b | ||
|
06c43c3f71 | ||
|
881d86799b | ||
|
f7e4366a67 | ||
|
5b5f7e1c68 | ||
|
fecf67118c | ||
|
beade71aa6 | ||
|
ca8ff505d4 | ||
|
f5f12e1560 | ||
|
c4693c221f | ||
|
692c796770 | ||
|
de798ade36 | ||
|
a93c2c4538 | ||
|
53e0b7c47b | ||
|
070dda8739 | ||
|
f773569920 | ||
|
ad6fa4d725 | ||
|
d4ca71a89d | ||
|
c53cf4c1b3 | ||
|
ad5a30da9e | ||
|
ecda80b02d | ||
|
24cf937e14 | ||
|
0b002c8228 | ||
|
a56166ee51 | ||
|
ddca850c17 | ||
|
31ce612e2e | ||
|
eb8900c66a | ||
|
67da4142d5 | ||
|
fb0e567caf | ||
|
7e83b17b69 | ||
|
7c02c5b92d | ||
|
2a33f44317 | ||
|
b7037900f8 | ||
|
5d230a6b54 | ||
|
f0280b51b4 | ||
|
775e4881cb | ||
|
ec9f46f552 | ||
|
6c6e93cefc | ||
|
eab9bcf1ff | ||
|
751bc84ce5 | ||
|
41fd9f6844 | ||
|
7f1906d58b | ||
|
5be3551abe | ||
|
fc18151d77 | ||
|
13543b20e9 | ||
|
4eba7200d8 | ||
|
cd11e82676 | ||
|
c7c220ecd3 | ||
|
4d56339756 | ||
|
140353097e | ||
|
a1caea4a28 | ||
|
b1e10e7b09 | ||
|
ad27425471 | ||
|
81e9378b9d | ||
|
64040ea67a | ||
|
d99e200178 | ||
|
357aebbcce | ||
|
0eef62b5fd | ||
|
a3dd3dc626 | ||
|
f942bf8025 | ||
|
a05864404d | ||
|
70ad891dfa | ||
|
8e61cde0a0 | ||
|
8600077caa | ||
|
e7fdd21bac | ||
|
0bb6b6d833 | ||
|
b59ef28267 | ||
|
e926a2c1f6 | ||
|
1369ed0b48 | ||
|
f39fc11697 | ||
|
bffc0823be | ||
|
edbb22fcf0 | ||
|
843303301a | ||
|
be7fd7ddd8 | ||
|
bfcfe5f36b | ||
|
4c4da93ef3 | ||
|
c2b5878daa | ||
|
749d5095c9 | ||
|
ba77b61ae0 | ||
|
07915ce697 | ||
|
4b2221ce5e | ||
|
1d5b69bfe6 | ||
|
4d915587e0 | ||
|
83f2c28c20 | ||
|
47e467057d | ||
|
1ae32d638b | ||
|
7907ee3579 | ||
|
414ed1c130 | ||
|
3bb17b09d3 | ||
|
046f46267e | ||
|
adc3ffe194 | ||
|
76301d40ba | ||
|
963c450730 | ||
|
6e7bb994d0 | ||
|
95890cbabf | ||
|
69c2a91ec7 | ||
|
fbe09a692c | ||
|
0ccc6e7979 | ||
|
3eb74cc8b5 | ||
|
9bfb1656fe | ||
|
5d40d24804 | ||
|
79d3aadda8 | ||
|
eba59a0806 | ||
|
3fd4909cd1 | ||
|
b59372c1d8 | ||
|
c9e713e9df | ||
|
bc72d2f1c4 | ||
|
df539e5d01 | ||
|
1b89104101 | ||
|
3dc30c3b98 | ||
|
259df8d257 | ||
|
5460224cd6 | ||
|
29479a73a7 | ||
|
625f339709 | ||
|
d6fcfd888a | ||
|
5f7d9d4940 | ||
|
826fcb78ea | ||
|
45e87c3007 | ||
|
cdee0d0de7 | ||
|
e30f21dc85 | ||
|
df88c7e952 | ||
|
62a1a0eed5 | ||
|
93f9185c71 | ||
|
ef62242f42 | ||
|
4a356e86da | ||
|
ac95ca57af | ||
|
c28d5a716b | ||
|
59471f9c26 | ||
|
8028c6621c | ||
|
8abf6ad255 | ||
|
1f9a645c57 | ||
|
a5657aaf46 | ||
|
b6861dae9b | ||
|
cff147bb95 | ||
|
54d04dcf2c | ||
|
65c4f99c1c | ||
|
68c0fd9ed7 | ||
|
2bde06e4b9 | ||
|
5df6db6e96 | ||
|
62dfdc1adb | ||
|
fe55304184 | ||
|
bb76500f04 | ||
|
4e2efec66e | ||
|
8d650e032c | ||
|
f5d97957e7 | ||
|
36f539ef5d | ||
|
77461f71af | ||
|
caca13de61 | ||
|
7bdc117ba0 | ||
|
fc575dac80 | ||
|
d520ff2c68 | ||
|
25e49f8ff2 | ||
|
77d0efcf3c | ||
|
291a105269 | ||
|
682117a030 | ||
|
6d2e4aa6e6 | ||
|
77749f05ba | ||
|
10a855a8ba | ||
|
a9e76149e8 | ||
|
dd4aebdba4 | ||
|
b409d36de0 | ||
|
30af4a0136 | ||
|
a3b660fab5 | ||
|
65daeec216 | ||
|
ad1704fb39 | ||
|
44e6cb5ddc | ||
|
5171acc007 | ||
|
946314da94 | ||
|
5961327adf | ||
|
d2bbb36312 | ||
|
c7272e988a | ||
|
5b9846bcca | ||
|
8d849b8f06 | ||
|
c41c368f34 | ||
|
7ce4c4a2ce | ||
|
0d473cc4f6 | ||
|
2f701a887a | ||
|
8d56ef852f | ||
|
849ff11bfd | ||
|
45982f30c5 | ||
|
07413b499a | ||
|
92d8ef75f8 | ||
|
1b08bb0e61 | ||
|
bdd55c9965 | ||
|
14668c339e | ||
|
7d38aa7b29 | ||
|
3c3ea2138e | ||
|
7b23235f83 | ||
|
79ae630bc4 | ||
|
0d8db26e96 | ||
|
bacb91e82a | ||
|
577411cf50 | ||
|
e98061f2eb | ||
|
4548c41082 | ||
|
f7c80c2e5d | ||
|
2d6e53be32 | ||
|
419434db28 | ||
|
cd6ddaaa85 | ||
|
4d16a1d6e8 | ||
|
1fb1077a94 | ||
|
2c9c999c71 | ||
|
c754562f96 | ||
|
7a7de03c0f | ||
|
1b9ed266e0 | ||
|
eb2f6273e4 | ||
|
45cea808ed | ||
|
9a5b6b4e56 | ||
|
d63aba568d | ||
|
4cc0b71279 | ||
|
b8d66cb8c0 | ||
|
eff76d22eb | ||
|
8ebdf33213 | ||
|
81616c1d96 | ||
|
b477d18a16 |
63 changed files with 6070 additions and 1660 deletions
6
.gitignore
vendored
6
.gitignore
vendored
|
@ -1,4 +1,10 @@
|
||||||
bin/
|
bin/
|
||||||
e2e/persist
|
e2e/persist
|
||||||
|
e2e/supporty/supporty
|
||||||
.env
|
.env
|
||||||
blobsfiles
|
blobsfiles
|
||||||
|
ytsync_docker
|
||||||
|
|
||||||
|
e2e/config.json
|
||||||
|
|
||||||
|
e2e/cookies.txt
|
||||||
|
|
|
@ -9,7 +9,8 @@ builds:
|
||||||
- amd64
|
- amd64
|
||||||
ldflags:
|
ldflags:
|
||||||
- -X "{{ .Env.IMPORTPATH }}/meta.semVersion={{ .Tag }}" -X "{{ .Env.IMPORTPATH }}/meta.version={{ .Env.VERSIONSHORT }}" -X "{{ .Env.IMPORTPATH }}/meta.versionLong={{ .Env.VERSIONLONG }}" -X "{{ .Env.IMPORTPATH }}/meta.commitMsg={{ .Env.COMMITMSG }}"
|
- -X "{{ .Env.IMPORTPATH }}/meta.semVersion={{ .Tag }}" -X "{{ .Env.IMPORTPATH }}/meta.version={{ .Env.VERSIONSHORT }}" -X "{{ .Env.IMPORTPATH }}/meta.versionLong={{ .Env.VERSIONLONG }}" -X "{{ .Env.IMPORTPATH }}/meta.commitMsg={{ .Env.COMMITMSG }}"
|
||||||
archive:
|
archives:
|
||||||
|
- id: zip
|
||||||
name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}'
|
name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}'
|
||||||
replacements:
|
replacements:
|
||||||
linux: Linux
|
linux: Linux
|
||||||
|
@ -25,4 +26,3 @@ changelog:
|
||||||
exclude:
|
exclude:
|
||||||
- '^docs:'
|
- '^docs:'
|
||||||
- '^test:'
|
- '^test:'
|
||||||
|
|
||||||
|
|
10
.travis.yml
10
.travis.yml
|
@ -2,8 +2,7 @@ os: linux
|
||||||
dist: bionic
|
dist: bionic
|
||||||
language: go
|
language: go
|
||||||
go:
|
go:
|
||||||
- 1.12.7
|
- 1.17.x
|
||||||
- master
|
|
||||||
|
|
||||||
install: true
|
install: true
|
||||||
|
|
||||||
|
@ -21,15 +20,14 @@ addons:
|
||||||
packages:
|
packages:
|
||||||
- ffmpeg
|
- ffmpeg
|
||||||
- tree
|
- tree
|
||||||
- python-pip
|
- python3-pip
|
||||||
|
|
||||||
before_script:
|
before_script:
|
||||||
- sudo pip install -U youtube-dl
|
- sudo pip3 install -U yt-dlp
|
||||||
- sudo add-apt-repository -y ppa:jonathonf/ffmpeg-4
|
- sudo add-apt-repository -y ppa:savoury1/ffmpeg4
|
||||||
|
|
||||||
env:
|
env:
|
||||||
global:
|
global:
|
||||||
- GO111MODULE=on
|
|
||||||
#GITHUB_TOKEN
|
#GITHUB_TOKEN
|
||||||
- secure: "Ps3KocRP5xnM3/uA99CeYhDTVxRIuW7fGyrtqBeRWZW0cXzeA4XCTKxqcFbrPUPw67XkrBVgE58JDdWoQEJ7tm67PjMm/ltp5Evhx/QAJDh+YSofXyGDVpG1mrTZFI66R3NVVJLkSGALMkuWWXvfYZeU//AworJbyRoaIK/CVt5OP23i5N4tdd5UXc5dfLuYqnKRynyMmCkz9c3yEIQMXoPhG2hx7l7L2BeMJvcKmVhkSN7nQayjnrbUXGm/IRqrb88lvkyBevN5E3IB2V5IKEieIPZjbD/N0IfcnAt89Z96tgDhtIbx3ZvXm92lsvHA8buqQpG9d2AmSi6GKs64lQcnGeM5o0wER2JHWl1OSa1Nr/UAo5Xb/PM65Yt3yZE8AuMKHBmbfDSBzdkTXx58AeDzFUd3kMXD/fFjeQQWyXFlOss3ygH9SObl827Txmz9OJqZaxabs5Q3AP6m3EjKjz7zfLfrgpcxJM2WBiU1bN0ZxUgZkImy/CHk5gCZ7vhcnaLiDO4HZnzY/aRJwKYQPE5i0O2nHpIfovqkc0DFBA7U/7Cjin7e1E0UZvF3meLOxMqkfc6X7QTxqQpt2Tej6jlpdxw4CTLwGUhGkAw9IAPkUB3L0EbZ1/ksGhNvGDvUeSTq8hYdMAPmA+k9jS6653V4SQ+qBMy5++tbr5AeZQI="
|
- secure: "Ps3KocRP5xnM3/uA99CeYhDTVxRIuW7fGyrtqBeRWZW0cXzeA4XCTKxqcFbrPUPw67XkrBVgE58JDdWoQEJ7tm67PjMm/ltp5Evhx/QAJDh+YSofXyGDVpG1mrTZFI66R3NVVJLkSGALMkuWWXvfYZeU//AworJbyRoaIK/CVt5OP23i5N4tdd5UXc5dfLuYqnKRynyMmCkz9c3yEIQMXoPhG2hx7l7L2BeMJvcKmVhkSN7nQayjnrbUXGm/IRqrb88lvkyBevN5E3IB2V5IKEieIPZjbD/N0IfcnAt89Z96tgDhtIbx3ZvXm92lsvHA8buqQpG9d2AmSi6GKs64lQcnGeM5o0wER2JHWl1OSa1Nr/UAo5Xb/PM65Yt3yZE8AuMKHBmbfDSBzdkTXx58AeDzFUd3kMXD/fFjeQQWyXFlOss3ygH9SObl827Txmz9OJqZaxabs5Q3AP6m3EjKjz7zfLfrgpcxJM2WBiU1bN0ZxUgZkImy/CHk5gCZ7vhcnaLiDO4HZnzY/aRJwKYQPE5i0O2nHpIfovqkc0DFBA7U/7Cjin7e1E0UZvF3meLOxMqkfc6X7QTxqQpt2Tej6jlpdxw4CTLwGUhGkAw9IAPkUB3L0EbZ1/ksGhNvGDvUeSTq8hYdMAPmA+k9jS6653V4SQ+qBMy5++tbr5AeZQI="
|
||||||
|
|
||||||
|
|
111
0001-lbry-patch.patch
Normal file
111
0001-lbry-patch.patch
Normal file
|
@ -0,0 +1,111 @@
|
||||||
|
From 30380338ba9af01696c94b61f0597131638eaec1 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Niko Storni <niko@lbry.io>
|
||||||
|
Date: Mon, 16 Dec 2019 00:13:36 +0100
|
||||||
|
Subject: [PATCH] lbry-patch
|
||||||
|
|
||||||
|
---
|
||||||
|
youtube_dl/extractor/youtube.py | 45 +++++++++++++++++++++++++--------
|
||||||
|
1 file changed, 35 insertions(+), 10 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
|
||||||
|
index b913d07a6..cd66a5b01 100644
|
||||||
|
--- a/youtube_dl/extractor/youtube.py
|
||||||
|
+++ b/youtube_dl/extractor/youtube.py
|
||||||
|
@@ -10,6 +10,7 @@ import random
|
||||||
|
import re
|
||||||
|
import time
|
||||||
|
import traceback
|
||||||
|
+import subprocess
|
||||||
|
|
||||||
|
from .common import InfoExtractor, SearchInfoExtractor
|
||||||
|
from ..jsinterp import JSInterpreter
|
||||||
|
@@ -536,6 +537,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||||
|
|
||||||
|
_GEO_BYPASS = False
|
||||||
|
|
||||||
|
+ _WGET_429_RATE_LIMIT = 8191
|
||||||
|
+ _WGET_BINARY = "wget"
|
||||||
|
+
|
||||||
|
IE_NAME = 'youtube'
|
||||||
|
_TESTS = [
|
||||||
|
{
|
||||||
|
@@ -1254,6 +1258,17 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||||
|
""" Return a string representation of a signature """
|
||||||
|
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
|
||||||
|
|
||||||
|
+ def _rate_limit_download(self, url, video_id, note=None):
|
||||||
|
+ if note is None:
|
||||||
|
+ self.report_download_webpage(video_id)
|
||||||
|
+ elif note is not False:
|
||||||
|
+ if video_id is None:
|
||||||
|
+ self.to_screen('%s' % (note,))
|
||||||
|
+ else:
|
||||||
|
+ self.to_screen('%s: %s' % (video_id, note))
|
||||||
|
+ source_address = self._downloader.params.get('source_address')
|
||||||
|
+ return subprocess.run([self._WGET_BINARY, '-q', '--limit-rate', str(self._WGET_429_RATE_LIMIT), '--bind-address', source_address, '-O', '-', url], check=True, stdout=subprocess.PIPE).stdout.decode(encoding='UTF-8')
|
||||||
|
+
|
||||||
|
def _extract_signature_function(self, video_id, player_url, example_sig):
|
||||||
|
id_m = re.match(
|
||||||
|
r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|(?:/[a-z]{2,3}_[A-Z]{2})?/base)?\.(?P<ext>[a-z]+)$',
|
||||||
|
@@ -1678,7 +1693,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||||
|
|
||||||
|
# Get video webpage
|
||||||
|
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
|
||||||
|
- video_webpage = self._download_webpage(url, video_id)
|
||||||
|
+ video_webpage = self._rate_limit_download(url, video_id)
|
||||||
|
|
||||||
|
# Attempt to extract SWF player URL
|
||||||
|
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
|
||||||
|
@@ -1736,10 +1751,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||||
|
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
|
||||||
|
})
|
||||||
|
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
|
||||||
|
- video_info_webpage = self._download_webpage(
|
||||||
|
+ video_info_webpage = self._rate_limit_download(
|
||||||
|
video_info_url, video_id,
|
||||||
|
- note='Refetching age-gated info webpage',
|
||||||
|
- errnote='unable to download video info webpage')
|
||||||
|
+ note='Refetching age-gated info webpage')
|
||||||
|
video_info = compat_parse_qs(video_info_webpage)
|
||||||
|
pl_response = video_info.get('player_response', [None])[0]
|
||||||
|
player_response = extract_player_response(pl_response, video_id)
|
||||||
|
@@ -1777,7 +1791,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||||
|
# The general idea is to take a union of itags of both DASH manifests (for example
|
||||||
|
# video with such 'manifest behavior' see https://github.com/ytdl-org/youtube-dl/issues/6093)
|
||||||
|
self.report_video_info_webpage_download(video_id)
|
||||||
|
- for el in ('embedded', 'detailpage', 'vevo', ''):
|
||||||
|
+ for el in ('', 'embedded', 'detailpage', 'vevo'):
|
||||||
|
query = {
|
||||||
|
'video_id': video_id,
|
||||||
|
'ps': 'default',
|
||||||
|
@@ -1789,11 +1803,22 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
|
||||||
|
query['el'] = el
|
||||||
|
if sts:
|
||||||
|
query['sts'] = sts
|
||||||
|
- video_info_webpage = self._download_webpage(
|
||||||
|
- '%s://www.youtube.com/get_video_info' % proto,
|
||||||
|
- video_id, note=False,
|
||||||
|
- errnote='unable to download video info webpage',
|
||||||
|
- fatal=False, query=query)
|
||||||
|
+
|
||||||
|
+ if el == '':
|
||||||
|
+ base_url = 'https://youtube.com/get_video_info?video_id={}'.format(video_id)
|
||||||
|
+ else:
|
||||||
|
+ base_url = 'https://youtube.com/get_video_info'
|
||||||
|
+
|
||||||
|
+ for q in query:
|
||||||
|
+ if q is None or q is "":
|
||||||
|
+ continue
|
||||||
|
+ if query[q] is None or query[q] is "":
|
||||||
|
+ continue
|
||||||
|
+
|
||||||
|
+ base_url = base_url + "?{}={}".format(q, query[q])
|
||||||
|
+
|
||||||
|
+ video_info_webpage = self._rate_limit_download(base_url, video_id)
|
||||||
|
+
|
||||||
|
if not video_info_webpage:
|
||||||
|
continue
|
||||||
|
get_video_info = compat_parse_qs(video_info_webpage)
|
||||||
|
--
|
||||||
|
2.17.1
|
||||||
|
|
15
LICENSE
Normal file
15
LICENSE
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2017-2020 LBRY Inc.
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
|
||||||
|
"Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the
|
||||||
|
following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||||
|
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||||
|
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||||
|
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
2
Makefile
2
Makefile
|
@ -13,7 +13,7 @@ LDFLAGS = -ldflags "-X ${IMPORT_PATH}/meta.Version=${VERSION} -X ${IMPORT_PATH}/
|
||||||
|
|
||||||
|
|
||||||
build:
|
build:
|
||||||
mkdir -p ${BIN_DIR} && CGO_ENABLED=0 go build ${LDFLAGS} -asmflags -trimpath=${DIR} -o ${BIN_DIR}/${BINARY} main.go
|
mkdir -p ${BIN_DIR} && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build ${LDFLAGS} -asmflags -trimpath=${DIR} -o ${BIN_DIR}/${BINARY} main.go
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
if [ -f ${BIN_DIR}/${BINARY} ]; then rm ${BIN_DIR}/${BINARY}; fi
|
if [ -f ${BIN_DIR}/${BINARY} ]; then rm ${BIN_DIR}/${BINARY}; fi
|
||||||
|
|
39
README.md
39
README.md
|
@ -8,23 +8,17 @@ With the support of said database, the tool is also able to keep all the channel
|
||||||
|
|
||||||
|
|
||||||
# Requirements
|
# Requirements
|
||||||
- lbrynet SDK https://github.com/lbryio/lbry/releases (We strive to keep the latest release of ytsync compatible with the latest major release of the SDK)
|
- lbrynet SDK https://github.com/lbryio/lbry-sdk/releases (We strive to keep the latest release of ytsync compatible with the latest major release of the SDK)
|
||||||
- a lbrycrd node running (localhost or on a remote machine) with credits in it
|
- a lbrycrd node running (localhost or on a remote machine) with credits in it
|
||||||
|
- internal-apis (you cannot run this one yourself)
|
||||||
|
- python3-pip
|
||||||
|
- yt-dlp (`pip3 install -U yt-dlp`)
|
||||||
|
- ffmpeg (latest)
|
||||||
|
|
||||||
# Setup
|
# Setup
|
||||||
- make sure daemon is stopped and can be controlled through `systemctl` (find example below)
|
- make sure daemon is stopped and can be controlled through `systemctl` (find example below)
|
||||||
- extract the ytsync binary anywhere
|
- extract the ytsync binary anywhere
|
||||||
- add the environment variables necessary to the tool
|
- create and fill `config.json` using [this example](config.json.example)
|
||||||
- export SLACK_TOKEN="a-token-to-spam-your-slack"
|
|
||||||
- export SLACK_CHANNEL="youtube-status"
|
|
||||||
- export YOUTUBE_API_KEY="youtube-api-key"
|
|
||||||
- export LBRY_WEB_API="https://lbry-api-url-here"
|
|
||||||
- export LBRY_API_TOKEN="internal-apis-token-for-ytsync-user"
|
|
||||||
- export LBRYCRD_STRING="tcp://user:password@host:5429"
|
|
||||||
- export AWS_S3_ID="THE-ID-LIES-HERE"
|
|
||||||
- export AWS_S3_SECRET="THE-SECRET-LIES-HERE"
|
|
||||||
- export AWS_S3_REGION="us-east-1"
|
|
||||||
- export AWS_S3_BUCKET="ytsync-wallets"
|
|
||||||
|
|
||||||
## systemd script example
|
## systemd script example
|
||||||
`/etc/systemd/system/lbrynet.service`
|
`/etc/systemd/system/lbrynet.service`
|
||||||
|
@ -55,23 +49,26 @@ Usage:
|
||||||
|
|
||||||
Flags:
|
Flags:
|
||||||
--after int Specify from when to pull jobs [Unix time](Default: 0)
|
--after int Specify from when to pull jobs [Unix time](Default: 0)
|
||||||
--before int Specify until when to pull jobs [Unix time](Default: current Unix time) (default current timestamp)
|
--before int Specify until when to pull jobs [Unix time](Default: current Unix time) (default 1669311891)
|
||||||
--channelID string If specified, only this channel will be synced.
|
--channelID string If specified, only this channel will be synced.
|
||||||
--concurrent-jobs int how many jobs to process concurrently (default 1)
|
--concurrent-jobs int how many jobs to process concurrently (default 1)
|
||||||
-h, --help help for ytsync
|
-h, --help help for ytsync
|
||||||
--limit int limit the amount of channels to sync
|
--limit int limit the amount of channels to sync
|
||||||
--max-length float Maximum video length to process (in hours) (default 2)
|
--max-length int Maximum video length to process (in hours) (default 2)
|
||||||
--max-size int Maximum video size to process (in MB) (default 2048)
|
--max-size int Maximum video size to process (in MB) (default 2048)
|
||||||
--max-tries int Number of times to try a publish that fails (default 3)
|
--max-tries int Number of times to try a publish that fails (default 3)
|
||||||
|
--no-transfers Skips the transferring process of videos, channels and supports
|
||||||
|
--quick Look up only the last 50 videos from youtube
|
||||||
--remove-db-unpublished Remove videos from the database that are marked as published but aren't really published
|
--remove-db-unpublished Remove videos from the database that are marked as published but aren't really published
|
||||||
--run-once Whether the process should be stopped after one cycle or not
|
--run-once Whether the process should be stopped after one cycle or not
|
||||||
--skip-space-check Do not perform free space check on startup
|
--skip-space-check Do not perform free space check on startup
|
||||||
--status string Specify which queue to pull from. Overrides --update
|
--status string Specify which queue to pull from. Overrides --update
|
||||||
--stop-on-error If a publish fails, stop all publishing and exit
|
--status2 string Specify which secondary queue to pull from.
|
||||||
--takeover-existing-channel If channel exists and we don't own it, take over the channel
|
--takeover-existing-channel If channel exists and we don't own it, take over the channel
|
||||||
--update Update previously synced channels instead of syncing new ones
|
--update Update previously synced channels instead of syncing new ones
|
||||||
--upgrade-metadata Upgrade videos if they're on the old metadata version
|
--upgrade-metadata Upgrade videos if they're on the old metadata version
|
||||||
--videos-limit int how many videos to process per channel (default 1000)
|
--videos-limit int how many videos to process per channel (leave 0 for automatic detection)
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Running from Source
|
## Running from Source
|
||||||
|
@ -88,17 +85,17 @@ Contributions to this project are welcome, encouraged, and compensated. For more
|
||||||
|
|
||||||
## Security
|
## Security
|
||||||
|
|
||||||
We take security seriously. Please contact [security@lbry.io](mailto:security@lbry.io) regarding any security issues. Our PGP key is [here](https://keybase.io/lbry/key.asc) if you need it.
|
We take security seriously. Please contact [security@lbry.io](mailto:security@lbry.io) regarding any security issues. Our PGP key is [here](https://lbry.com/faq/pgp-key) if you need it.
|
||||||
|
|
||||||
## Contact
|
## Contact
|
||||||
|
|
||||||
The primary contact for this project is [Niko Storni](https://github.com/nikooo777) (niko@lbry.io).
|
The primary contact for this project is [Niko Storni](https://github.com/nikooo777) (niko@lbry.com).
|
||||||
|
|
||||||
## Additional Info and Links
|
## Additional Info and Links
|
||||||
|
|
||||||
- [https://lbry.io](https://lbry.io) - The live LBRY website
|
- [https://lbry.com](https://lbry.com) - The live LBRY website
|
||||||
- [Discord Chat](https://chat.lbry.io) - A chat room for the LBRYians
|
- [Discord Chat](https://chat.lbry.com) - A chat room for the LBRYians
|
||||||
- [Email us](mailto:hello@lbry.io) - LBRY Support email
|
- [Email us](mailto:hello@lbry.com) - LBRY Support email
|
||||||
- [Twitter](https://twitter.com/@lbryio) - LBRY Twitter page
|
- [Twitter](https://twitter.com/@lbryio) - LBRY Twitter page
|
||||||
- [Facebook](https://www.facebook.com/lbryio/) - LBRY Facebook page
|
- [Facebook](https://www.facebook.com/lbryio/) - LBRY Facebook page
|
||||||
- [Reddit](https://reddit.com/r/lbry) - LBRY Reddit page
|
- [Reddit](https://reddit.com/r/lbry) - LBRY Reddit page
|
||||||
|
|
|
@ -7,16 +7,18 @@ import (
|
||||||
"os/user"
|
"os/user"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
|
||||||
"github.com/lbryio/lbry.go/extras/errors"
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||||
"github.com/lbryio/reflector.go/cmd"
|
"github.com/lbryio/reflector.go/cmd"
|
||||||
"github.com/lbryio/reflector.go/db"
|
"github.com/lbryio/reflector.go/db"
|
||||||
"github.com/lbryio/reflector.go/reflector"
|
"github.com/lbryio/reflector.go/reflector"
|
||||||
"github.com/lbryio/reflector.go/store"
|
"github.com/lbryio/reflector.go/store"
|
||||||
"github.com/lbryio/ytsync/util"
|
"github.com/sirupsen/logrus"
|
||||||
|
|
||||||
log "github.com/sirupsen/logrus"
|
"github.com/lbryio/ytsync/v5/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var dbHandle *db.SQL
|
||||||
|
|
||||||
func ReflectAndClean() error {
|
func ReflectAndClean() error {
|
||||||
err := reflectBlobs()
|
err := reflectBlobs()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -44,6 +46,7 @@ func reflectBlobs() error {
|
||||||
if util.IsBlobReflectionOff() {
|
if util.IsBlobReflectionOff() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
logrus.Infoln("reflecting blobs...")
|
||||||
//make sure lbrynet is off
|
//make sure lbrynet is off
|
||||||
running, err := util.IsLbrynetRunning()
|
running, err := util.IsLbrynetRunning()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -52,8 +55,8 @@ func reflectBlobs() error {
|
||||||
if running {
|
if running {
|
||||||
return errors.Prefix("cannot reflect blobs as the daemon is running", err)
|
return errors.Prefix("cannot reflect blobs as the daemon is running", err)
|
||||||
}
|
}
|
||||||
|
logrus.SetLevel(logrus.InfoLevel)
|
||||||
dbHandle := new(db.SQL)
|
defer logrus.SetLevel(logrus.DebugLevel)
|
||||||
ex, err := os.Executable()
|
ex, err := os.Executable()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
|
@ -63,23 +66,17 @@ func reflectBlobs() error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
|
if dbHandle == nil {
|
||||||
|
dbHandle = new(db.SQL)
|
||||||
err = dbHandle.Connect(config.DBConn)
|
err = dbHandle.Connect(config.DBConn)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
defer func() {
|
|
||||||
err := dbHandle.CloseDB()
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("failed to close db handle: %s", err.Error())
|
|
||||||
}
|
}
|
||||||
|
st := store.NewDBBackedStore(store.NewS3Store(config.AwsID, config.AwsSecret, config.BucketRegion, config.BucketName), dbHandle, false)
|
||||||
}()
|
|
||||||
st := store.NewDBBackedS3Store(
|
|
||||||
store.NewS3BlobStore(config.AwsID, config.AwsSecret, config.BucketRegion, config.BucketName),
|
|
||||||
dbHandle)
|
|
||||||
|
|
||||||
uploadWorkers := 10
|
uploadWorkers := 10
|
||||||
uploader := reflector.NewUploader(dbHandle, st, uploadWorkers, false)
|
uploader := reflector.NewUploader(dbHandle, st, uploadWorkers, false, false)
|
||||||
usr, err := user.Current()
|
usr, err := user.Current()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
|
|
35
config.json.example
Normal file
35
config.json.example
Normal file
|
@ -0,0 +1,35 @@
|
||||||
|
{
|
||||||
|
"slack_token": "",
|
||||||
|
"slack_channel": "ytsync-dev",
|
||||||
|
"internal_apis_endpoint": "http://localhost:15400",
|
||||||
|
"internal_apis_auth_token": "ytsyntoken",
|
||||||
|
"lbrycrd_string": "tcp://lbry:lbry@localhost:15200",
|
||||||
|
"wallet_s3_config": {
|
||||||
|
"id": "",
|
||||||
|
"secret": "",
|
||||||
|
"region": "us-east-1",
|
||||||
|
"bucket": "ytsync-wallets",
|
||||||
|
"endpoint": ""
|
||||||
|
},
|
||||||
|
"blockchaindb_s3_config": {
|
||||||
|
"id": "",
|
||||||
|
"secret": "",
|
||||||
|
"region": "us-east-1",
|
||||||
|
"bucket": "blockchaindbs",
|
||||||
|
"endpoint": ""
|
||||||
|
},
|
||||||
|
"thumbnails_s3_config": {
|
||||||
|
"id": "",
|
||||||
|
"secret": "",
|
||||||
|
"region": "us-east-1",
|
||||||
|
"bucket": "thumbnails.lbry.com",
|
||||||
|
"endpoint": ""
|
||||||
|
},
|
||||||
|
"aws_thumbnails_s3_config": {
|
||||||
|
"id": "",
|
||||||
|
"secret": "",
|
||||||
|
"region": "us-east-1",
|
||||||
|
"bucket": "thumbnails.lbry.com",
|
||||||
|
"endpoint": ""
|
||||||
|
}
|
||||||
|
}
|
75
configs/configs.go
Normal file
75
configs/configs.go
Normal file
|
@ -0,0 +1,75 @@
|
||||||
|
package configs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
"github.com/tkanos/gonfig"
|
||||||
|
)
|
||||||
|
|
||||||
|
type S3Configs struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
Secret string `json:"secret"`
|
||||||
|
Region string `json:"region"`
|
||||||
|
Bucket string `json:"bucket"`
|
||||||
|
Endpoint string `json:"endpoint"`
|
||||||
|
}
|
||||||
|
type Configs struct {
|
||||||
|
SlackToken string `json:"slack_token"`
|
||||||
|
SlackChannel string `json:"slack_channel"`
|
||||||
|
InternalApisEndpoint string `json:"internal_apis_endpoint"`
|
||||||
|
InternalApisAuthToken string `json:"internal_apis_auth_token"`
|
||||||
|
LbrycrdString string `json:"lbrycrd_string"`
|
||||||
|
WalletS3Config S3Configs `json:"wallet_s3_config"`
|
||||||
|
BlockchaindbS3Config S3Configs `json:"blockchaindb_s3_config"`
|
||||||
|
AWSThumbnailsS3Config S3Configs `json:"aws_thumbnails_s3_config"`
|
||||||
|
ThumbnailsS3Config S3Configs `json:"thumbnails_s3_config"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var Configuration *Configs
|
||||||
|
|
||||||
|
func Init(configPath string) error {
|
||||||
|
if Configuration != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
c := Configs{}
|
||||||
|
err := gonfig.GetConf(configPath, &c)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
Configuration = &c
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *S3Configs) GetS3AWSConfig() *aws.Config {
|
||||||
|
return &aws.Config{
|
||||||
|
Credentials: credentials.NewStaticCredentials(s.ID, s.Secret, ""),
|
||||||
|
Region: &s.Region,
|
||||||
|
Endpoint: &s.Endpoint,
|
||||||
|
S3ForcePathStyle: aws.Bool(true),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func (c *Configs) GetHostname() string {
|
||||||
|
var hostname string
|
||||||
|
|
||||||
|
var err error
|
||||||
|
hostname, err = os.Hostname()
|
||||||
|
if err != nil {
|
||||||
|
log.Error("could not detect system hostname")
|
||||||
|
hostname = "ytsync_unknown"
|
||||||
|
}
|
||||||
|
reg, err := regexp.Compile("[^a-zA-Z0-9_]+")
|
||||||
|
if err == nil {
|
||||||
|
hostname = reg.ReplaceAllString(hostname, "_")
|
||||||
|
|
||||||
|
}
|
||||||
|
if len(hostname) > 30 {
|
||||||
|
hostname = hostname[0:30]
|
||||||
|
}
|
||||||
|
return hostname
|
||||||
|
}
|
315
downloader/downloader.go
Normal file
315
downloader/downloader.go
Normal file
|
@ -0,0 +1,315 @@
|
||||||
|
package downloader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"math"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/davecgh/go-spew/spew"
|
||||||
|
"github.com/lbryio/ytsync/v5/downloader/ytdl"
|
||||||
|
"github.com/lbryio/ytsync/v5/ip_manager"
|
||||||
|
"github.com/lbryio/ytsync/v5/sdk"
|
||||||
|
"github.com/lbryio/ytsync/v5/shared"
|
||||||
|
util2 "github.com/lbryio/ytsync/v5/util"
|
||||||
|
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/stop"
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/util"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func GetPlaylistVideoIDs(channelName string, maxVideos int, stopChan stop.Chan, pool *ip_manager.IPPool) ([]string, error) {
|
||||||
|
args := []string{"--skip-download", "https://www.youtube.com/channel/" + channelName + "/videos", "--get-id", "--flat-playlist", "--cookies", "cookies.txt", "--playlist-end", fmt.Sprintf("%d", maxVideos)}
|
||||||
|
ids, err := run(channelName, args, stopChan, pool)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Err(err)
|
||||||
|
}
|
||||||
|
videoIDs := make([]string, 0, maxVideos)
|
||||||
|
for i, v := range ids {
|
||||||
|
if v == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if i >= maxVideos {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
videoIDs = append(videoIDs, v)
|
||||||
|
}
|
||||||
|
return videoIDs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// releaseTimeFormat is the layout used to render release/upload times for the
// rest of ytsync (Go reference time: Mon Jan 2 15:04:05 MST 2006).
const releaseTimeFormat = "2006-01-02, 15:04:05 (MST)"
|
||||||
|
|
||||||
|
func GetVideoInformation(videoID string, stopChan stop.Chan, pool *ip_manager.IPPool) (*ytdl.YtdlVideo, error) {
|
||||||
|
args := []string{
|
||||||
|
"--skip-download",
|
||||||
|
"--write-info-json",
|
||||||
|
fmt.Sprintf("https://www.youtube.com/watch?v=%s", videoID),
|
||||||
|
"--cookies",
|
||||||
|
"cookies.txt",
|
||||||
|
"-o",
|
||||||
|
path.Join(util2.GetVideoMetadataDir(), videoID),
|
||||||
|
}
|
||||||
|
_, err := run(videoID, args, stopChan, pool)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Err(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.Open(path.Join(util2.GetVideoMetadataDir(), videoID+".info.json"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Err(err)
|
||||||
|
}
|
||||||
|
// defer the closing of our jsonFile so that we can parse it later on
|
||||||
|
defer f.Close()
|
||||||
|
// read our opened jsonFile as a byte array.
|
||||||
|
byteValue, _ := ioutil.ReadAll(f)
|
||||||
|
|
||||||
|
var video *ytdl.YtdlVideo
|
||||||
|
err = json.Unmarshal(byteValue, &video)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Err(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return video, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sentinel errors used by the caa.iti.gr scrape / upload-time helpers below.
var errNotScraped = errors.Base("not yet scraped by caa.iti.gr")
var errUploadTimeEmpty = errors.Base("upload time is empty")
var errStatusParse = errors.Base("could not parse status, got number, need string")
var errConnectionIssue = errors.Base("there was a connection issue with the api")
|
||||||
|
|
||||||
|
// slack formats the message, echoes it to stdout, and forwards the same
// formatted message to the configured Slack channel via util.SendToSlack.
func slack(format string, a ...interface{}) {
	fmt.Printf(format+"\n", a...)
	util.SendToSlack(format, a...)
}
|
||||||
|
|
||||||
|
func triggerScrape(videoID string, ip *net.TCPAddr) error {
|
||||||
|
//slack("Triggering scrape for %s", videoID)
|
||||||
|
u, err := url.Parse("https://caa.iti.gr/verify_videoV3")
|
||||||
|
q := u.Query()
|
||||||
|
q.Set("twtimeline", "0")
|
||||||
|
q.Set("url", "https://www.youtube.com/watch?v="+videoID)
|
||||||
|
u.RawQuery = q.Encode()
|
||||||
|
//slack("GET %s", u.String())
|
||||||
|
|
||||||
|
client := getClient(ip)
|
||||||
|
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
req.Header.Set("User-Agent", ChromeUA)
|
||||||
|
|
||||||
|
res, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
|
||||||
|
var response struct {
|
||||||
|
Message string `json:"message"`
|
||||||
|
Status string `json:"status"`
|
||||||
|
VideoURL string `json:"video_url"`
|
||||||
|
}
|
||||||
|
err = json.NewDecoder(res.Body).Decode(&response)
|
||||||
|
if err != nil {
|
||||||
|
if strings.Contains(err.Error(), "cannot unmarshal number") {
|
||||||
|
return errors.Err(errStatusParse)
|
||||||
|
}
|
||||||
|
if strings.Contains(err.Error(), "no route to host") {
|
||||||
|
return errors.Err(errConnectionIssue)
|
||||||
|
}
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch response.Status {
|
||||||
|
case "removed_video":
|
||||||
|
return errors.Err("video previously removed from service")
|
||||||
|
case "no_video":
|
||||||
|
return errors.Err("they say 'video cannot be found'. wtf?")
|
||||||
|
default:
|
||||||
|
spew.Dump(response)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
//https://caa.iti.gr/caa/api/v4/videos/reports/h-tuxHS5lSM
|
||||||
|
}
|
||||||
|
|
||||||
|
// getUploadTime returns the best-known release time for videoID, formatted
// with releaseTimeFormat. It prefers the release time recorded by the APIs
// (config.GetReleasedDate) when it agrees with yt-dlp's upload date to within
// 48 hours; otherwise it falls back to the yt-dlp upload date.
// NOTE(review): the ip parameter is unused in this implementation — confirm
// whether callers still need to pass it.
func getUploadTime(config *sdk.APIConfig, videoID string, ip *net.TCPAddr, uploadDate string) (string, error) {
	//slack("Getting upload time for %s", videoID)
	release, err := config.GetReleasedDate(videoID)
	if err != nil {
		// non-fatal: fall through to the yt-dlp date
		logrus.Error(err)
	}
	// uploadDate comes from yt-dlp in YYYYMMDD form; on parse failure
	// ytdlUploadDate is the zero time and the fallback below formats a
	// zero-time string (pre-existing behavior, kept as-is).
	ytdlUploadDate, err := time.Parse("20060102", uploadDate)
	if err != nil {
		logrus.Error(err)
	}
	if release != nil {
		//const sqlTimeFormat = "2006-01-02 15:04:05"
		sqlTime, err := time.ParseInLocation(time.RFC3339, release.ReleaseTime, time.UTC)
		if err == nil {
			// reject the API value when it disagrees with yt-dlp by more
			// than two days — it is likely stale or wrong
			hoursDiff := math.Abs(sqlTime.Sub(ytdlUploadDate).Hours())
			if hoursDiff > 48 {
				logrus.Infof("upload day from APIs differs from the ytdl one by more than 2 days.")
			} else {
				return sqlTime.Format(releaseTimeFormat), nil
			}
		} else {
			logrus.Error(err)
		}
	}

	return ytdlUploadDate.Format(releaseTimeFormat), nil
}
|
||||||
|
|
||||||
|
func getClient(ip *net.TCPAddr) *http.Client {
|
||||||
|
if ip == nil {
|
||||||
|
return http.DefaultClient
|
||||||
|
}
|
||||||
|
|
||||||
|
return &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
Proxy: http.ProxyFromEnvironment,
|
||||||
|
DialContext: (&net.Dialer{
|
||||||
|
LocalAddr: ip,
|
||||||
|
Timeout: 30 * time.Second,
|
||||||
|
KeepAlive: 30 * time.Second,
|
||||||
|
}).DialContext,
|
||||||
|
MaxIdleConns: 100,
|
||||||
|
IdleConnTimeout: 90 * time.Second,
|
||||||
|
TLSHandshakeTimeout: 10 * time.Second,
|
||||||
|
ExpectContinueTimeout: 1 * time.Second,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
	// GoogleBotUA and ChromeUA are the user-agent strings rotated through by
	// nextUA when yt-dlp hits extraction errors.
	GoogleBotUA = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
	ChromeUA    = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36"
	// maxAttempts bounds the retry loop in run (throttle errors excluded).
	maxAttempts = 3
	// Substrings matched against yt-dlp error output to classify failures.
	extractionError         = "YouTube said: Unable to extract video data"
	throttledError          = "HTTP Error 429"
	AlternateThrottledError = "returned non-zero exit status 8"
	youtubeDlError          = "exit status 1"
	videoPremiereError      = "Premieres in"
	liveEventError          = "This live event will begin in"
)
|
||||||
|
|
||||||
|
// run invokes yt-dlp with the given args plus a --source-address drawn from
// the IP pool, retrying up to maxAttempts times. Throttle errors do not count
// toward the retry budget; errors matching shared.ErrorsNoRetry abort early.
// On success it returns yt-dlp's stdout split into lines (see runCmd).
func run(use string, args []string, stopChan stop.Chan, pool *ip_manager.IPPool) ([]string, error) {
	// useragent holds the "--user-agent <ua>" override rotated in by nextUA
	// after extraction errors; empty on the first attempt.
	var useragent []string
	var lastError error
	for attempts := 0; attempts < maxAttempts; attempts++ {
		sourceAddress, err := getIPFromPool(use, stopChan, pool)
		if err != nil {
			return nil, err
		}
		argsForCommand := append(args, "--source-address", sourceAddress)
		argsForCommand = append(argsForCommand, useragent...)
		binary := "yt-dlp"
		cmd := exec.Command(binary, argsForCommand...)

		res, err := runCmd(cmd, stopChan)
		// return the IP to the pool regardless of outcome
		pool.ReleaseIP(sourceAddress)
		if err == nil {
			return res, nil
		}
		lastError = err
		// NOTE(review): the classification below only runs for generic
		// "exit status 1" failures; other error strings fall through and
		// simply consume a retry attempt — confirm that is intended.
		if strings.Contains(err.Error(), youtubeDlError) {
			if util.SubstringInSlice(err.Error(), shared.ErrorsNoRetry) {
				break
			}
			if strings.Contains(err.Error(), extractionError) {
				logrus.Warnf("known extraction error: %s", errors.FullTrace(err))
				// switch to the next user agent for the following attempt
				useragent = nextUA(useragent)
			}
			if strings.Contains(err.Error(), throttledError) || strings.Contains(err.Error(), AlternateThrottledError) {
				// mark this IP as throttled so the pool avoids it for a while
				pool.SetThrottled(sourceAddress)
				//we don't want throttle errors to count toward the max retries
				attempts--
			}
		}
	}
	return nil, lastError
}
|
||||||
|
|
||||||
|
func nextUA(current []string) []string {
|
||||||
|
if len(current) == 0 {
|
||||||
|
return []string{"--user-agent", GoogleBotUA}
|
||||||
|
}
|
||||||
|
return []string{"--user-agent", ChromeUA}
|
||||||
|
}
|
||||||
|
|
||||||
|
// runCmd starts cmd, captures its stdout and stderr, and waits for either
// process completion or a signal on stopChan. On completion it returns stdout
// split into lines (CRLF normalized to LF); on process failure the returned
// error is prefixed with the captured stderr. On stop, the process is killed
// and an "interrupted by user" error is returned.
func runCmd(cmd *exec.Cmd, stopChan stop.Chan) ([]string, error) {
	logrus.Infof("running yt-dlp cmd: %s", strings.Join(cmd.Args, " "))
	var err error
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, errors.Err(err)
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, errors.Err(err)
	}
	err = cmd.Start()
	if err != nil {
		return nil, errors.Err(err)
	}
	// Both pipes must be fully drained before cmd.Wait() is called.
	// NOTE(review): stdout is drained before stderr; if the process fills the
	// stderr pipe buffer while we are still reading stdout this can deadlock —
	// confirm yt-dlp's stderr volume stays small for these invocations.
	outLog, err := ioutil.ReadAll(stdout)
	if err != nil {
		return nil, errors.Err(err)
	}
	errorLog, err := ioutil.ReadAll(stderr)
	if err != nil {
		return nil, errors.Err(err)
	}
	// buffered so the goroutine can exit even if we return via the stop path
	done := make(chan error, 1)
	go func() {
		done <- cmd.Wait()
	}()

	select {
	case <-stopChan:
		err := cmd.Process.Kill()
		if err != nil {
			return nil, errors.Prefix("failed to kill command after stopper cancellation", err)
		}
		return nil, errors.Err("interrupted by user")
	case err := <-done:
		if err != nil {
			//return nil, errors.Prefix("yt-dlp "+strings.Join(cmd.Args, " ")+" ["+string(errorLog)+"]", err)
			return nil, errors.Prefix(string(errorLog), err)
		}
		return strings.Split(strings.Replace(string(outLog), "\r\n", "\n", -1), "\n"), nil
	}
}
|
||||||
|
|
||||||
|
func getIPFromPool(use string, stopChan stop.Chan, pool *ip_manager.IPPool) (sourceAddress string, err error) {
|
||||||
|
for {
|
||||||
|
sourceAddress, err = pool.GetIP(use)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, ip_manager.ErrAllThrottled) {
|
||||||
|
select {
|
||||||
|
case <-stopChan:
|
||||||
|
return "", errors.Err("interrupted by user")
|
||||||
|
|
||||||
|
default:
|
||||||
|
time.Sleep(ip_manager.IPCooldownPeriod)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
40
downloader/downloader_test.go
Normal file
40
downloader/downloader_test.go
Normal file
|
@ -0,0 +1,40 @@
|
||||||
|
package downloader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/lbryio/ytsync/v5/ip_manager"
|
||||||
|
"github.com/lbryio/ytsync/v5/sdk"
|
||||||
|
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/stop"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestGetPlaylistVideoIDs is a manual smoke test that hits YouTube for a real
// channel and prints the returned IDs. Errors are logged rather than failing
// the test.
// NOTE(review): it passes a nil stop.Chan and nil *ip_manager.IPPool — the IP
// pool is dereferenced inside run/getIPFromPool, so this likely panics unless
// those tolerate nil; confirm before relying on this test in CI.
func TestGetPlaylistVideoIDs(t *testing.T) {
	videoIDs, err := GetPlaylistVideoIDs("UCJ0-OtVpF0wOKEqT2Z1HEtA", 50, nil, nil)
	if err != nil {
		logrus.Error(err)
	}
	for _, id := range videoIDs {
		println(id)
	}
}
|
||||||
|
|
||||||
|
// TestGetVideoInformation is a network-dependent smoke test: it builds a real
// IP pool, fetches metadata for a known video via yt-dlp, and asserts the
// call succeeds and yields a non-nil video.
func TestGetVideoInformation(t *testing.T) {
	s := stop.New()
	ip, err := ip_manager.GetIPPool(s)
	assert.NoError(t, err)
	video, err := GetVideoInformation("kDGOHNpRjzc", s.Ch(), ip)
	assert.NoError(t, err)
	assert.NotNil(t, video)
	logrus.Info(video.ID)
}
|
||||||
|
|
||||||
|
// Test_getUploadTime exercises getUploadTime with a zero-value API config and
// a fixed yt-dlp-style upload date, logging the resolved time.
// NOTE(review): a zero-value sdk.APIConfig means GetReleasedDate runs without
// real credentials — confirm it degrades gracefully rather than erroring.
func Test_getUploadTime(t *testing.T) {
	configs := sdk.APIConfig{}
	got, err := getUploadTime(&configs, "kDGOHNpRjzc", nil, "20060102")
	assert.NoError(t, err)
	t.Log(got)
}
|
137
downloader/ytdl/Video.go
Normal file
137
downloader/ytdl/Video.go
Normal file
|
@ -0,0 +1,137 @@
|
||||||
|
package ytdl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/lbryio/ytsync/v5/sdk"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// YtdlVideo models the subset of yt-dlp's .info.json output that ytsync
// consumes. Fields commented out below exist in the yt-dlp payload but are
// intentionally not decoded; they are kept as a reference for the schema.
type YtdlVideo struct {
	ID          string      `json:"id"`
	Title       string      `json:"title"`
	Thumbnails  []Thumbnail `json:"thumbnails"`
	Description string      `json:"description"`
	ChannelID   string      `json:"channel_id"`
	Duration    int         `json:"duration"`
	Categories  []string    `json:"categories"`
	Tags        []string    `json:"tags"`
	IsLive      bool        `json:"is_live"`
	LiveStatus  string      `json:"live_status"`
	// ReleaseTimestamp is a Unix timestamp; nil or <= 0 means "not set".
	ReleaseTimestamp *int64 `json:"release_timestamp"`
	// uploadDateForReal caches the value computed by GetUploadTime.
	uploadDateForReal *time.Time
	Availability      string `json:"availability"`
	// ReleaseDate and UploadDate are yt-dlp dates in YYYYMMDD form.
	ReleaseDate string `json:"release_date"`
	UploadDate  string `json:"upload_date"`

	//WasLive bool `json:"was_live"`
	//Formats interface{} `json:"formats"`
	//Thumbnail string `json:"thumbnail"`
	//Uploader string `json:"uploader"`
	//UploaderID string `json:"uploader_id"`
	//UploaderURL string `json:"uploader_url"`
	//ChannelURL string `json:"channel_url"`
	//ViewCount int `json:"view_count"`
	//AverageRating interface{} `json:"average_rating"`
	//AgeLimit int `json:"age_limit"`
	//WebpageURL string `json:"webpage_url"`
	//PlayableInEmbed bool `json:"playable_in_embed"`
	//AutomaticCaptions interface{} `json:"automatic_captions"`
	//Subtitles interface{} `json:"subtitles"`
	//Chapters interface{} `json:"chapters"`
	//LikeCount int `json:"like_count"`
	//Channel string `json:"channel"`
	//ChannelFollowerCount int `json:"channel_follower_count"`
	//OriginalURL string `json:"original_url"`
	//WebpageURLBasename string `json:"webpage_url_basename"`
	//WebpageURLDomain string `json:"webpage_url_domain"`
	//Extractor string `json:"extractor"`
	//ExtractorKey string `json:"extractor_key"`
	//Playlist interface{} `json:"playlist"`
	//PlaylistIndex interface{} `json:"playlist_index"`
	//DisplayID string `json:"display_id"`
	//Fulltitle string `json:"fulltitle"`
	//DurationString string `json:"duration_string"`
	//RequestedSubtitles interface{} `json:"requested_subtitles"`
	//HasDrm bool `json:"__has_drm"`
	//RequestedFormats interface{} `json:"requested_formats"`
	//Format string `json:"format"`
	//FormatID string `json:"format_id"`
	//Ext string `json:"ext"`
	//Protocol string `json:"protocol"`
	//Language interface{} `json:"language"`
	//FormatNote string `json:"format_note"`
	//FilesizeApprox int `json:"filesize_approx"`
	//Tbr float64 `json:"tbr"`
	//Width int `json:"width"`
	//Height int `json:"height"`
	//Resolution string `json:"resolution"`
	//Fps int `json:"fps"`
	//DynamicRange string `json:"dynamic_range"`
	//Vcodec string `json:"vcodec"`
	//Vbr float64 `json:"vbr"`
	//StretchedRatio interface{} `json:"stretched_ratio"`
	//Acodec string `json:"acodec"`
	//Abr float64 `json:"abr"`
	//Asr int `json:"asr"`
	//Epoch int `json:"epoch"`
	//Filename string `json:"filename"`
	//Urls string `json:"urls"`
	//Type string `json:"_type"`
}
|
||||||
|
|
||||||
|
// Thumbnail is one entry of yt-dlp's "thumbnails" array; Preference orders
// candidates (higher is preferred, per the decoded payload's usage).
type Thumbnail struct {
	URL        string `json:"url"`
	Preference int    `json:"preference"`
	ID         string `json:"id"`
	Height     int    `json:"height,omitempty"`
	Width      int    `json:"width,omitempty"`
	Resolution string `json:"resolution,omitempty"`
}
|
||||||
|
|
||||||
|
// GetUploadTime resolves the video's effective upload time and caches the
// result on the struct, so the morty API is queried at most once per video.
func (v *YtdlVideo) GetUploadTime() time.Time {
	//priority list:
	// release timestamp from yt
	// release timestamp from morty
	// release date from yt
	// upload date from yt
	if v.uploadDateForReal != nil {
		return *v.uploadDateForReal
	}

	var ytdlReleaseTimestamp time.Time
	if v.ReleaseTimestamp != nil && *v.ReleaseTimestamp > 0 {
		ytdlReleaseTimestamp = time.Unix(*v.ReleaseTimestamp, 0).UTC()
	}
	//get morty timestamp
	var mortyReleaseTimestamp time.Time
	mortyRelease, err := sdk.GetAPIsConfigs().GetReleasedDate(v.ID)
	if err != nil {
		// non-fatal: fall through to the yt-dlp dates
		logrus.Error(err)
	} else if mortyRelease != nil {
		mortyReleaseTimestamp, err = time.ParseInLocation(time.RFC3339, mortyRelease.ReleaseTime, time.UTC)
		if err != nil {
			logrus.Error(err)
		}
	}

	// ReleaseDate/UploadDate are YYYYMMDD strings from yt-dlp.
	// NOTE(review): parse failures are only logged, leaving the zero time; if
	// every source is empty the final fallback caches a zero timestamp —
	// confirm upstream always populates UploadDate.
	ytdlReleaseDate, err := time.Parse("20060102", v.ReleaseDate)
	if err != nil {
		logrus.Error(err)
	}
	ytdlUploadDate, err := time.Parse("20060102", v.UploadDate)
	if err != nil {
		logrus.Error(err)
	}
	if !ytdlReleaseTimestamp.IsZero() {
		v.uploadDateForReal = &ytdlReleaseTimestamp
	} else if !mortyReleaseTimestamp.IsZero() {
		v.uploadDateForReal = &mortyReleaseTimestamp
	} else if !ytdlReleaseDate.IsZero() {
		v.uploadDateForReal = &ytdlReleaseDate
	} else {
		v.uploadDateForReal = &ytdlUploadDate
	}

	return *v.uploadDateForReal
}
|
28
e2e/chainquery/docker-compose.yml
Normal file
28
e2e/chainquery/docker-compose.yml
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
# docker-compose for the chainquery e2e stack: a MySQL backend plus the
# chainquery service that indexes the regtest blockchain into it.
version: '3.4'

services:
  ###########
  ## MYSQL ##
  ###########
  mysql:
    image: mysql:5.7.23
    restart: "no"
    ports:
      - 3306:3306
    volumes:
      # persist the database between runs
      - "../persist/chainquery/db:/var/lib/mysql"
      ## This one may need to be tweaked based on where you run this docker-compose from.
      - "../stuff/my.cnf:/etc/mysql/conf.d/chainquery-optimizations.cnf"
  ################
  ## Chainquery ##
  ################
  chainquery:
    image: lbry/chainquery:v1.8.1
    restart: "no"
    ports:
      - 6300:6300
    depends_on:
      - mysql
    ## TODO: Uncomment this in a docker-compose.override.yml to allow for external configurations.
    volumes:
      - "../persist/chainquery/config/chainqueryconfig.toml:/etc/chainquery/chainqueryconfig.toml"
|
33
e2e/chainquery/docker/Dockerfile
Normal file
33
e2e/chainquery/docker/Dockerfile
Normal file
|
@ -0,0 +1,33 @@
|
||||||
|
## Get the latest source and extract it for the app container.
## Design choices, two RUN layers intended to keep builds faster, the zipped
## Stage 1 ("prep"): download the prebuilt chainquery binary for the requested
## branch and stage the entrypoint/healthcheck scripts.
FROM ubuntu:18.04 as prep
LABEL MAINTAINER="leopere [at] nixc [dot] us"
RUN apt-get update && \
    apt-get -y install unzip curl telnet wait-for-it && \
    apt-get autoclean -y && \
    rm -rf /var/lib/apt/lists/*
WORKDIR /
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
COPY ./start.sh start
COPY ./healthcheck.sh healthcheck
ARG VERSION="master"
RUN curl -s -o /chainquery http://build.lbry.io/chainquery/branch-"${VERSION}"/chainquery && \
    chmod +x /chainquery


## Stage 2 ("app"): minimal runtime image; copies the binary and scripts from
## the prep stage and runs as a non-root "chainquery" user.
FROM ubuntu:18.04 as app
RUN apt-get update && \
    apt-get -y install telnet wait-for-it && \
    apt-get autoclean -y && \
    rm -rf /var/lib/apt/lists/*
ARG VERSION="master"
# Pristine default config kept as *.orig; start.sh installs/patches it on boot.
ADD https://raw.githubusercontent.com/lbryio/chainquery/"${VERSION}"/config/default/chainqueryconfig.toml /etc/lbry/chainqueryconfig.toml.orig
RUN adduser chainquery --gecos GECOS --shell /bin/bash --disabled-password --home /home/chainquery && \
    chown -R chainquery:chainquery /etc/lbry
COPY --from=prep ./healthcheck /chainquery /start /usr/bin/
HEALTHCHECK --interval=1m --timeout=30s \
    CMD healthcheck
EXPOSE 6300
USER chainquery
STOPSIGNAL SIGINT
CMD ["start"]
|
8
e2e/chainquery/docker/build.sh
Executable file
8
e2e/chainquery/docker/build.sh
Executable file
|
@ -0,0 +1,8 @@
|
||||||
|
#!/bin/bash
# Build and push the lbry/chainquery docker image for the given branch/tag.
# Usage: ./build.sh <tag>
# Stop on the first failure so a broken build is never pushed.
set -e

if [ $# -eq 0 ]
  then
    echo "No docker tag argument supplied. Use './build.sh <tag>'"
    exit 1
fi
# Quote "$1" so tags containing shell-special characters do not word-split.
docker build --no-cache --build-arg VERSION="$1" --tag "lbry/chainquery:$1" .
docker push "lbry/chainquery:$1"
|
2
e2e/chainquery/docker/healthcheck.sh
Executable file
2
e2e/chainquery/docker/healthcheck.sh
Executable file
|
@ -0,0 +1,2 @@
|
||||||
|
#!/usr/bin/env bash
# Container healthcheck: succeed only if the chainquery API answers on :6300.
curl --fail http://localhost:6300/api/status || exit 1
|
9
e2e/chainquery/docker/my.cnf
Normal file
9
e2e/chainquery/docker/my.cnf
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
# Default Homebrew MySQL server config
[mysqld]
# Large buffers/logs and relaxed flushing: this backs a disposable e2e test
# database, so durability is traded for bulk-insert speed.
innodb_log_file_size=5G
key_buffer_size=1G
innodb_flush_log_at_trx_commit = 0
innodb_autoinc_lock_mode=2
innodb_buffer_pool_size=1G
innodb_log_buffer_size=1G
|
51
e2e/chainquery/docker/start.sh
Executable file
51
e2e/chainquery/docker/start.sh
Executable file
|
@ -0,0 +1,51 @@
|
||||||
|
#!/usr/bin/env bash
# Entrypoint for the chainquery container: on first boot it installs the
# pristine default config and substitutes in values from the environment,
# then launches chainquery.

## Config setup

## Setup Values
# Each *value* below is the full "key=value" line to write into the config.
DEBUGMODE=$(echo "debugmode=$DEBUGMODE")
LBRYCRDURL=$(echo "lbrycrdurl=\"rpc://$RPC_USER:$RPC_PASSWORD@10.5.1.2:9245\"")
MYSQLDSN=$(echo "mysqldsn=\"$MYSQL_USER:$MYSQL_PASSWORD@tcp($MYSQL_SERVER:3306)/$MYSQL_DATABASE\"")
APIMYSQLDSN=$(echo "apimysqldsn=\"$MYSQL_USER:$MYSQL_PASSWORD@tcp($MYSQL_SERVER:3306)/$MYSQL_DATABASE\"")

## Setup Defaults
# Each *_DEFAULT is the placeholder line in the stock config that gets replaced.
DEBUGMODE_DEFAULT='#DEFAULT-debugmode=false'
LBRYCRDURL_DEFAULT='#DEFAULT-lbrycrdurl="rpc://lbry:lbry@localhost:9245"'
MYSQLDSN_DEFAULT='#DEFAULT-mysqldsn="lbry:lbry@tcp(localhost:3306)/chainquery"'
APIMYSQLDSN_DEFAULT='#DEFAULT-apimysqldsn="lbry:lbry@tcp(localhost:3306)/chainquery"'

## Add setup value variable name to this list to get processed on container start
CONFIG_SETTINGS=(
  DEBUGMODE
  LBRYCRDURL
  MYSQLDSN
  APIMYSQLDSN
)

function set_configs() {
  ## Set configs on container start if not already set.
  for i in "${!CONFIG_SETTINGS[@]}"; do
    ## Indirect references http://tldp.org/LDP/abs/html/ivr.html
    eval FROM_STRING=\$"${CONFIG_SETTINGS[$i]}_DEFAULT"
    eval TO_STRING=\$${CONFIG_SETTINGS[$i]}
    ## TODO: Add a bit more magic to make sure that you're only configuring things if not set by config mounts.
    # NOTE(review): $TO_STRING is intentionally unquoted inside the sed
    # expression here; values containing whitespace or '~' would break the
    # substitution — confirm env values stay simple.
    sed -i "s~$FROM_STRING~"$TO_STRING"~g" /etc/lbry/chainqueryconfig.toml
  done
  echo "Reading config for debugging."
  cat /etc/lbry/chainqueryconfig.toml
}

if [[ ! -f /etc/lbry/chainqueryconfig.toml ]]; then
  echo "[INFO]: Did not find chainqueryconfig.toml"
  echo "        Installing default and configuring with provided environment variables if any."
  ## Install fresh copy of config file.
  echo "cp -v /etc/lbry/chainqueryconfig.toml.orig /etc/lbry/chainqueryconfig.toml"
  cp -v /etc/lbry/chainqueryconfig.toml.orig /etc/lbry/chainqueryconfig.toml
  ls -lAh /etc/lbry/
  set_configs
else
  echo "[INFO]: Found a copy of chainqueryconfig.toml in /etc/lbry"
fi

## For now keeping this simple. Potentially eventually add all command args as envvars for the Dockerfile or use safe way to add args via docker-compose.yml
chainquery serve --configpath "/etc/lbry/"
|
99
e2e/chainqueryconfig.toml
Normal file
99
e2e/chainqueryconfig.toml
Normal file
|
@ -0,0 +1,99 @@
|
||||||
|
#Debug mode outputs specific information to the console
|
||||||
|
#DEFAULT: false
|
||||||
|
#debugmode=
|
||||||
|
|
||||||
|
#DebugQueryMode outputs SQL Boiler queries to the console.
|
||||||
|
#DEFAULT: false
|
||||||
|
#debugquerymode=
|
||||||
|
|
||||||
|
#LBRYcrd URL is required for chainquery to query the blockchain
|
||||||
|
#DEFAULT: "rpc://lbry:lbry@localhost:9245"
|
||||||
|
lbrycrdurl="rpc://lbry:lbry@lbrycrd:29245"
|
||||||
|
|
||||||
|
#MySQL DSN is required for chainquery to store information.
|
||||||
|
#DEFAULT: "lbry:lbry@tcp(localhost:3306)/chainquery"
|
||||||
|
#SUGGESTED: "lbry:lbry@unix(/var/run/mysqld/mysqld.sock)/chainquery"
|
||||||
|
mysqldsn="lbry:lbry@tcp(mysql:3306)/chainquery"
|
||||||
|
|
||||||
|
#API MySQL DSN is required for chainquery to expose a SQL query service
|
||||||
|
#DEFAULT: "lbry:lbry@tcp(localhost:3306)/chainquery"
|
||||||
|
#SUGGESTED: "lbry:lbry@unix(/var/run/mysqld/mysqld.sock)/chainquery"
|
||||||
|
apimysqldsn="lbry:lbry@tcp(mysql:3306)/chainquery"
|
||||||
|
|
||||||
|
#API Host and Port is required for the API Server to bind and listen on.
|
||||||
|
#DEFAULT: "0.0.0.0:6300"
|
||||||
|
#apihostport=
|
||||||
|
|
||||||
|
#Profile mode enables and disables the reporting of a profile for chainquery
|
||||||
|
#DEFAULT: false
|
||||||
|
#profilemode=
|
||||||
|
|
||||||
|
#Daemon mode tells chainquery how hard it should work to catch up processing the blockchain
|
||||||
|
#deamonmode=0 #BeastMode it continuously processes block after block until caught up.
|
||||||
|
#daemonmode=1 #SlowAndSteadyMode it will process block with a frequency of 1 block every 100ms
|
||||||
|
#daemonmode=2 #DelayMode it will process a block with a configured delay frequency (set via 'processingdelay')
|
||||||
|
#daemonmode=3 #DaemonMode it will process a block every iteration of the daemon.
|
||||||
|
#DEFAULT: 0
|
||||||
|
#deamonmode=
|
||||||
|
|
||||||
|
#Default client timeout is for communication with the api of chainquery
|
||||||
|
#DEFAULT: 20 #Measured in seconds
|
||||||
|
#defaultclienttimeout=
|
||||||
|
|
||||||
|
#Processing delay is used to determine how frequently chainquery should process a block
|
||||||
|
# It is only used if Daemon mode is set to delay mode
|
||||||
|
#DEFAULT: 100 #Measured in milliseconds
|
||||||
|
#processingdelay=
|
||||||
|
|
||||||
|
#Daemon delay is the frequency at which chainquery checks for work to do.
|
||||||
|
#DEFAULT: 1 #Measured in seconds
|
||||||
|
#daemondelay=
|
||||||
|
|
||||||
|
#Profiling options - will output the time taken for certain operations related to the below category
|
||||||
|
#DEFAULT: false (for all 3 params)
|
||||||
|
#daemonprofile=
|
||||||
|
#lbrycrdprofile=
|
||||||
|
#mysqlprofile=
|
||||||
|
|
||||||
|
#Slack Hook URL allows slack integration. All logging info level and above is posted to a slack channel.
|
||||||
|
#DEFAULT: ""
|
||||||
|
#slackhookurl=
|
||||||
|
|
||||||
|
#Slack Channel is the channel that you want the messages to appear. Works together with the hook url.
|
||||||
|
#DEFAULT: ""
|
||||||
|
#slackchannel=
|
||||||
|
|
||||||
|
#Slack Log Level tells chainquery what level of logging will be sent to the slack channel. It will log all levels below
|
||||||
|
# it as well. Panic=0,Fatal=1,Error=2,Warning=3,Info=4,Debug=5
|
||||||
|
#DEFAULT: 0
|
||||||
|
#slackloglevel=
|
||||||
|
|
||||||
|
#The command that should be executed to trigger a self update of the software. For linux, for example, `<yourscript>.sh`
|
||||||
|
#DEFAULT: ""
|
||||||
|
#autoupdatecommand=
|
||||||
|
|
||||||
|
#Twilio service of chainquery to send specifically important information to key users of the Chainquery install.
|
||||||
|
#DEFAULT:
|
||||||
|
##twiliosid=""
|
||||||
|
##twilioauthtoken=""
|
||||||
|
##smsrecipients=["",""]
|
||||||
|
##smsfromphonenumber=""
|
||||||
|
#twiliosid=
|
||||||
|
#twilioauthtoken=
|
||||||
|
#smsrecipients=
|
||||||
|
#smsfromphonenumber=
|
||||||
|
|
||||||
|
#API Keys - Disallowed by default unless keys are entered.
|
||||||
|
#DEFAULT: []
|
||||||
|
#apikeys=
|
||||||
|
|
||||||
|
#Max Failures - Specifies the number of failures that can happen in processing a transaction. This is for parallel
|
||||||
|
#transaction processing which puts a transaction to the back of the processing queue if it fails. It can fail say if its
|
||||||
|
#source output to spend is not already processed.
|
||||||
|
#DEFAULT: 1000
|
||||||
|
#maxfailures=
|
||||||
|
|
||||||
|
#Block Chain Name - Specifies the chain params for parsing blocks, transactions, claims, and addresses. valid choices are
|
||||||
|
#lbrycrd_main, lbrycrd_testnet, and lbrycrd_regtest.
|
||||||
|
#DEFAULT: "lbrycrd_main"
|
||||||
|
blockchainname="lbrycrd_regtest"
|
|
@ -1,9 +1,7 @@
|
||||||
#blockchain_name: lbrycrd_main
|
|
||||||
#blockchain_name: lbrycrd_testnet
|
|
||||||
blockchain_name: lbrycrd_regtest
|
blockchain_name: lbrycrd_regtest
|
||||||
lbryum_servers:
|
lbryum_servers:
|
||||||
# - spv1.lbry.com:50001 #Production Wallet Server
|
|
||||||
- walletserver:50001
|
- walletserver:50001
|
||||||
|
reflect_streams: false
|
||||||
save_blobs: true
|
save_blobs: true
|
||||||
save_files: false
|
save_files: false
|
||||||
share_usage_data: false
|
share_usage_data: false
|
||||||
|
|
|
@ -15,7 +15,10 @@ mysql -u lbry -plbry -D lbry -h "127.0.0.1" -P 15500 -e "$ASSIGNGROOP"
|
||||||
#Add youtuber to sync
|
#Add youtuber to sync
|
||||||
ADDYTSYNCER='INSERT INTO user (given_name) VALUE("youtuber")'
|
ADDYTSYNCER='INSERT INTO user (given_name) VALUE("youtuber")'
|
||||||
mysql -u lbry -plbry -D lbry -h "127.0.0.1" -P 15500 -e "$ADDYTSYNCER"
|
mysql -u lbry -plbry -D lbry -h "127.0.0.1" -P 15500 -e "$ADDYTSYNCER"
|
||||||
|
#Insert an auth token for the youtuber to be used by ytsync
|
||||||
|
ADDYTSYNCAUTHTOKEN='INSERT INTO auth_token (user_id, value) VALUE(2,"youtubertoken")'
|
||||||
|
mysql -u lbry -plbry -D lbry -h "127.0.0.1" -P 15500 -e "$ADDYTSYNCAUTHTOKEN"
|
||||||
#Add their youtube channel to be synced
|
#Add their youtube channel to be synced
|
||||||
ADDYTCHANNEL="INSERT INTO youtube_data (user_id, status_token,desired_lbry_channel,channel_id,channel_name,status,created_at,source,total_videos,total_subscribers)
|
ADDYTCHANNEL="INSERT INTO youtube_data (user_id, status_token,desired_lbry_channel,channel_id,channel_name,status,created_at,source,total_videos,total_subscribers,should_sync,redeemable,total_views,reviewed,last_uploaded_video,length_limit,size_limit,reward_amount,reward_expiration)
|
||||||
VALUE(2,'3qzGyuVjQaf7t4pKKu2Er1NRW2LJkeWw','@beamertest','UCCyr5j8akeu9j4Q7urV0Lqw','BeamerAtLBRY','queued','2019-08-01 00:00:00','sync',1,0)"
|
VALUE(2,'3qzGyuVjQaf7t4pKKu2Er1NRW2LJkeWw','$1','$2','СтопХам','queued','2019-08-01 00:00:00','sync',1000,1000,1,1,10000,1,'$3',60,2048,0,'2019-08-01 00:00:00')"
|
||||||
mysql -u lbry -plbry -D lbry -h "127.0.0.1" -P 15500 -e "$ADDYTCHANNEL"
|
mysql -u lbry -plbry -D lbry -h "127.0.0.1" --default-character-set=utf8 -P 15500 -e "$ADDYTCHANNEL"
|
||||||
|
|
|
@ -4,8 +4,8 @@ services:
|
||||||
## Lbrycrd ##
|
## Lbrycrd ##
|
||||||
#############
|
#############
|
||||||
lbrycrd:
|
lbrycrd:
|
||||||
image: lbry/lbrycrd:v0.12.4.1
|
image: lbry/lbrycrd:v0.17.3.2-deprecatedrpc
|
||||||
restart: always
|
restart: "no"
|
||||||
ports:
|
ports:
|
||||||
- "15201:29246"
|
- "15201:29246"
|
||||||
- "15200:29245"
|
- "15200:29245"
|
||||||
|
@ -21,7 +21,7 @@ services:
|
||||||
## Wallet Server ##
|
## Wallet Server ##
|
||||||
###################
|
###################
|
||||||
walletserver:
|
walletserver:
|
||||||
image: lbry/wallet-server:v0.38.5
|
image: lbry/wallet-server:v0.101.1
|
||||||
restart: always
|
restart: always
|
||||||
environment:
|
environment:
|
||||||
- DB_DIRECTORY=/database
|
- DB_DIRECTORY=/database
|
||||||
|
@ -31,12 +31,14 @@ services:
|
||||||
- BANDWIDTH_LIMIT=80000000000
|
- BANDWIDTH_LIMIT=80000000000
|
||||||
- SESSION_TIMEOUT=10000000000000000000000000
|
- SESSION_TIMEOUT=10000000000000000000000000
|
||||||
- TCP_PORT=50001
|
- TCP_PORT=50001
|
||||||
|
- ELASTIC_HOST=es01
|
||||||
ports:
|
ports:
|
||||||
- "15300:50001"
|
- "15300:50001"
|
||||||
expose:
|
expose:
|
||||||
- "50001"
|
- "50001"
|
||||||
depends_on:
|
depends_on:
|
||||||
- lbrycrd
|
- lbrycrd
|
||||||
|
- es01
|
||||||
ulimits:
|
ulimits:
|
||||||
nofile:
|
nofile:
|
||||||
soft: 90000
|
soft: 90000
|
||||||
|
@ -44,10 +46,30 @@ services:
|
||||||
#command: lbry.wallet.server.coin.LBC
|
#command: lbry.wallet.server.coin.LBC
|
||||||
command: lbry.wallet.server.coin.LBCRegTest
|
command: lbry.wallet.server.coin.LBCRegTest
|
||||||
#############
|
#############
|
||||||
|
## elasticsearch ##
|
||||||
|
#############
|
||||||
|
es01:
|
||||||
|
image: docker.elastic.co/elasticsearch/elasticsearch:7.11.0
|
||||||
|
container_name: es01
|
||||||
|
environment:
|
||||||
|
- node.name=es01
|
||||||
|
- discovery.type=single-node
|
||||||
|
- indices.query.bool.max_clause_count=8196
|
||||||
|
- bootstrap.memory_lock=true
|
||||||
|
- "ES_JAVA_OPTS=-Xms4g -Xmx4g"
|
||||||
|
ulimits:
|
||||||
|
memlock:
|
||||||
|
soft: -1
|
||||||
|
hard: -1
|
||||||
|
ports:
|
||||||
|
- "9200:9200"
|
||||||
|
expose:
|
||||||
|
- "9200"
|
||||||
|
#############
|
||||||
## Lbrynet ##
|
## Lbrynet ##
|
||||||
#############
|
#############
|
||||||
lbrynet:
|
lbrynet:
|
||||||
image: lbry/lbrynet:v0.38.6
|
image: lbry/lbrynet:v0.99.0
|
||||||
restart: always
|
restart: always
|
||||||
ports:
|
ports:
|
||||||
- "15100:5279"
|
- "15100:5279"
|
||||||
|
@ -59,6 +81,7 @@ services:
|
||||||
- walletserver
|
- walletserver
|
||||||
environment:
|
environment:
|
||||||
- LBRY_STREAMING_SERVER=0.0.0.0:5280
|
- LBRY_STREAMING_SERVER=0.0.0.0:5280
|
||||||
|
- LBRY_FEE_PER_NAME_CHAR=0
|
||||||
volumes:
|
volumes:
|
||||||
- "./persist/.lbrynet:/home/lbrynet"
|
- "./persist/.lbrynet:/home/lbrynet"
|
||||||
- ".:/etc/lbry" #Put your daemon_settings.yml here
|
- ".:/etc/lbry" #Put your daemon_settings.yml here
|
||||||
|
@ -68,7 +91,7 @@ services:
|
||||||
## MySQL ##
|
## MySQL ##
|
||||||
###########
|
###########
|
||||||
mysql:
|
mysql:
|
||||||
image: mysql/mysql-server:5.7.27
|
image: mysql/mysql-server:5.7.33
|
||||||
restart: "no"
|
restart: "no"
|
||||||
ports:
|
ports:
|
||||||
- "15500:3306"
|
- "15500:3306"
|
||||||
|
@ -80,11 +103,14 @@ services:
|
||||||
- MYSQL_USER=lbry
|
- MYSQL_USER=lbry
|
||||||
- MYSQL_PASSWORD=lbry
|
- MYSQL_PASSWORD=lbry
|
||||||
- MYSQL_LOG_CONSOLE=true
|
- MYSQL_LOG_CONSOLE=true
|
||||||
|
volumes:
|
||||||
|
- "./init.sql:/docker-entrypoint-initdb.d/init.sql"
|
||||||
|
- "./chainquery/docker/my.cnf:/etc/mysql/conf.d/chainquery-optimizations.cnf"
|
||||||
###################
|
###################
|
||||||
## Internal APIs ##
|
## Internal APIs ##
|
||||||
###################
|
###################
|
||||||
internalapis:
|
internalapis:
|
||||||
image: lbry/internal-apis:master
|
image: odyseeteam/internal-apis:master
|
||||||
restart: "no"
|
restart: "no"
|
||||||
ports:
|
ports:
|
||||||
- "15400:8080"
|
- "15400:8080"
|
||||||
|
@ -93,11 +119,23 @@ services:
|
||||||
depends_on:
|
depends_on:
|
||||||
- mysql
|
- mysql
|
||||||
- lbrycrd
|
- lbrycrd
|
||||||
- lbrynet
|
|
||||||
environment:
|
environment:
|
||||||
- MYSQL_DSN=lbry:lbry@tcp(mysql:3306)/lbry
|
- MYSQL_DSN=lbry:lbry@tcp(mysql:3306)/lbry
|
||||||
- LBRYCRD_CONNECT=rpc://lbry:lbry@lbrycrd:29245
|
- LBRYCRD_CONNECT=rpc://lbry:lbry@lbrycrd:29245
|
||||||
- MYSQL_USER=lbry
|
- REPLICA_DSN=lbry:lbry@tcp(mysql:3306)/lbry
|
||||||
- MYSQL_PASS=lbry
|
entrypoint: wait-for-it -t 0 chainquery:6300 -- wait-for-it -t 0 lbrycrd:29245 -- ./latest serve
|
||||||
- MYSQL_DATABASE=lbry
|
################
|
||||||
entrypoint: wait-for-it mysql:3306 -- wait-for-it lbrynet:5279 -- ./latest serve
|
## Chainquery ##
|
||||||
|
################
|
||||||
|
chainquery:
|
||||||
|
image: odyseeteam/chainquery:master
|
||||||
|
restart: "no"
|
||||||
|
ports:
|
||||||
|
- 6300:6300
|
||||||
|
depends_on:
|
||||||
|
- lbrycrd
|
||||||
|
- mysql
|
||||||
|
## TODO: Uncomment this in a docker-compose.override.yml to allow for external configurations.
|
||||||
|
volumes:
|
||||||
|
- ./chainqueryconfig.toml:/etc/lbry/chainqueryconfig.toml
|
||||||
|
entrypoint: wait-for-it -t 0 lbrycrd:29245 -- wait-for-it -t 0 mysql:3306 -- start
|
67
e2e/e2e.sh
67
e2e/e2e.sh
|
@ -4,6 +4,8 @@ set -e
|
||||||
|
|
||||||
#Always compile ytsync
|
#Always compile ytsync
|
||||||
make
|
make
|
||||||
|
#Always compile supporty
|
||||||
|
cd e2e/supporty && make && cd ../..
|
||||||
|
|
||||||
#OVERRIDE this in your .env file if running from mac. Check docker-compose.yml for details
|
#OVERRIDE this in your .env file if running from mac. Check docker-compose.yml for details
|
||||||
export LOCAL_TMP_DIR="/var/tmp:/var/tmp"
|
export LOCAL_TMP_DIR="/var/tmp:/var/tmp"
|
||||||
|
@ -12,11 +14,8 @@ export LOCAL_TMP_DIR="/var/tmp:/var/tmp"
|
||||||
touch -a .env && set -o allexport; source ./.env; set +o allexport
|
touch -a .env && set -o allexport; source ./.env; set +o allexport
|
||||||
echo "LOCAL_TMP_DIR=$LOCAL_TMP_DIR"
|
echo "LOCAL_TMP_DIR=$LOCAL_TMP_DIR"
|
||||||
# Compose settings - docker only
|
# Compose settings - docker only
|
||||||
export SLACK_CHANNEL="ytsync-travis"
|
|
||||||
export LBRY_API_TOKEN="ytsyntoken"
|
|
||||||
export LBRY_WEB_API="http://localhost:15400"
|
|
||||||
export LBRYNET_ADDRESS="http://localhost:15100"
|
export LBRYNET_ADDRESS="http://localhost:15100"
|
||||||
export LBRYCRD_STRING="tcp://lbry:lbry@localhost:15200"
|
export LBRYCRD_STRING="tcp://lbry:lbry@localhost:15200" #required for supporty
|
||||||
export LBRYNET_USE_DOCKER=true
|
export LBRYNET_USE_DOCKER=true
|
||||||
export REFLECT_BLOBS=false
|
export REFLECT_BLOBS=false
|
||||||
export CLEAN_ON_STARTUP=true
|
export CLEAN_ON_STARTUP=true
|
||||||
|
@ -24,8 +23,9 @@ export REGTEST=true
|
||||||
# Local settings
|
# Local settings
|
||||||
export BLOBS_DIRECTORY="$(pwd)/e2e/blobsfiles"
|
export BLOBS_DIRECTORY="$(pwd)/e2e/blobsfiles"
|
||||||
export LBRYNET_DIR="$(pwd)/e2e/persist/.lbrynet/.local/share/lbry/lbrynet/"
|
export LBRYNET_DIR="$(pwd)/e2e/persist/.lbrynet/.local/share/lbry/lbrynet/"
|
||||||
export LBRYNET_WALLETS_DIR="$(pwd)/e2e/persist/.lbrynet/.local/share/lbry/lbryum"
|
export LBRYUM_DIR="$(pwd)/e2e/persist/.lbrynet/.local/share/lbry/lbryum"
|
||||||
export TMP_DIR="/var/tmp"
|
export TMP_DIR="/var/tmp"
|
||||||
|
export CHAINNAME="lbrycrd_regtest"
|
||||||
export UID
|
export UID
|
||||||
|
|
||||||
cd ./e2e
|
cd ./e2e
|
||||||
|
@ -47,19 +47,50 @@ until curl --output /dev/null --silent --head --fail http://localhost:15400; do
|
||||||
done
|
done
|
||||||
echo "successfully started..."
|
echo "successfully started..."
|
||||||
|
|
||||||
#Data Setup for test
|
channelToSync="UCMn-zv1SE-2y6vyewscfFqw"
|
||||||
./data_setup.sh
|
channelName=@whatever"$(date +%s)"
|
||||||
|
latestVideoID="yPJgjiMbmX0"
|
||||||
|
|
||||||
# Execute the test!
|
#Data Setup for test
|
||||||
./../bin/ytsync --channelID UCCyr5j8akeu9j4Q7urV0Lqw #Force channel intended...just in case. This channel lines up with the api container
|
./data_setup.sh "$channelName" "$channelToSync" "$latestVideoID"
|
||||||
# Assert the status
|
|
||||||
|
# Execute the sync test!
|
||||||
|
./../bin/ytsync --channelID "$channelToSync" --videos-limit 2 --concurrent-jobs 4 --quick #Force channel intended...just in case. This channel lines up with the api container
|
||||||
status=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT status FROM youtube_data WHERE id=1')
|
status=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT status FROM youtube_data WHERE id=1')
|
||||||
videoStatus=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT status FROM synced_video WHERE id=1')
|
videoStatus=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT status FROM synced_video WHERE id=1')
|
||||||
if [[ $status != "synced" || $videoStatus != "published" ]]; then
|
videoClaimID1=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT publish.claim_id FROM synced_video INNER JOIN publish ON publish.id = synced_video.publish_id WHERE synced_video.id=1')
|
||||||
docker-compose logs --tail="all" lbrycrd
|
videoClaimID2=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT publish.claim_id FROM synced_video INNER JOIN publish ON publish.id = synced_video.publish_id WHERE synced_video.id=2')
|
||||||
docker-compose logs --tail="all" walletserver
|
videoClaimAddress1=$(mysql -u lbry -plbry -ss -D chainquery -h "127.0.0.1" -P 15500 -e 'SELECT claim_address FROM claim WHERE id=2')
|
||||||
docker-compose logs --tail="all" lbrynet
|
videoClaimAddress2=$(mysql -u lbry -plbry -ss -D chainquery -h "127.0.0.1" -P 15500 -e 'SELECT claim_address FROM claim WHERE id=3')
|
||||||
docker-compose logs --tail="all" internalapis
|
# Create Supports for published claim
|
||||||
echo "List local /var/tmp"
|
./supporty/supporty "$channelName" "${videoClaimID1}" "${videoClaimAddress1}" lbrycrd_regtest 1.0
|
||||||
find /var/tmp
|
./supporty/supporty "$channelName" "${videoClaimID2}" "${videoClaimAddress2}" lbrycrd_regtest 2.0
|
||||||
exit 1; fi;
|
./supporty/supporty "$channelName" "${videoClaimID2}" "${videoClaimAddress2}" lbrycrd_regtest 3.0
|
||||||
|
./supporty/supporty "$channelName" "${videoClaimID1}" "${videoClaimAddress1}" lbrycrd_regtest 3.0
|
||||||
|
curl --data-binary '{"jsonrpc":"1.0","id":"curltext","method":"generate","params":[1]}' -H 'content-type:text/plain;' --user lbry:lbry http://localhost:15200
|
||||||
|
# Reset status for transfer test
|
||||||
|
mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e "UPDATE youtube_data SET status = 'queued' WHERE id = 1"
|
||||||
|
# Trigger transfer api
|
||||||
|
curl -i -H 'Accept: application/json' -H 'Content-Type: application/json' 'http://localhost:15400/yt/transfer?auth_token=youtubertoken&address=n4eYeXAYmHo4YRUDEfsEhucy8y5LKRMcHg&public_key=tpubDA9GDAntyJu4hD3wU7175p7CuV6DWbYXfyb2HedBA3yuBp9HZ4n3QE4Ex6RHCSiEuVp2nKAL1Lzf2ZLo9ApaFgNaJjG6Xo1wB3iEeVbrDZp'
|
||||||
|
# Execute the transfer test!
|
||||||
|
./../bin/ytsync --channelID $channelToSync --videos-limit 2 --concurrent-jobs 4 --quick #Force channel intended...just in case. This channel lines up with the api container
|
||||||
|
# Check that the channel and the video are marked as transferred and that all supports are spent
|
||||||
|
channelTransferStatus=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT distinct transfer_state FROM youtube_data')
|
||||||
|
videoTransferStatus=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT distinct transferred FROM synced_video')
|
||||||
|
nrUnspentSupports=$(mysql -u lbry -plbry -ss -D chainquery -h "127.0.0.1" -P 15500 -e 'SELECT COUNT(*) FROM chainquery.support INNER JOIN output ON output.transaction_hash = support.transaction_hash_id AND output.vout = support.vout WHERE output.is_spent = 0')
|
||||||
|
if [[ $status != "synced" || $videoStatus != "published" || $channelTransferStatus != "2" || $videoTransferStatus != "1" || $nrUnspentSupports != "1" ]]; then
|
||||||
|
echo "~~!!!~~~FAILED~~~!!!~~"
|
||||||
|
echo "Channel Status: $status"
|
||||||
|
echo "Video Status: $videoStatus"
|
||||||
|
echo "Channel Transfer Status: $channelTransferStatus"
|
||||||
|
echo "Video Transfer Status: $videoTransferStatus"
|
||||||
|
echo "Nr Unspent Supports: $nrUnspentSupports"
|
||||||
|
#docker-compose logs --tail="all" lbrycrd
|
||||||
|
#docker-compose logs --tail="all" walletserver
|
||||||
|
#docker-compose logs --tail="all" lbrynet
|
||||||
|
#docker-compose logs --tail="all" internalapis
|
||||||
|
exit 1;
|
||||||
|
else
|
||||||
|
echo "SUCCESSSSSSSSSSSSS!"
|
||||||
|
fi;
|
||||||
|
docker-compose down
|
3
e2e/init.sql
Normal file
3
e2e/init.sql
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
CREATE DATABASE IF NOT EXISTS chainquery;
|
||||||
|
GRANT ALL PRIVILEGES ON chainquery.* TO 'lbry'@'%';
|
||||||
|
FLUSH PRIVILEGES;
|
|
@ -4,5 +4,5 @@ if [ $# -eq 0 ]
|
||||||
echo "No docker tag argument supplied. Use './build.sh <tag>'"
|
echo "No docker tag argument supplied. Use './build.sh <tag>'"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
docker build --tag lbry/lbrycrd:$1 .
|
docker build --build-arg VERSION=$1 --tag lbry/lbrycrd:$1 .
|
||||||
docker push lbry/lbrycrd:$1
|
docker push lbry/lbrycrd:$1
|
|
@ -11,9 +11,9 @@ function override_config_option() {
|
||||||
local var=$1 option=$2 config=$3
|
local var=$1 option=$2 config=$3
|
||||||
if [[ -v $var ]]; then
|
if [[ -v $var ]]; then
|
||||||
# Remove the existing config option:
|
# Remove the existing config option:
|
||||||
sed -i "/^$option\W*=/d" $config
|
sed -i "/^$option\W*=/d" "$config"
|
||||||
# Add the value from the environment:
|
# Add the value from the environment:
|
||||||
echo "$option=${!var}" >> $config
|
echo "$option=${!var}" >> "$config"
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -38,12 +38,17 @@ function set_config() {
|
||||||
else
|
else
|
||||||
echo "Creating a fresh config file from environment variables."
|
echo "Creating a fresh config file from environment variables."
|
||||||
## Set config params
|
## Set config params
|
||||||
echo "port=${PORT=9246}" > $CONFIG_PATH
|
{
|
||||||
echo "rpcuser=${RPC_USER=lbry}" >> $CONFIG_PATH
|
echo "port=${PORT=9246}"
|
||||||
echo "rpcpassword=${RPC_PASSWORD=lbry}" >> $CONFIG_PATH
|
echo "rpcuser=${RPC_USER=lbry}"
|
||||||
echo "rpcallowip=${RPC_ALLOW_IP=127.0.0.1/24}" >> $CONFIG_PATH
|
echo "rpcpassword=${RPC_PASSWORD=lbry}"
|
||||||
echo "rpcport=${RPC_PORT=9245}" >> $CONFIG_PATH
|
echo "rpcallowip=${RPC_ALLOW_IP=127.0.0.1/24}"
|
||||||
echo "rpcbind=${RPC_BIND=0.0.0.0}" >> $CONFIG_PATH
|
echo "rpcport=${RPC_PORT=9245}"
|
||||||
|
echo "rpcbind=${RPC_BIND=0.0.0.0}"
|
||||||
|
echo "deprecatedrpc=accounts"
|
||||||
|
echo "deprecatedrpc=validateaddress"
|
||||||
|
echo "deprecatedrpc=signrawtransaction"
|
||||||
|
} >> $CONFIG_PATH
|
||||||
fi
|
fi
|
||||||
echo "Config: "
|
echo "Config: "
|
||||||
cat $CONFIG_PATH
|
cat $CONFIG_PATH
|
||||||
|
@ -76,7 +81,7 @@ case $RUN_MODE in
|
||||||
regtest )
|
regtest )
|
||||||
## Set config params
|
## Set config params
|
||||||
## TODO: Make this more automagic in the future.
|
## TODO: Make this more automagic in the future.
|
||||||
mkdir -p `dirname $CONFIG_PATH`
|
mkdir -p "$(dirname $CONFIG_PATH)"
|
||||||
echo "rpcuser=lbry" > $CONFIG_PATH
|
echo "rpcuser=lbry" > $CONFIG_PATH
|
||||||
echo "rpcpassword=lbry" >> $CONFIG_PATH
|
echo "rpcpassword=lbry" >> $CONFIG_PATH
|
||||||
echo "rpcport=29245" >> $CONFIG_PATH
|
echo "rpcport=29245" >> $CONFIG_PATH
|
||||||
|
@ -86,6 +91,11 @@ case $RUN_MODE in
|
||||||
echo "txindex=1" >> $CONFIG_PATH
|
echo "txindex=1" >> $CONFIG_PATH
|
||||||
echo "server=1" >> $CONFIG_PATH
|
echo "server=1" >> $CONFIG_PATH
|
||||||
echo "printtoconsole=1" >> $CONFIG_PATH
|
echo "printtoconsole=1" >> $CONFIG_PATH
|
||||||
|
echo "deprecatedrpc=accounts" >> $CONFIG_PATH
|
||||||
|
echo "deprecatedrpc=validateaddress" >> $CONFIG_PATH
|
||||||
|
echo "deprecatedrpc=signrawtransaction" >> $CONFIG_PATH
|
||||||
|
echo "vbparams=segwit:0:999999999999" >> $CONFIG_PATH
|
||||||
|
echo "addresstype=legacy" >> $CONFIG_PATH
|
||||||
|
|
||||||
#nohup advance &>/dev/null &
|
#nohup advance &>/dev/null &
|
||||||
lbrycrdd -conf=$CONFIG_PATH $1
|
lbrycrdd -conf=$CONFIG_PATH $1
|
||||||
|
@ -93,7 +103,7 @@ case $RUN_MODE in
|
||||||
testnet )
|
testnet )
|
||||||
## Set config params
|
## Set config params
|
||||||
## TODO: Make this more automagic in the future.
|
## TODO: Make this more automagic in the future.
|
||||||
mkdir -p `dirname $CONFIG_PATH`
|
mkdir -p "$(dirname $CONFIG_PATH)"
|
||||||
echo "rpcuser=lbry" > $CONFIG_PATH
|
echo "rpcuser=lbry" > $CONFIG_PATH
|
||||||
echo "rpcpassword=lbry" >> $CONFIG_PATH
|
echo "rpcpassword=lbry" >> $CONFIG_PATH
|
||||||
echo "rpcport=29245" >> $CONFIG_PATH
|
echo "rpcport=29245" >> $CONFIG_PATH
|
||||||
|
@ -103,6 +113,9 @@ case $RUN_MODE in
|
||||||
echo "txindex=1" >> $CONFIG_PATH
|
echo "txindex=1" >> $CONFIG_PATH
|
||||||
echo "server=1" >> $CONFIG_PATH
|
echo "server=1" >> $CONFIG_PATH
|
||||||
echo "printtoconsole=1" >> $CONFIG_PATH
|
echo "printtoconsole=1" >> $CONFIG_PATH
|
||||||
|
echo "deprecatedrpc=accounts" >> $CONFIG_PATH
|
||||||
|
echo "deprecatedrpc=validateaddress" >> $CONFIG_PATH
|
||||||
|
echo "deprecatedrpc=signrawtransaction" >> $CONFIG_PATH
|
||||||
|
|
||||||
#nohup advance &>/dev/null &
|
#nohup advance &>/dev/null &
|
||||||
lbrycrdd -conf=$CONFIG_PATH $1
|
lbrycrdd -conf=$CONFIG_PATH $1
|
||||||
|
|
|
@ -8,7 +8,7 @@ services:
|
||||||
## Lbrynet ##
|
## Lbrynet ##
|
||||||
#############
|
#############
|
||||||
lbrynet:
|
lbrynet:
|
||||||
image: lbry/lbrynet:v0.38.6
|
image: lbry/lbrynet:v0.99.0
|
||||||
restart: "no"
|
restart: "no"
|
||||||
networks:
|
networks:
|
||||||
lbry-network:
|
lbry-network:
|
||||||
|
|
|
@ -4,7 +4,7 @@ LABEL MAINTAINER="leopere [at] nixc [dot] us"
|
||||||
RUN apt-get update && apt-get -y install unzip curl telnet wait-for-it
|
RUN apt-get update && apt-get -y install unzip curl telnet wait-for-it
|
||||||
|
|
||||||
## Add lbrynet
|
## Add lbrynet
|
||||||
ARG VERSION="v0.38.6"
|
ARG VERSION="latest"
|
||||||
RUN URL=$(curl -s https://api.github.com/repos/lbryio/lbry-sdk/releases/$(if [ "${VERSION}" = 'latest' ]; then echo "latest"; else echo "tags/${VERSION}"; fi) | grep browser_download_url | grep lbrynet-linux.zip | cut -d'"' -f4) && echo $URL && curl -L -o /lbrynet.linux.zip $URL
|
RUN URL=$(curl -s https://api.github.com/repos/lbryio/lbry-sdk/releases/$(if [ "${VERSION}" = 'latest' ]; then echo "latest"; else echo "tags/${VERSION}"; fi) | grep browser_download_url | grep lbrynet-linux.zip | cut -d'"' -f4) && echo $URL && curl -L -o /lbrynet.linux.zip $URL
|
||||||
|
|
||||||
COPY start.sh /usr/bin/start
|
COPY start.sh /usr/bin/start
|
||||||
|
|
|
@ -6,6 +6,7 @@ lbryum_servers:
|
||||||
- walletserver:50001
|
- walletserver:50001
|
||||||
save_blobs: true
|
save_blobs: true
|
||||||
save_files: false
|
save_files: false
|
||||||
|
reflect_streams: false #for the love of god, don't upload regtest streams to reflector!
|
||||||
share_usage_data: false
|
share_usage_data: false
|
||||||
tcp_port: 3333
|
tcp_port: 3333
|
||||||
udp_port: 4444
|
udp_port: 4444
|
||||||
|
|
12
e2e/supporty/Makefile
Normal file
12
e2e/supporty/Makefile
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
BINARY=supporty
|
||||||
|
|
||||||
|
DIR = $(shell cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)
|
||||||
|
BIN_DIR = ${DIR}
|
||||||
|
|
||||||
|
.PHONY: build clean test lint
|
||||||
|
.DEFAULT_GOAL: build
|
||||||
|
|
||||||
|
|
||||||
|
build:
|
||||||
|
mkdir -p ${BIN_DIR} && CGO_ENABLED=0 go build -asmflags -trimpath=${DIR} -o ${BIN_DIR}/${BINARY} supporty.go
|
||||||
|
chmod +x ${BIN_DIR}/${BINARY}
|
43
e2e/supporty/supporty.go
Normal file
43
e2e/supporty/supporty.go
Normal file
|
@ -0,0 +1,43 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/lbryio/ytsync/v5/util"
|
||||||
|
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
if len(os.Args) != 6 {
|
||||||
|
logrus.Info(strings.Join(os.Args, ","))
|
||||||
|
logrus.Fatal("Not enough arguments: name, claimID, address, blockchainName, claimAmount")
|
||||||
|
}
|
||||||
|
println("Supporty!")
|
||||||
|
lbrycrd, err := util.GetLbrycrdClient(os.Getenv("LBRYCRD_STRING"))
|
||||||
|
if err != nil {
|
||||||
|
logrus.Fatal(err)
|
||||||
|
}
|
||||||
|
if lbrycrd == nil {
|
||||||
|
logrus.Fatal("Lbrycrd Client is nil")
|
||||||
|
}
|
||||||
|
amount, err := strconv.ParseFloat(os.Args[5], 64)
|
||||||
|
if err != nil {
|
||||||
|
logrus.Error(err)
|
||||||
|
}
|
||||||
|
name := os.Args[1]
|
||||||
|
claimid := os.Args[2]
|
||||||
|
claimAddress := os.Args[3]
|
||||||
|
blockChainName := os.Args[4]
|
||||||
|
logrus.Infof("Supporting %s[%s] with %.2f LBC on chain %s at address %s", name, claimid, amount, blockChainName, claimAddress)
|
||||||
|
hash, err := lbrycrd.SupportClaim(name, claimid, claimAddress, blockChainName, amount)
|
||||||
|
if err != nil {
|
||||||
|
logrus.Error(err)
|
||||||
|
}
|
||||||
|
if hash == nil {
|
||||||
|
logrus.Fatal("Tx not created!")
|
||||||
|
}
|
||||||
|
logrus.Info("Tx: ", hash.String())
|
||||||
|
}
|
|
@ -8,7 +8,7 @@ services:
|
||||||
## Wallet Server ##
|
## Wallet Server ##
|
||||||
###################
|
###################
|
||||||
walletserver:
|
walletserver:
|
||||||
image: lbry/wallet-server:v0.38.5
|
image: lbry/wallet-server:v0.73.1
|
||||||
restart: always
|
restart: always
|
||||||
networks:
|
networks:
|
||||||
lbry-network:
|
lbry-network:
|
||||||
|
|
171
go.mod
171
go.mod
|
@ -1,34 +1,151 @@
|
||||||
module github.com/lbryio/ytsync
|
go 1.17
|
||||||
|
|
||||||
|
module github.com/lbryio/ytsync/v5
|
||||||
|
|
||||||
|
replace github.com/btcsuite/btcd => github.com/lbryio/lbrycrd.go v0.0.0-20200203050410-e1076f12bf19
|
||||||
|
|
||||||
|
//replace github.com/lbryio/lbry.go/v2 => /home/niko/go/src/github.com/lbryio/lbry.go/
|
||||||
|
//replace github.com/lbryio/reflector.go => /home/niko/go/src/github.com/lbryio/reflector.go/
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/ChannelMeter/iso8601duration v0.0.0-20150204201828-8da3af7a2a61
|
github.com/abadojack/whatlanggo v1.0.1
|
||||||
github.com/Microsoft/go-winio v0.4.13 // indirect
|
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
|
||||||
|
github.com/aws/aws-sdk-go v1.44.6
|
||||||
|
github.com/davecgh/go-spew v1.1.1
|
||||||
|
github.com/docker/docker v20.10.17+incompatible
|
||||||
|
github.com/lbryio/lbry.go/v2 v2.7.2-0.20220815204100-2adb8af5b68c
|
||||||
|
github.com/lbryio/reflector.go v1.1.3-0.20220730181028-f5d30b1a6e79
|
||||||
|
github.com/mitchellh/go-ps v1.0.0
|
||||||
|
github.com/prometheus/client_golang v1.12.1
|
||||||
|
github.com/shopspring/decimal v1.3.1
|
||||||
|
github.com/sirupsen/logrus v1.9.0
|
||||||
|
github.com/spf13/cobra v1.4.0
|
||||||
|
github.com/stretchr/testify v1.7.1
|
||||||
|
github.com/tkanos/gonfig v0.0.0-20210106201359-53e13348de2f
|
||||||
|
github.com/vbauerster/mpb/v7 v7.4.1
|
||||||
|
gopkg.in/vansante/go-ffprobe.v2 v2.0.3
|
||||||
|
gotest.tools v2.2.0+incompatible
|
||||||
|
)
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/Microsoft/go-winio v0.5.1 // indirect
|
||||||
|
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||||
|
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
|
||||||
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 // indirect
|
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 // indirect
|
||||||
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
github.com/aws/aws-sdk-go v1.17.3
|
github.com/bluele/gcache v0.0.2 // indirect
|
||||||
github.com/channelmeter/iso8601duration v0.0.0-20150204201828-8da3af7a2a61 // indirect
|
github.com/brk0v/directio v0.0.0-20190225130936-69406e757cf7 // indirect
|
||||||
github.com/docker/distribution v2.7.1+incompatible // indirect
|
github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3 // indirect
|
||||||
github.com/docker/docker v1.13.1
|
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
|
||||||
|
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect
|
||||||
|
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
|
||||||
|
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
|
||||||
|
github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2 // indirect
|
||||||
|
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||||
|
github.com/cheekybits/genny v1.0.0 // indirect
|
||||||
|
github.com/docker/distribution v2.8.1+incompatible // indirect
|
||||||
github.com/docker/go-connections v0.4.0 // indirect
|
github.com/docker/go-connections v0.4.0 // indirect
|
||||||
github.com/docker/go-units v0.4.0 // indirect
|
github.com/docker/go-units v0.4.0 // indirect
|
||||||
|
github.com/ekyoung/gin-nice-recovery v0.0.0-20160510022553-1654dca486db // indirect
|
||||||
|
github.com/fatih/structs v1.1.0 // indirect
|
||||||
|
github.com/fsnotify/fsnotify v1.4.9 // indirect
|
||||||
|
github.com/ghodss/yaml v1.0.0 // indirect
|
||||||
|
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||||
|
github.com/gin-gonic/gin v1.7.7 // indirect
|
||||||
|
github.com/go-errors/errors v1.1.1 // indirect
|
||||||
|
github.com/go-ini/ini v1.48.0 // indirect
|
||||||
|
github.com/go-playground/locales v0.13.0 // indirect
|
||||||
|
github.com/go-playground/universal-translator v0.17.0 // indirect
|
||||||
|
github.com/go-playground/validator/v10 v10.4.1 // indirect
|
||||||
|
github.com/go-sql-driver/mysql v1.6.0 // indirect
|
||||||
|
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
|
||||||
|
github.com/gofrs/uuid v3.2.0+incompatible // indirect
|
||||||
|
github.com/gogo/protobuf v1.3.2 // indirect
|
||||||
|
github.com/golang/protobuf v1.5.2 // indirect
|
||||||
|
github.com/google/btree v1.0.1 // indirect
|
||||||
|
github.com/google/go-cmp v0.5.7 // indirect
|
||||||
|
github.com/gorilla/mux v1.8.0 // indirect
|
||||||
|
github.com/gorilla/rpc v1.2.0 // indirect
|
||||||
|
github.com/gorilla/websocket v1.4.2 // indirect
|
||||||
|
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||||
|
github.com/hashicorp/go-immutable-radix v1.1.0 // indirect
|
||||||
github.com/hashicorp/go-msgpack v0.5.5 // indirect
|
github.com/hashicorp/go-msgpack v0.5.5 // indirect
|
||||||
github.com/hashicorp/memberlist v0.1.4 // indirect
|
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||||
github.com/hashicorp/serf v0.8.2 // indirect
|
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
|
||||||
github.com/kr/pretty v0.1.0 // indirect
|
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
||||||
github.com/lbryio/errors.go v0.0.0-20180223142025-ad03d3cc6a5c
|
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||||
github.com/lbryio/lbry.go v1.0.17
|
github.com/hashicorp/memberlist v0.3.0 // indirect
|
||||||
github.com/lbryio/reflector.go v1.0.6-0.20190806185326-2e4f235489f4
|
github.com/hashicorp/serf v0.9.7 // indirect
|
||||||
github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936
|
github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf // indirect
|
||||||
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
|
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24
|
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||||
github.com/sirupsen/logrus v1.4.1
|
github.com/johntdyer/slack-go v0.0.0-20180213144715-95fac1160b22 // indirect
|
||||||
github.com/spf13/cobra v0.0.0-20190109003409-7547e83b2d85
|
github.com/johntdyer/slackrus v0.0.0-20211215141436-33e4a270affb // indirect
|
||||||
github.com/spf13/pflag v1.0.3 // indirect
|
github.com/json-iterator/go v1.1.12 // indirect
|
||||||
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c // indirect
|
github.com/karrick/godirwalk v1.17.0 // indirect
|
||||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c // indirect
|
github.com/kr/text v0.2.0 // indirect
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 // indirect
|
github.com/lbryio/chainquery v1.9.0 // indirect
|
||||||
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5 // indirect
|
github.com/lbryio/lbry.go v1.1.2 // indirect
|
||||||
golang.org/x/text v0.3.2 // indirect
|
github.com/lbryio/types v0.0.0-20220224142228-73610f6654a6 // indirect
|
||||||
google.golang.org/api v0.3.2
|
github.com/leodido/go-urn v1.2.0 // indirect
|
||||||
google.golang.org/grpc v1.20.0 // indirect
|
github.com/lucas-clemente/quic-go v0.28.1 // indirect
|
||||||
|
github.com/lyoshenka/bencode v0.0.0-20180323155644-b7abd7672df5 // indirect
|
||||||
|
github.com/magiconair/properties v1.8.1 // indirect
|
||||||
|
github.com/marten-seemann/qpack v0.2.1 // indirect
|
||||||
|
github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
|
||||||
|
github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect
|
||||||
|
github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect
|
||||||
|
github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1 // indirect
|
||||||
|
github.com/mattn/go-isatty v0.0.12 // indirect
|
||||||
|
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||||
|
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||||
|
github.com/miekg/dns v1.1.41 // indirect
|
||||||
|
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||||
|
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||||
|
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||||
|
github.com/morikuni/aec v1.0.0 // indirect
|
||||||
|
github.com/nxadm/tail v1.4.8 // indirect
|
||||||
|
github.com/onsi/ginkgo v1.16.4 // indirect
|
||||||
|
github.com/onsi/gomega v1.17.0 // indirect
|
||||||
|
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||||
|
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
|
||||||
|
github.com/pelletier/go-toml v1.9.3 // indirect
|
||||||
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
|
github.com/prometheus/client_model v0.2.0 // indirect
|
||||||
|
github.com/prometheus/common v0.32.1 // indirect
|
||||||
|
github.com/prometheus/procfs v0.7.3 // indirect
|
||||||
|
github.com/rivo/uniseg v0.2.0 // indirect
|
||||||
|
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
|
||||||
|
github.com/slack-go/slack v0.10.3 // indirect
|
||||||
|
github.com/spf13/afero v1.4.1 // indirect
|
||||||
|
github.com/spf13/cast v1.4.1 // indirect
|
||||||
|
github.com/spf13/jwalterweatherman v1.0.0 // indirect
|
||||||
|
github.com/spf13/pflag v1.0.5 // indirect
|
||||||
|
github.com/spf13/viper v1.7.1 // indirect
|
||||||
|
github.com/subosito/gotenv v1.2.0 // indirect
|
||||||
|
github.com/ugorji/go/codec v1.1.7 // indirect
|
||||||
|
github.com/volatiletech/inflect v0.0.0-20170731032912-e7201282ae8d // indirect
|
||||||
|
github.com/volatiletech/null v8.0.0+incompatible // indirect
|
||||||
|
github.com/volatiletech/sqlboiler v3.4.0+incompatible // indirect
|
||||||
|
github.com/ybbus/jsonrpc v2.1.2+incompatible // indirect
|
||||||
|
go.uber.org/atomic v1.9.0 // indirect
|
||||||
|
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
|
||||||
|
golang.org/x/mod v0.4.2 // indirect
|
||||||
|
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
|
||||||
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
|
||||||
|
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
|
||||||
|
golang.org/x/text v0.3.7 // indirect
|
||||||
|
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
|
||||||
|
golang.org/x/tools v0.1.5 // indirect
|
||||||
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||||
|
google.golang.org/protobuf v1.27.1 // indirect
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||||
|
gopkg.in/ini.v1 v1.60.2 // indirect
|
||||||
|
gopkg.in/nullbio/null.v6 v6.0.0-20161116030900-40264a2e6b79 // indirect
|
||||||
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||||
|
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||||
|
gotest.tools/v3 v3.2.0 // indirect
|
||||||
)
|
)
|
||||||
|
|
|
@ -1,122 +0,0 @@
|
||||||
package ipManager
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/asaskevich/govalidator"
|
|
||||||
"github.com/lbryio/ytsync/util"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
|
|
||||||
"github.com/lbryio/lbry.go/extras/errors"
|
|
||||||
"github.com/lbryio/lbry.go/extras/stop"
|
|
||||||
|
|
||||||
"net"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const IPCooldownPeriod = 25 * time.Second
|
|
||||||
const unbanTimeout = 3 * time.Hour
|
|
||||||
|
|
||||||
var ipv6Pool []string
|
|
||||||
var ipv4Pool []string
|
|
||||||
var throttledIPs map[string]bool
|
|
||||||
var ipInUse map[string]bool
|
|
||||||
var ipLastUsed map[string]time.Time
|
|
||||||
var ipMutex sync.Mutex
|
|
||||||
var stopper = stop.New()
|
|
||||||
|
|
||||||
// GetNextIP returns the least recently used, non-throttled, not-in-use IP
// of the requested family (IPv6 when ipv6 is true), marks it as in use and
// returns it. Pools are lazily built from the host's interface addresses.
// When no IP is available it returns the literal string "throttled" plus
// an error. Sleeps out any remaining per-IP cooldown before returning.
//
// NOTE(review): the pools are (re)initialized whenever EITHER family's pool
// is empty, so on a host with only IPv4 (or only IPv6) addresses this
// rebuilds the maps — wiping all throttle state — on every call; confirm
// whether `||` was meant to be `&&` here.
func GetNextIP(ipv6 bool) (string, error) {
	ipMutex.Lock()
	defer ipMutex.Unlock()
	if len(ipv4Pool) < 1 || len(ipv6Pool) < 1 {
		// first use (or one pool empty): reset all state and rescan interfaces
		throttledIPs = make(map[string]bool)
		ipInUse = make(map[string]bool)
		ipLastUsed = make(map[string]time.Time)
		addrs, err := net.InterfaceAddrs()
		if err != nil {
			return "", errors.Err(err)
		}

		for _, address := range addrs {
			if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
				if ipnet.IP.To16() != nil && govalidator.IsIPv6(ipnet.IP.String()) {
					ipv6Pool = append(ipv6Pool, ipnet.IP.String())
					// backdate last use so fresh IPs are immediately usable
					ipLastUsed[ipnet.IP.String()] = time.Now().Add(-IPCooldownPeriod)
				} else if ipnet.IP.To4() != nil && govalidator.IsIPv4(ipnet.IP.String()) {
					ipv4Pool = append(ipv4Pool, ipnet.IP.String())
					ipLastUsed[ipnet.IP.String()] = time.Now().Add(-IPCooldownPeriod)
				}
			}
		}
	}
	nextIP := ""
	if ipv6 {
		nextIP = getLeastUsedIP(ipv6Pool)
	} else {
		nextIP = getLeastUsedIP(ipv4Pool)
	}
	if nextIP == "" {
		return "throttled", errors.Err("all IPs are throttled")
	}
	lastUse := ipLastUsed[nextIP]
	if time.Since(lastUse) < IPCooldownPeriod {
		// sleeping while holding ipMutex: all other callers block for the
		// duration of the cooldown wait
		log.Debugf("The IP %s is too hot, waiting for %.1f seconds before continuing", nextIP, (IPCooldownPeriod - time.Since(lastUse)).Seconds())
		time.Sleep(IPCooldownPeriod - time.Since(lastUse))
	}

	ipInUse[nextIP] = true
	return nextIP, nil
}
|
|
||||||
|
|
||||||
func ReleaseIP(ip string) {
|
|
||||||
ipMutex.Lock()
|
|
||||||
defer ipMutex.Unlock()
|
|
||||||
ipLastUsed[ip] = time.Now()
|
|
||||||
ipInUse[ip] = false
|
|
||||||
}
|
|
||||||
|
|
||||||
func getLeastUsedIP(ipPool []string) string {
|
|
||||||
nextIP := ""
|
|
||||||
veryLastUse := time.Now()
|
|
||||||
for _, ip := range ipPool {
|
|
||||||
isThrottled := throttledIPs[ip]
|
|
||||||
if isThrottled {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
inUse := ipInUse[ip]
|
|
||||||
if inUse {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
lastUse := ipLastUsed[ip]
|
|
||||||
if lastUse.Before(veryLastUse) {
|
|
||||||
nextIP = ip
|
|
||||||
veryLastUse = lastUse
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nextIP
|
|
||||||
}
|
|
||||||
|
|
||||||
func SetIpThrottled(ip string, stopGrp *stop.Group) {
|
|
||||||
ipMutex.Lock()
|
|
||||||
isThrottled := throttledIPs[ip]
|
|
||||||
if isThrottled {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
throttledIPs[ip] = true
|
|
||||||
ipMutex.Unlock()
|
|
||||||
util.SendErrorToSlack("%s set to throttled", ip)
|
|
||||||
|
|
||||||
stopper.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer stopper.Done()
|
|
||||||
unbanTimer := time.NewTimer(unbanTimeout)
|
|
||||||
select {
|
|
||||||
case <-unbanTimer.C:
|
|
||||||
ipMutex.Lock()
|
|
||||||
throttledIPs[ip] = false
|
|
||||||
ipMutex.Unlock()
|
|
||||||
util.SendInfoToSlack("%s set back to not throttled", ip)
|
|
||||||
case <-stopGrp.Ch():
|
|
||||||
unbanTimer.Stop()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
227
ip_manager/throttle.go
Normal file
227
ip_manager/throttle.go
Normal file
|
@ -0,0 +1,227 @@
|
||||||
|
package ip_manager
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/asaskevich/govalidator"
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/stop"
|
||||||
|
"github.com/lbryio/ytsync/v5/util"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
const IPCooldownPeriod = 20 * time.Second
|
||||||
|
const unbanTimeout = 48 * time.Hour
|
||||||
|
|
||||||
|
var stopper = stop.New()
|
||||||
|
|
||||||
|
// IPPool is a pool of the host's local interface addresses, tracking which
// are in use or throttled so callers can spread work across IPs.
type IPPool struct {
	ips     []throttledIP // every usable local address and its state
	lock    *sync.RWMutex // guards ips and the fields of its elements
	stopGrp *stop.Group   // shutdown signal for unban goroutines and waiters
}
|
||||||
|
|
||||||
|
// throttledIP holds the usage and throttling state of one local IP.
type throttledIP struct {
	IP           string    // the address as a string
	UsedForVideo string    // video ID this IP is currently assigned to
	LastUse      time.Time // last acquisition/release time; drives the cooldown
	Throttled    bool      // set by SetThrottled; cleared after unbanTimeout
	InUse        bool      // checked out via nextIP/GetIP and not yet released
}
|
||||||
|
|
||||||
|
var ipPoolInstance *IPPool
|
||||||
|
|
||||||
|
// GetIPPool returns the process-wide IPPool singleton, creating it on first
// call by scanning the host's global-unicast interface addresses (both IPv4
// and IPv6). New entries get a LastUse backdated 5 minutes so they are
// immediately usable.
// NOTE(review): the singleton check/assignment is not synchronized — two
// concurrent first calls could each build a pool; confirm callers serialize
// the first invocation.
func GetIPPool(stopGrp *stop.Group) (*IPPool, error) {
	if ipPoolInstance != nil {
		return ipPoolInstance, nil
	}
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return nil, errors.Err(err)
	}
	var pool []throttledIP
	for _, address := range addrs {
		if ipnet, ok := address.(*net.IPNet); ok && ipnet.IP.IsGlobalUnicast() {
			if ipnet.IP.To16() != nil && govalidator.IsIPv6(ipnet.IP.String()) {
				pool = append(pool, throttledIP{
					IP:      ipnet.IP.String(),
					LastUse: time.Now().Add(-5 * time.Minute),
				})
			} else if ipnet.IP.To4() != nil && govalidator.IsIPv4(ipnet.IP.String()) {
				pool = append(pool, throttledIP{
					IP:      ipnet.IP.String(),
					LastUse: time.Now().Add(-5 * time.Minute),
				})
			}
		}
	}
	ipPoolInstance = &IPPool{
		ips:     pool,
		lock:    &sync.RWMutex{},
		stopGrp: stopGrp,
	}
	// Debugging aid kept for reference: periodically dumps pool state.
	//ticker := time.NewTicker(10 * time.Second)
	//go func() {
	//	for {
	//		select {
	//		case <-stopGrp.Ch():
	//			return
	//		case <-ticker.C:
	//			ipPoolInstance.lock.RLock()
	//			for _, ip := range ipPoolInstance.ips {
	//				log.Debugf("IP: %s\tInUse: %t\tVideoID: %s\tThrottled: %t\tLastUse: %.1f", ip.IP, ip.InUse, ip.UsedForVideo, ip.Throttled, time.Since(ip.LastUse).Seconds())
	//			}
	//			ipPoolInstance.lock.RUnlock()
	//		}
	//	}
	//}()
	return ipPoolInstance, nil
}
|
||||||
|
|
||||||
|
// AllThrottled checks whether the IPs provided are all throttled.
|
||||||
|
// returns false if at least one IP is not throttled
|
||||||
|
// Not thread safe, should use locking when called
|
||||||
|
func AllThrottled(ips []throttledIP) bool {
|
||||||
|
for _, i := range ips {
|
||||||
|
if !i.Throttled {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllInUse checks whether the IPs provided are all currently in use.
|
||||||
|
// returns false if at least one IP is not in use AND is not throttled
|
||||||
|
// Not thread safe, should use locking when called
|
||||||
|
func AllInUse(ips []throttledIP) bool {
|
||||||
|
for _, i := range ips {
|
||||||
|
if !i.InUse && !i.Throttled {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *IPPool) ReleaseIP(ip string) {
|
||||||
|
i.lock.Lock()
|
||||||
|
defer i.lock.Unlock()
|
||||||
|
for j := range i.ips {
|
||||||
|
localIP := &i.ips[j]
|
||||||
|
if localIP.IP == ip {
|
||||||
|
localIP.InUse = false
|
||||||
|
localIP.LastUse = time.Now()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
util.SendErrorToSlack("something went wrong while releasing the IP %s as we reached the end of the function", ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *IPPool) ReleaseAll() {
|
||||||
|
i.lock.Lock()
|
||||||
|
defer i.lock.Unlock()
|
||||||
|
for j := range i.ips {
|
||||||
|
if i.ips[j].Throttled {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
localIP := &i.ips[j]
|
||||||
|
localIP.InUse = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *IPPool) SetThrottled(ip string) {
|
||||||
|
i.lock.Lock()
|
||||||
|
defer i.lock.Unlock()
|
||||||
|
var tIP *throttledIP
|
||||||
|
for j, _ := range i.ips {
|
||||||
|
localIP := &i.ips[j]
|
||||||
|
if localIP.IP == ip {
|
||||||
|
if localIP.Throttled {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
localIP.Throttled = true
|
||||||
|
tIP = localIP
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
util.SendErrorToSlack("%s set to throttled", ip)
|
||||||
|
|
||||||
|
stopper.Add(1)
|
||||||
|
go func(tIP *throttledIP) {
|
||||||
|
defer stopper.Done()
|
||||||
|
unbanTimer := time.NewTimer(unbanTimeout)
|
||||||
|
select {
|
||||||
|
case <-unbanTimer.C:
|
||||||
|
i.lock.Lock()
|
||||||
|
tIP.Throttled = false
|
||||||
|
i.lock.Unlock()
|
||||||
|
util.SendInfoToSlack("%s set back to not throttled", ip)
|
||||||
|
case <-i.stopGrp.Ch():
|
||||||
|
unbanTimer.Stop()
|
||||||
|
}
|
||||||
|
}(tIP)
|
||||||
|
}
|
||||||
|
|
||||||
|
var ErrAllInUse = errors.Base("all IPs are in use, try again")
|
||||||
|
var ErrAllThrottled = errors.Base("all IPs are throttled")
|
||||||
|
var ErrResourceLock = errors.Base("error getting next ip, did you forget to lock on the resource?")
|
||||||
|
var ErrInterruptedByUser = errors.Base("interrupted by user")
|
||||||
|
|
||||||
|
func (i *IPPool) nextIP(forVideo string) (*throttledIP, error) {
|
||||||
|
i.lock.Lock()
|
||||||
|
defer i.lock.Unlock()
|
||||||
|
|
||||||
|
sort.Slice(i.ips, func(j, k int) bool {
|
||||||
|
return i.ips[j].LastUse.Before(i.ips[k].LastUse)
|
||||||
|
})
|
||||||
|
|
||||||
|
if !AllThrottled(i.ips) {
|
||||||
|
if AllInUse(i.ips) {
|
||||||
|
return nil, errors.Err(ErrAllInUse)
|
||||||
|
}
|
||||||
|
|
||||||
|
var nextIP *throttledIP
|
||||||
|
for j := range i.ips {
|
||||||
|
ip := &i.ips[j]
|
||||||
|
if ip.InUse || ip.Throttled {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
nextIP = ip
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if nextIP == nil {
|
||||||
|
return nil, errors.Err(ErrResourceLock)
|
||||||
|
}
|
||||||
|
nextIP.InUse = true
|
||||||
|
nextIP.UsedForVideo = forVideo
|
||||||
|
return nextIP, nil
|
||||||
|
}
|
||||||
|
return nil, errors.Err(ErrAllThrottled)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetIP blocks until an IP is available for forVideo and returns it.
// While every IP is merely in use it retries every 5 seconds, aborting with
// ErrInterruptedByUser if the pool's stop group fires. When every IP is
// throttled it returns the literal string "throttled" plus ErrAllThrottled;
// any other error is propagated immediately. Before returning, it sleeps
// out any remaining per-IP cooldown.
func (i *IPPool) GetIP(forVideo string) (string, error) {
	for {
		ip, err := i.nextIP(forVideo)
		if err != nil {
			if errors.Is(err, ErrAllInUse) {
				// wait for an IP to free up, unless we are shutting down
				select {
				case <-i.stopGrp.Ch():
					return "", errors.Err(ErrInterruptedByUser)
				default:
					time.Sleep(5 * time.Second)
					continue
				}
			} else if errors.Is(err, ErrAllThrottled) {
				return "throttled", err
			}
			return "", err
		}
		if time.Since(ip.LastUse) < IPCooldownPeriod {
			// NOTE(review): ip.LastUse is read here without holding i.lock —
			// confirm no concurrent writer can race this read.
			log.Debugf("The IP %s is too hot, waiting for %.1f seconds before continuing", ip.IP, (IPCooldownPeriod - time.Since(ip.LastUse)).Seconds())
			time.Sleep(IPCooldownPeriod - time.Since(ip.LastUse))
		}
		return ip.IP, nil
	}
}
|
40
ip_manager/throttle_test.go
Normal file
40
ip_manager/throttle_test.go
Normal file
|
@ -0,0 +1,40 @@
|
||||||
|
package ip_manager
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestAll exercises the pool end to end: acquire an IP, release it,
// reacquire (expecting a different IP when the pool has more than one),
// exhaust the pool, then verify nextIP errors once nothing is free.
func TestAll(t *testing.T) {
	pool, err := GetIPPool()
	if err != nil {
		t.Fatal(err)
	}
	ip, err := pool.GetIP()
	if err != nil {
		t.Fatal(err)
	}
	t.Log(ip)
	pool.ReleaseIP(ip)
	ip2, err := pool.GetIP()
	if err != nil {
		t.Fatal(err)
	}
	// the pool sorts by least-recent use, so with >1 IP the second
	// acquisition must not return the IP just released
	if ip == ip2 && len(pool.ips) > 1 {
		t.Fatalf("the same IP was returned twice! %s, %s", ip, ip2)
	}
	t.Log(ip2)
	pool.ReleaseIP(ip2)

	// drain the pool: one acquisition per known IP
	for range pool.ips {
		_, err = pool.GetIP()
		if err != nil {
			t.Fatal(err)
		}
	}
	// with everything checked out, nextIP must fail
	next, err := pool.nextIP()
	if err != nil {
		t.Logf("%s", err.Error())
	} else {
		t.Fatal(next)
	}
}
|
188
main.go
188
main.go
|
@ -3,15 +3,19 @@ package main
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/lbryio/lbry.go/extras/errors"
|
"github.com/lbryio/ytsync/v5/configs"
|
||||||
"github.com/lbryio/lbry.go/extras/util"
|
"github.com/lbryio/ytsync/v5/manager"
|
||||||
"github.com/lbryio/ytsync/manager"
|
"github.com/lbryio/ytsync/v5/shared"
|
||||||
"github.com/lbryio/ytsync/sdk"
|
ytUtils "github.com/lbryio/ytsync/v5/util"
|
||||||
ytUtils "github.com/lbryio/ytsync/util"
|
|
||||||
|
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/util"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
@ -21,30 +25,21 @@ var Version string
|
||||||
const defaultMaxTries = 3
|
const defaultMaxTries = 3
|
||||||
|
|
||||||
var (
|
var (
|
||||||
stopOnError bool
|
cliFlags shared.SyncFlags
|
||||||
maxTries int
|
maxVideoLength int
|
||||||
takeOverExistingChannel bool
|
|
||||||
refill int
|
|
||||||
limit int
|
|
||||||
skipSpaceCheck bool
|
|
||||||
syncUpdate bool
|
|
||||||
singleRun bool
|
|
||||||
syncStatus string
|
|
||||||
channelID string
|
|
||||||
syncFrom int64
|
|
||||||
syncUntil int64
|
|
||||||
concurrentJobs int
|
|
||||||
videosLimit int
|
|
||||||
maxVideoSize int
|
|
||||||
maxVideoLength float64
|
|
||||||
removeDBUnpublished bool
|
|
||||||
upgradeMetadata bool
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
rand.Seed(time.Now().UnixNano())
|
rand.Seed(time.Now().UnixNano())
|
||||||
log.SetLevel(log.DebugLevel)
|
log.SetLevel(log.DebugLevel)
|
||||||
|
customFormatter := new(log.TextFormatter)
|
||||||
|
customFormatter.TimestampFormat = "2006-01-02 15:04:05"
|
||||||
|
customFormatter.FullTimestamp = true
|
||||||
|
log.SetFormatter(customFormatter)
|
||||||
|
http.Handle("/metrics", promhttp.Handler())
|
||||||
|
go func() {
|
||||||
|
log.Error(http.ListenAndServe(":2112", nil))
|
||||||
|
}()
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: "ytsync",
|
Use: "ytsync",
|
||||||
Short: "Publish youtube channels into LBRY network automatically.",
|
Short: "Publish youtube channels into LBRY network automatically.",
|
||||||
|
@ -52,23 +47,25 @@ func main() {
|
||||||
Args: cobra.RangeArgs(0, 0),
|
Args: cobra.RangeArgs(0, 0),
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd.Flags().BoolVar(&stopOnError, "stop-on-error", false, "If a publish fails, stop all publishing and exit")
|
cmd.Flags().IntVar(&cliFlags.MaxTries, "max-tries", defaultMaxTries, "Number of times to try a publish that fails")
|
||||||
cmd.Flags().IntVar(&maxTries, "max-tries", defaultMaxTries, "Number of times to try a publish that fails")
|
cmd.Flags().BoolVar(&cliFlags.TakeOverExistingChannel, "takeover-existing-channel", false, "If channel exists and we don't own it, take over the channel")
|
||||||
cmd.Flags().BoolVar(&takeOverExistingChannel, "takeover-existing-channel", false, "If channel exists and we don't own it, take over the channel")
|
cmd.Flags().IntVar(&cliFlags.Limit, "limit", 0, "limit the amount of channels to sync")
|
||||||
cmd.Flags().IntVar(&limit, "limit", 0, "limit the amount of channels to sync")
|
cmd.Flags().BoolVar(&cliFlags.SkipSpaceCheck, "skip-space-check", false, "Do not perform free space check on startup")
|
||||||
cmd.Flags().BoolVar(&skipSpaceCheck, "skip-space-check", false, "Do not perform free space check on startup")
|
cmd.Flags().BoolVar(&cliFlags.SyncUpdate, "update", false, "Update previously synced channels instead of syncing new ones")
|
||||||
cmd.Flags().BoolVar(&syncUpdate, "update", false, "Update previously synced channels instead of syncing new ones")
|
cmd.Flags().BoolVar(&cliFlags.SingleRun, "run-once", false, "Whether the process should be stopped after one cycle or not")
|
||||||
cmd.Flags().BoolVar(&singleRun, "run-once", false, "Whether the process should be stopped after one cycle or not")
|
cmd.Flags().BoolVar(&cliFlags.RemoveDBUnpublished, "remove-db-unpublished", false, "Remove videos from the database that are marked as published but aren't really published")
|
||||||
cmd.Flags().BoolVar(&removeDBUnpublished, "remove-db-unpublished", false, "Remove videos from the database that are marked as published but aren't really published")
|
cmd.Flags().BoolVar(&cliFlags.UpgradeMetadata, "upgrade-metadata", false, "Upgrade videos if they're on the old metadata version")
|
||||||
cmd.Flags().BoolVar(&upgradeMetadata, "upgrade-metadata", false, "Upgrade videos if they're on the old metadata version")
|
cmd.Flags().BoolVar(&cliFlags.DisableTransfers, "no-transfers", false, "Skips the transferring process of videos, channels and supports")
|
||||||
cmd.Flags().StringVar(&syncStatus, "status", "", "Specify which queue to pull from. Overrides --update")
|
cmd.Flags().BoolVar(&cliFlags.QuickSync, "quick", false, "Look up only the last 50 videos from youtube")
|
||||||
cmd.Flags().StringVar(&channelID, "channelID", "", "If specified, only this channel will be synced.")
|
cmd.Flags().StringVar(&cliFlags.Status, "status", "", "Specify which queue to pull from. Overrides --update")
|
||||||
cmd.Flags().Int64Var(&syncFrom, "after", time.Unix(0, 0).Unix(), "Specify from when to pull jobs [Unix time](Default: 0)")
|
cmd.Flags().StringVar(&cliFlags.SecondaryStatus, "status2", "", "Specify which secondary queue to pull from.")
|
||||||
cmd.Flags().Int64Var(&syncUntil, "before", time.Now().AddDate(1, 0, 0).Unix(), "Specify until when to pull jobs [Unix time](Default: current Unix time)")
|
cmd.Flags().StringVar(&cliFlags.ChannelID, "channelID", "", "If specified, only this channel will be synced.")
|
||||||
cmd.Flags().IntVar(&concurrentJobs, "concurrent-jobs", 1, "how many jobs to process concurrently")
|
cmd.Flags().Int64Var(&cliFlags.SyncFrom, "after", time.Unix(0, 0).Unix(), "Specify from when to pull jobs [Unix time](Default: 0)")
|
||||||
cmd.Flags().IntVar(&videosLimit, "videos-limit", 1000, "how many videos to process per channel")
|
cmd.Flags().Int64Var(&cliFlags.SyncUntil, "before", time.Now().AddDate(1, 0, 0).Unix(), "Specify until when to pull jobs [Unix time](Default: current Unix time)")
|
||||||
cmd.Flags().IntVar(&maxVideoSize, "max-size", 2048, "Maximum video size to process (in MB)")
|
cmd.Flags().IntVar(&cliFlags.ConcurrentJobs, "concurrent-jobs", 1, "how many jobs to process concurrently")
|
||||||
cmd.Flags().Float64Var(&maxVideoLength, "max-length", 2.0, "Maximum video length to process (in hours)")
|
cmd.Flags().IntVar(&cliFlags.VideosLimit, "videos-limit", 0, "how many videos to process per channel (leave 0 for automatic detection)")
|
||||||
|
cmd.Flags().IntVar(&cliFlags.MaxVideoSize, "max-size", 2048, "Maximum video size to process (in MB)")
|
||||||
|
cmd.Flags().IntVar(&maxVideoLength, "max-length", 2, "Maximum video length to process (in hours)")
|
||||||
|
|
||||||
if err := cmd.Execute(); err != nil {
|
if err := cmd.Execute(); err != nil {
|
||||||
fmt.Println(err)
|
fmt.Println(err)
|
||||||
|
@ -77,123 +74,60 @@ func main() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func ytSync(cmd *cobra.Command, args []string) {
|
func ytSync(cmd *cobra.Command, args []string) {
|
||||||
var hostname string
|
err := configs.Init("./config.json")
|
||||||
slackToken := os.Getenv("SLACK_TOKEN")
|
|
||||||
if slackToken == "" {
|
|
||||||
log.Error("A slack token was not present in env vars! Slack messages disabled!")
|
|
||||||
} else {
|
|
||||||
var err error
|
|
||||||
hostname, err = os.Hostname()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("could not detect system hostname")
|
log.Fatalf("could not parse configuration file: %s", errors.FullTrace(err))
|
||||||
hostname = "ytsync-unknown"
|
|
||||||
}
|
|
||||||
if len(hostname) > 30 {
|
|
||||||
hostname = hostname[0:30]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
util.InitSlack(os.Getenv("SLACK_TOKEN"), os.Getenv("SLACK_CHANNEL"), hostname)
|
if configs.Configuration.SlackToken == "" {
|
||||||
|
log.Error("A slack token was not present in the config! Slack messages disabled!")
|
||||||
|
} else {
|
||||||
|
util.InitSlack(configs.Configuration.SlackToken, configs.Configuration.SlackChannel, configs.Configuration.GetHostname())
|
||||||
}
|
}
|
||||||
|
|
||||||
if syncStatus != "" && !util.InSlice(syncStatus, manager.SyncStatuses) {
|
if cliFlags.Status != "" && !util.InSlice(cliFlags.Status, shared.SyncStatuses) {
|
||||||
log.Errorf("status must be one of the following: %v\n", manager.SyncStatuses)
|
log.Errorf("status must be one of the following: %v\n", shared.SyncStatuses)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if stopOnError && maxTries != defaultMaxTries {
|
if cliFlags.MaxTries < 1 {
|
||||||
log.Errorln("--stop-on-error and --max-tries are mutually exclusive")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if maxTries < 1 {
|
|
||||||
log.Errorln("setting --max-tries less than 1 doesn't make sense")
|
log.Errorln("setting --max-tries less than 1 doesn't make sense")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if limit < 0 {
|
if cliFlags.Limit < 0 {
|
||||||
log.Errorln("setting --limit less than 0 (unlimited) doesn't make sense")
|
log.Errorln("setting --limit less than 0 (unlimited) doesn't make sense")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
cliFlags.MaxVideoLength = time.Duration(maxVideoLength) * time.Hour
|
||||||
|
|
||||||
apiURL := os.Getenv("LBRY_WEB_API")
|
if configs.Configuration.InternalApisEndpoint == "" {
|
||||||
apiToken := os.Getenv("LBRY_API_TOKEN")
|
log.Errorln("An Internal APIs Endpoint was not defined")
|
||||||
youtubeAPIKey := os.Getenv("YOUTUBE_API_KEY")
|
|
||||||
lbrycrdString := os.Getenv("LBRYCRD_STRING")
|
|
||||||
awsS3ID := os.Getenv("AWS_S3_ID")
|
|
||||||
awsS3Secret := os.Getenv("AWS_S3_SECRET")
|
|
||||||
awsS3Region := os.Getenv("AWS_S3_REGION")
|
|
||||||
awsS3Bucket := os.Getenv("AWS_S3_BUCKET")
|
|
||||||
if apiURL == "" {
|
|
||||||
log.Errorln("An API URL was not defined. Please set the environment variable LBRY_WEB_API")
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if apiToken == "" {
|
if configs.Configuration.InternalApisAuthToken == "" {
|
||||||
log.Errorln("An API Token was not defined. Please set the environment variable LBRY_API_TOKEN")
|
log.Errorln("An Internal APIs auth token was not defined")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if youtubeAPIKey == "" {
|
if configs.Configuration.WalletS3Config.ID == "" || configs.Configuration.WalletS3Config.Region == "" || configs.Configuration.WalletS3Config.Bucket == "" || configs.Configuration.WalletS3Config.Secret == "" || configs.Configuration.WalletS3Config.Endpoint == "" {
|
||||||
log.Errorln("A Youtube API key was not defined. Please set the environment variable YOUTUBE_API_KEY")
|
log.Errorln("Wallet S3 configuration is incomplete")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if awsS3ID == "" {
|
if configs.Configuration.BlockchaindbS3Config.ID == "" || configs.Configuration.BlockchaindbS3Config.Region == "" || configs.Configuration.BlockchaindbS3Config.Bucket == "" || configs.Configuration.BlockchaindbS3Config.Secret == "" || configs.Configuration.BlockchaindbS3Config.Endpoint == "" {
|
||||||
log.Errorln("AWS S3 ID credentials were not defined. Please set the environment variable AWS_S3_ID")
|
log.Errorln("Blockchain DBs S3 configuration is incomplete")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if awsS3Secret == "" {
|
if configs.Configuration.LbrycrdString == "" {
|
||||||
log.Errorln("AWS S3 Secret credentials were not defined. Please set the environment variable AWS_S3_SECRET")
|
log.Infoln("Using default (local) lbrycrd instance. Set lbrycrd_string if you want to use something else")
|
||||||
return
|
|
||||||
}
|
|
||||||
if awsS3Region == "" {
|
|
||||||
log.Errorln("AWS S3 Region was not defined. Please set the environment variable AWS_S3_REGION")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if awsS3Bucket == "" {
|
|
||||||
log.Errorln("AWS S3 Bucket was not defined. Please set the environment variable AWS_S3_BUCKET")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if lbrycrdString == "" {
|
|
||||||
log.Infoln("Using default (local) lbrycrd instance. Set LBRYCRD_STRING if you want to use something else")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
blobsDir := ytUtils.GetBlobsDir()
|
blobsDir := ytUtils.GetBlobsDir()
|
||||||
|
|
||||||
syncProperties := &sdk.SyncProperties{
|
|
||||||
SyncFrom: syncFrom,
|
|
||||||
SyncUntil: syncUntil,
|
|
||||||
YoutubeChannelID: channelID,
|
|
||||||
}
|
|
||||||
apiConfig := &sdk.APIConfig{
|
|
||||||
YoutubeAPIKey: youtubeAPIKey,
|
|
||||||
ApiURL: apiURL,
|
|
||||||
ApiToken: apiToken,
|
|
||||||
HostName: hostname,
|
|
||||||
}
|
|
||||||
sm := manager.NewSyncManager(
|
sm := manager.NewSyncManager(
|
||||||
stopOnError,
|
cliFlags,
|
||||||
maxTries,
|
|
||||||
takeOverExistingChannel,
|
|
||||||
refill,
|
|
||||||
limit,
|
|
||||||
skipSpaceCheck,
|
|
||||||
syncUpdate,
|
|
||||||
concurrentJobs,
|
|
||||||
concurrentJobs,
|
|
||||||
blobsDir,
|
blobsDir,
|
||||||
videosLimit,
|
|
||||||
maxVideoSize,
|
|
||||||
lbrycrdString,
|
|
||||||
awsS3ID,
|
|
||||||
awsS3Secret,
|
|
||||||
awsS3Region,
|
|
||||||
awsS3Bucket,
|
|
||||||
syncStatus,
|
|
||||||
singleRun,
|
|
||||||
syncProperties,
|
|
||||||
apiConfig,
|
|
||||||
maxVideoLength,
|
|
||||||
removeDBUnpublished,
|
|
||||||
upgradeMetadata,
|
|
||||||
)
|
)
|
||||||
err := sm.Start()
|
err = sm.Start()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ytUtils.SendErrorToSlack(errors.FullTrace(err))
|
ytUtils.SendErrorToSlack(errors.FullTrace(err))
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,32 +0,0 @@
|
||||||
package manager
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/lbryio/lbry.go/extras/errors"
|
|
||||||
|
|
||||||
"google.golang.org/api/googleapi/transport"
|
|
||||||
"google.golang.org/api/youtube/v3"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (s *Sync) CountVideos() (uint64, error) {
|
|
||||||
client := &http.Client{
|
|
||||||
Transport: &transport.APIKey{Key: s.APIConfig.YoutubeAPIKey},
|
|
||||||
}
|
|
||||||
|
|
||||||
service, err := youtube.New(client)
|
|
||||||
if err != nil {
|
|
||||||
return 0, errors.Prefix("error creating YouTube service", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := service.Channels.List("statistics").Id(s.YoutubeChannelID).Do()
|
|
||||||
if err != nil {
|
|
||||||
return 0, errors.Prefix("error getting channels", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(response.Items) < 1 {
|
|
||||||
return 0, errors.Err("youtube channel not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
return response.Items[0].Statistics.VideoCount, nil
|
|
||||||
}
|
|
|
@ -3,104 +3,53 @@ package manager
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/lbryio/ytsync/blobs_reflector"
|
"github.com/lbryio/ytsync/v5/blobs_reflector"
|
||||||
"github.com/lbryio/ytsync/namer"
|
"github.com/lbryio/ytsync/v5/configs"
|
||||||
"github.com/lbryio/ytsync/sdk"
|
"github.com/lbryio/ytsync/v5/ip_manager"
|
||||||
logUtils "github.com/lbryio/ytsync/util"
|
"github.com/lbryio/ytsync/v5/namer"
|
||||||
|
"github.com/lbryio/ytsync/v5/sdk"
|
||||||
|
"github.com/lbryio/ytsync/v5/shared"
|
||||||
|
logUtils "github.com/lbryio/ytsync/v5/util"
|
||||||
|
|
||||||
"github.com/lbryio/lbry.go/extras/errors"
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||||
"github.com/lbryio/lbry.go/extras/util"
|
"github.com/lbryio/lbry.go/v2/extras/util"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
type SyncManager struct {
|
type SyncManager struct {
|
||||||
stopOnError bool
|
CliFlags shared.SyncFlags
|
||||||
maxTries int
|
ApiConfig *sdk.APIConfig
|
||||||
takeOverExistingChannel bool
|
LbrycrdDsn string
|
||||||
refill int
|
|
||||||
limit int
|
|
||||||
skipSpaceCheck bool
|
|
||||||
syncUpdate bool
|
|
||||||
concurrentJobs int
|
|
||||||
concurrentVideos int
|
|
||||||
blobsDir string
|
blobsDir string
|
||||||
videosLimit int
|
channelsToSync []Sync
|
||||||
maxVideoSize int
|
|
||||||
maxVideoLength float64
|
|
||||||
lbrycrdString string
|
|
||||||
awsS3ID string
|
|
||||||
awsS3Secret string
|
|
||||||
awsS3Region string
|
|
||||||
syncStatus string
|
|
||||||
awsS3Bucket string
|
|
||||||
singleRun bool
|
|
||||||
syncProperties *sdk.SyncProperties
|
|
||||||
apiConfig *sdk.APIConfig
|
|
||||||
removeDBUnpublished bool
|
|
||||||
upgradeMetadata bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewSyncManager(stopOnError bool, maxTries int, takeOverExistingChannel bool, refill int, limit int,
|
func NewSyncManager(cliFlags shared.SyncFlags, blobsDir string) *SyncManager {
|
||||||
skipSpaceCheck bool, syncUpdate bool, concurrentJobs int, concurrentVideos int, blobsDir string, videosLimit int,
|
|
||||||
maxVideoSize int, lbrycrdString string, awsS3ID string, awsS3Secret string, awsS3Region string, awsS3Bucket string,
|
|
||||||
syncStatus string, singleRun bool, syncProperties *sdk.SyncProperties, apiConfig *sdk.APIConfig, maxVideoLength float64, removeDBUnpublished bool, upgradeMetadata bool) *SyncManager {
|
|
||||||
return &SyncManager{
|
return &SyncManager{
|
||||||
stopOnError: stopOnError,
|
CliFlags: cliFlags,
|
||||||
maxTries: maxTries,
|
|
||||||
takeOverExistingChannel: takeOverExistingChannel,
|
|
||||||
refill: refill,
|
|
||||||
limit: limit,
|
|
||||||
skipSpaceCheck: skipSpaceCheck,
|
|
||||||
syncUpdate: syncUpdate,
|
|
||||||
concurrentJobs: concurrentJobs,
|
|
||||||
concurrentVideos: concurrentVideos,
|
|
||||||
blobsDir: blobsDir,
|
blobsDir: blobsDir,
|
||||||
videosLimit: videosLimit,
|
LbrycrdDsn: configs.Configuration.LbrycrdString,
|
||||||
maxVideoSize: maxVideoSize,
|
ApiConfig: sdk.GetAPIsConfigs(),
|
||||||
maxVideoLength: maxVideoLength,
|
|
||||||
lbrycrdString: lbrycrdString,
|
|
||||||
awsS3ID: awsS3ID,
|
|
||||||
awsS3Secret: awsS3Secret,
|
|
||||||
awsS3Region: awsS3Region,
|
|
||||||
awsS3Bucket: awsS3Bucket,
|
|
||||||
syncStatus: syncStatus,
|
|
||||||
singleRun: singleRun,
|
|
||||||
syncProperties: syncProperties,
|
|
||||||
apiConfig: apiConfig,
|
|
||||||
removeDBUnpublished: removeDBUnpublished,
|
|
||||||
upgradeMetadata: upgradeMetadata,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
func (s *SyncManager) enqueueChannel(channel *shared.YoutubeChannel) {
|
||||||
const (
|
s.channelsToSync = append(s.channelsToSync, Sync{
|
||||||
StatusPending = "pending" // waiting for permission to sync
|
DbChannelData: channel,
|
||||||
StatusPendingEmail = "pendingemail" // permission granted but missing email
|
Manager: s,
|
||||||
StatusQueued = "queued" // in sync queue. will be synced soon
|
namer: namer.NewNamer(),
|
||||||
StatusPendingUpgrade = "pendingupgrade" // in sync queue. will be synced soon
|
hardVideoFailure: hardVideoFailure{
|
||||||
StatusSyncing = "syncing" // syncing now
|
lock: &sync.Mutex{},
|
||||||
StatusSynced = "synced" // done
|
},
|
||||||
StatusFailed = "failed"
|
})
|
||||||
StatusFinalized = "finalized" // no more changes allowed
|
}
|
||||||
StatusAbandoned = "abandoned" // deleted on youtube or banned
|
|
||||||
)
|
|
||||||
|
|
||||||
var SyncStatuses = []string{StatusPending, StatusPendingEmail, StatusPendingUpgrade, StatusQueued, StatusSyncing, StatusSynced, StatusFailed, StatusFinalized, StatusAbandoned}
|
|
||||||
|
|
||||||
const (
|
|
||||||
VideoStatusPublished = "published"
|
|
||||||
VideoStatusFailed = "failed"
|
|
||||||
VideoStatusUpgradeFailed = "upgradefailed"
|
|
||||||
VideoStatusUnpublished = "unpublished"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (s *SyncManager) Start() error {
|
func (s *SyncManager) Start() error {
|
||||||
|
|
||||||
if logUtils.ShouldCleanOnStartup() {
|
if logUtils.ShouldCleanOnStartup() {
|
||||||
err := logUtils.CleanForStartup()
|
err := logUtils.CleanForStartup()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -108,149 +57,131 @@ func (s *SyncManager) Start() error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var lastChannelProcessed string
|
||||||
|
var secondLastChannelProcessed string
|
||||||
syncCount := 0
|
syncCount := 0
|
||||||
for {
|
for {
|
||||||
|
s.channelsToSync = make([]Sync, 0, 10) // reset sync queue
|
||||||
err := s.checkUsedSpace()
|
err := s.checkUsedSpace()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var syncs []Sync
|
|
||||||
shouldInterruptLoop := false
|
shouldInterruptLoop := false
|
||||||
|
|
||||||
isSingleChannelSync := s.syncProperties.YoutubeChannelID != ""
|
if s.CliFlags.IsSingleChannelSync() {
|
||||||
if isSingleChannelSync {
|
channels, err := s.ApiConfig.FetchChannels("", &s.CliFlags)
|
||||||
channels, err := s.apiConfig.FetchChannels("", s.syncProperties)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
if len(channels) != 1 {
|
if len(channels) != 1 {
|
||||||
return errors.Err("Expected 1 channel, %d returned", len(channels))
|
return errors.Err("Expected 1 channel, %d returned", len(channels))
|
||||||
}
|
}
|
||||||
lbryChannelName := channels[0].DesiredChannelName
|
s.enqueueChannel(&channels[0])
|
||||||
syncs = make([]Sync, 1)
|
|
||||||
syncs[0] = Sync{
|
|
||||||
APIConfig: s.apiConfig,
|
|
||||||
YoutubeChannelID: s.syncProperties.YoutubeChannelID,
|
|
||||||
LbryChannelName: lbryChannelName,
|
|
||||||
lbryChannelID: channels[0].ChannelClaimID,
|
|
||||||
StopOnError: s.stopOnError,
|
|
||||||
MaxTries: s.maxTries,
|
|
||||||
ConcurrentVideos: s.concurrentVideos,
|
|
||||||
TakeOverExistingChannel: s.takeOverExistingChannel,
|
|
||||||
Refill: s.refill,
|
|
||||||
Manager: s,
|
|
||||||
LbrycrdString: s.lbrycrdString,
|
|
||||||
AwsS3ID: s.awsS3ID,
|
|
||||||
AwsS3Secret: s.awsS3Secret,
|
|
||||||
AwsS3Region: s.awsS3Region,
|
|
||||||
AwsS3Bucket: s.awsS3Bucket,
|
|
||||||
namer: namer.NewNamer(),
|
|
||||||
Fee: channels[0].Fee,
|
|
||||||
}
|
|
||||||
shouldInterruptLoop = true
|
shouldInterruptLoop = true
|
||||||
} else {
|
} else {
|
||||||
var queuesToSync []string
|
var queuesToSync []string
|
||||||
//TODO: implement scrambling to avoid starvation of queues
|
if s.CliFlags.Status != "" {
|
||||||
if s.syncStatus != "" {
|
queuesToSync = append(queuesToSync, shared.StatusSyncing, s.CliFlags.Status)
|
||||||
queuesToSync = append(queuesToSync, s.syncStatus)
|
} else if s.CliFlags.SyncUpdate {
|
||||||
} else if s.syncUpdate {
|
queuesToSync = append(queuesToSync, shared.StatusSyncing, shared.StatusSynced)
|
||||||
queuesToSync = append(queuesToSync, StatusSyncing, StatusSynced)
|
|
||||||
} else {
|
} else {
|
||||||
queuesToSync = append(queuesToSync, StatusSyncing, StatusQueued)
|
queuesToSync = append(queuesToSync, shared.StatusSyncing, shared.StatusQueued)
|
||||||
}
|
}
|
||||||
|
if s.CliFlags.SecondaryStatus != "" {
|
||||||
|
queuesToSync = append(queuesToSync, s.CliFlags.SecondaryStatus)
|
||||||
|
}
|
||||||
|
queues:
|
||||||
for _, q := range queuesToSync {
|
for _, q := range queuesToSync {
|
||||||
//temporary override for sync-until to give tom the time to review the channels
|
channels, err := s.ApiConfig.FetchChannels(q, &s.CliFlags)
|
||||||
if q == StatusQueued {
|
|
||||||
s.syncProperties.SyncUntil = time.Now().AddDate(0, 0, -1).Unix()
|
|
||||||
}
|
|
||||||
channels, err := s.apiConfig.FetchChannels(q, s.syncProperties)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
log.Infof("There are %d channels in the \"%s\" queue", len(channels), q)
|
log.Infof("Currently processing the \"%s\" queue with %d channels", q, len(channels))
|
||||||
if len(channels) > 0 {
|
for _, c := range channels {
|
||||||
c := channels[0]
|
s.enqueueChannel(&c)
|
||||||
syncs = append(syncs, Sync{
|
queueAll := q == shared.StatusFailed || q == shared.StatusSyncing
|
||||||
APIConfig: s.apiConfig,
|
if !queueAll {
|
||||||
YoutubeChannelID: c.ChannelId,
|
break queues
|
||||||
LbryChannelName: c.DesiredChannelName,
|
|
||||||
lbryChannelID: c.ChannelClaimID,
|
|
||||||
StopOnError: s.stopOnError,
|
|
||||||
MaxTries: s.maxTries,
|
|
||||||
ConcurrentVideos: s.concurrentVideos,
|
|
||||||
TakeOverExistingChannel: s.takeOverExistingChannel,
|
|
||||||
Refill: s.refill,
|
|
||||||
Manager: s,
|
|
||||||
LbrycrdString: s.lbrycrdString,
|
|
||||||
AwsS3ID: s.awsS3ID,
|
|
||||||
AwsS3Secret: s.awsS3Secret,
|
|
||||||
AwsS3Region: s.awsS3Region,
|
|
||||||
AwsS3Bucket: s.awsS3Bucket,
|
|
||||||
namer: namer.NewNamer(),
|
|
||||||
Fee: c.Fee,
|
|
||||||
})
|
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
log.Infof("Drained the \"%s\" queue", q)
|
||||||
}
|
}
|
||||||
if len(syncs) == 0 {
|
}
|
||||||
|
if len(s.channelsToSync) == 0 {
|
||||||
log.Infoln("No channels to sync. Pausing 5 minutes!")
|
log.Infoln("No channels to sync. Pausing 5 minutes!")
|
||||||
time.Sleep(5 * time.Minute)
|
time.Sleep(5 * time.Minute)
|
||||||
}
|
}
|
||||||
for _, sync := range syncs {
|
for _, sync := range s.channelsToSync {
|
||||||
|
if lastChannelProcessed == sync.DbChannelData.ChannelId && secondLastChannelProcessed == lastChannelProcessed {
|
||||||
|
util.SendToSlack("We just killed a sync for %s to stop looping! (%s)", sync.DbChannelData.DesiredChannelName, sync.DbChannelData.ChannelId)
|
||||||
|
stopTheLoops := errors.Err("Found channel %s running 3 times, set it to failed, and reprocess later", sync.DbChannelData.DesiredChannelName)
|
||||||
|
sync.setChannelTerminationStatus(&stopTheLoops)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
secondLastChannelProcessed = lastChannelProcessed
|
||||||
|
lastChannelProcessed = sync.DbChannelData.ChannelId
|
||||||
shouldNotCount := false
|
shouldNotCount := false
|
||||||
logUtils.SendInfoToSlack("Syncing %s (%s) to LBRY! total processed channels since startup: %d", sync.LbryChannelName, sync.YoutubeChannelID, syncCount+1)
|
logUtils.SendInfoToSlack("Syncing %s (%s) to LBRY! total processed channels since startup: %d", sync.DbChannelData.DesiredChannelName, sync.DbChannelData.ChannelId, syncCount+1)
|
||||||
err := sync.FullCycle()
|
err := sync.FullCycle()
|
||||||
|
//TODO: THIS IS A TEMPORARY WORK AROUND FOR THE STUPID IP LOCKUP BUG
|
||||||
|
ipPool, _ := ip_manager.GetIPPool(sync.grp)
|
||||||
|
if ipPool != nil {
|
||||||
|
ipPool.ReleaseAll()
|
||||||
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if strings.Contains(err.Error(), "quotaExceeded") {
|
||||||
|
logUtils.SleepUntilQuotaReset()
|
||||||
|
}
|
||||||
fatalErrors := []string{
|
fatalErrors := []string{
|
||||||
"default_wallet already exists",
|
"default_wallet already exists",
|
||||||
"WALLET HAS NOT BEEN MOVED TO THE WALLET BACKUP DIR",
|
"WALLET HAS NOT BEEN MOVED TO THE WALLET BACKUP DIR",
|
||||||
"NotEnoughFunds",
|
"NotEnoughFunds",
|
||||||
"no space left on device",
|
"no space left on device",
|
||||||
"failure uploading wallet",
|
"there was a problem uploading the wallet",
|
||||||
"the channel in the wallet is different than the channel in the database",
|
"the channel in the wallet is different than the channel in the database",
|
||||||
"this channel does not belong to this wallet!",
|
"this channel does not belong to this wallet!",
|
||||||
|
"You already have a stream claim published under the name",
|
||||||
}
|
}
|
||||||
|
|
||||||
if util.SubstringInSlice(err.Error(), fatalErrors) {
|
if util.SubstringInSlice(err.Error(), fatalErrors) {
|
||||||
return errors.Prefix("@Nikooo777 this requires manual intervention! Exiting...", err)
|
return errors.Prefix("@Nikooo777 this requires manual intervention! Exiting...", err)
|
||||||
}
|
}
|
||||||
shouldNotCount = strings.Contains(err.Error(), "this youtube channel is being managed by another server")
|
shouldNotCount = strings.Contains(err.Error(), "this youtube channel is being managed by another server")
|
||||||
if !shouldNotCount {
|
if !shouldNotCount {
|
||||||
logUtils.SendInfoToSlack("A non fatal error was reported by the sync process. %s\nContinuing...", err.Error())
|
logUtils.SendInfoToSlack("A non fatal error was reported by the sync process.\n%s", errors.FullTrace(err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
err = logUtils.CleanupMetadata()
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("something went wrong while trying to clear out the video metadata directory: %s", errors.FullTrace(err))
|
||||||
|
}
|
||||||
err = blobs_reflector.ReflectAndClean()
|
err = blobs_reflector.ReflectAndClean()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Prefix("@Nikooo777 something went wrong while reflecting blobs", err)
|
return errors.Prefix("@Nikooo777 something went wrong while reflecting blobs", err)
|
||||||
}
|
}
|
||||||
logUtils.SendInfoToSlack("Syncing %s (%s) reached an end. total processed channels since startup: %d", sync.LbryChannelName, sync.YoutubeChannelID, syncCount+1)
|
logUtils.SendInfoToSlack("%s (%s) reached an end. Total processed channels since startup: %d", sync.DbChannelData.DesiredChannelName, sync.DbChannelData.ChannelId, syncCount+1)
|
||||||
if !shouldNotCount {
|
if !shouldNotCount {
|
||||||
syncCount++
|
syncCount++
|
||||||
}
|
}
|
||||||
if sync.IsInterrupted() || (s.limit != 0 && syncCount >= s.limit) {
|
if sync.IsInterrupted() || (s.CliFlags.Limit != 0 && syncCount >= s.CliFlags.Limit) {
|
||||||
shouldInterruptLoop = true
|
shouldInterruptLoop = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if shouldInterruptLoop || s.singleRun {
|
if shouldInterruptLoop || s.CliFlags.SingleRun {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (s *SyncManager) GetS3AWSConfig() aws.Config {
|
|
||||||
return aws.Config{
|
|
||||||
Credentials: credentials.NewStaticCredentials(s.awsS3ID, s.awsS3Secret, ""),
|
|
||||||
Region: &s.awsS3Region,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func (s *SyncManager) checkUsedSpace() error {
|
func (s *SyncManager) checkUsedSpace() error {
|
||||||
usedPctile, err := GetUsedSpace(logUtils.GetBlobsDir())
|
usedPctile, err := GetUsedSpace(logUtils.GetBlobsDir())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
if usedPctile >= 0.90 && !s.skipSpaceCheck {
|
if usedPctile >= 0.90 && !s.CliFlags.SkipSpaceCheck {
|
||||||
return errors.Err(fmt.Sprintf("more than 90%% of the space has been used. use --skip-space-check to ignore. Used: %.1f%%", usedPctile*100))
|
return errors.Err(fmt.Sprintf("more than 90%% of the space has been used. use --skip-space-check to ignore. Used: %.1f%%", usedPctile*100))
|
||||||
}
|
}
|
||||||
log.Infof("disk usage: %.1f%%", usedPctile*100)
|
log.Infof("disk usage: %.1f%%", usedPctile*100)
|
||||||
|
|
285
manager/s3_storage.go
Normal file
285
manager/s3_storage.go
Normal file
|
@ -0,0 +1,285 @@
|
||||||
|
package manager
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/lbryio/ytsync/v5/configs"
|
||||||
|
"github.com/lbryio/ytsync/v5/util"
|
||||||
|
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
|
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *Sync) getS3Downloader(config *aws.Config) (*s3manager.Downloader, error) {
|
||||||
|
s3Session, err := session.NewSession(config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Prefix("error starting session", err)
|
||||||
|
}
|
||||||
|
downloader := s3manager.NewDownloader(s3Session)
|
||||||
|
return downloader, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Sync) getS3Uploader(config *aws.Config) (*s3manager.Uploader, error) {
|
||||||
|
s3Session, err := session.NewSession(config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Prefix("error starting session", err)
|
||||||
|
}
|
||||||
|
uploader := s3manager.NewUploader(s3Session)
|
||||||
|
return uploader, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Sync) downloadWallet() error {
|
||||||
|
defaultWalletDir, defaultTempWalletDir, key, err := s.getWalletPaths()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
downloader, err := s.getS3Downloader(configs.Configuration.WalletS3Config.GetS3AWSConfig())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
out, err := os.Create(defaultTempWalletDir)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Prefix("error creating temp wallet", err)
|
||||||
|
}
|
||||||
|
defer out.Close()
|
||||||
|
|
||||||
|
bytesWritten, err := downloader.Download(out, &s3.GetObjectInput{
|
||||||
|
Bucket: aws.String(configs.Configuration.WalletS3Config.Bucket),
|
||||||
|
Key: key,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
// Casting to the awserr.Error type will allow you to inspect the error
|
||||||
|
// code returned by the service in code. The error code can be used
|
||||||
|
// to switch on context specific functionality. In this case a context
|
||||||
|
// specific error message is printed to the user based on the bucket
|
||||||
|
// and key existing.
|
||||||
|
//
|
||||||
|
// For information on other S3 API error codes see:
|
||||||
|
// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
|
||||||
|
if aerr, ok := err.(awserr.Error); ok {
|
||||||
|
code := aerr.Code()
|
||||||
|
if code == s3.ErrCodeNoSuchKey {
|
||||||
|
return errors.Err("wallet not on S3")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
} else if bytesWritten == 0 {
|
||||||
|
return errors.Err("zero bytes written")
|
||||||
|
}
|
||||||
|
|
||||||
|
err = os.Rename(defaultTempWalletDir, defaultWalletDir)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Prefix("error replacing temp wallet for default wallet", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Sync) downloadBlockchainDB() error {
|
||||||
|
if util.IsRegTest() {
|
||||||
|
return nil // tests fail if we re-use the same blockchain DB
|
||||||
|
}
|
||||||
|
defaultBDBPath, defaultTempBDBPath, key, err := s.getBlockchainDBPaths()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
files, err := filepath.Glob(defaultBDBPath + "*")
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
for _, f := range files {
|
||||||
|
err = os.Remove(f)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if s.DbChannelData.WipeDB {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
downloader, err := s.getS3Downloader(configs.Configuration.BlockchaindbS3Config.GetS3AWSConfig())
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
out, err := os.Create(defaultTempBDBPath)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Prefix("error creating temp blockchain DB file", err)
|
||||||
|
}
|
||||||
|
defer out.Close()
|
||||||
|
|
||||||
|
bytesWritten, err := downloader.Download(out, &s3.GetObjectInput{
|
||||||
|
Bucket: aws.String(configs.Configuration.BlockchaindbS3Config.Bucket),
|
||||||
|
Key: key,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
// Casting to the awserr.Error type will allow you to inspect the error
|
||||||
|
// code returned by the service in code. The error code can be used
|
||||||
|
// to switch on context specific functionality. In this case a context
|
||||||
|
// specific error message is printed to the user based on the bucket
|
||||||
|
// and key existing.
|
||||||
|
//
|
||||||
|
// For information on other S3 API error codes see:
|
||||||
|
// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
|
||||||
|
if aerr, ok := err.(awserr.Error); ok {
|
||||||
|
code := aerr.Code()
|
||||||
|
if code == s3.ErrCodeNoSuchKey {
|
||||||
|
return nil // let ytsync sync the database by itself
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return errors.Err(err)
|
||||||
|
} else if bytesWritten == 0 {
|
||||||
|
return errors.Err("zero bytes written")
|
||||||
|
}
|
||||||
|
|
||||||
|
blockchainDbDir := strings.Replace(defaultBDBPath, "blockchain.db", "", -1)
|
||||||
|
err = util.Untar(defaultTempBDBPath, blockchainDbDir)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Prefix("error extracting blockchain.db files", err)
|
||||||
|
}
|
||||||
|
err = os.Remove(defaultTempBDBPath)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
log.Printf("blockchain.db data downloaded and extracted to %s", blockchainDbDir)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Sync) getWalletPaths() (defaultWallet, tempWallet string, key *string, err error) {
|
||||||
|
defaultWallet = os.Getenv("HOME") + "/.lbryum/wallets/default_wallet"
|
||||||
|
tempWallet = os.Getenv("HOME") + "/.lbryum/wallets/tmp_wallet"
|
||||||
|
key = aws.String("/wallets/" + s.DbChannelData.ChannelId)
|
||||||
|
if util.IsRegTest() {
|
||||||
|
defaultWallet = os.Getenv("HOME") + "/.lbryum_regtest/wallets/default_wallet"
|
||||||
|
tempWallet = os.Getenv("HOME") + "/.lbryum_regtest/wallets/tmp_wallet"
|
||||||
|
key = aws.String("/regtest/" + s.DbChannelData.ChannelId)
|
||||||
|
}
|
||||||
|
|
||||||
|
lbryumDir := os.Getenv("LBRYUM_DIR")
|
||||||
|
if lbryumDir != "" {
|
||||||
|
defaultWallet = lbryumDir + "/wallets/default_wallet"
|
||||||
|
tempWallet = lbryumDir + "/wallets/tmp_wallet"
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := os.Stat(defaultWallet); !os.IsNotExist(err) {
|
||||||
|
return "", "", nil, errors.Err("default_wallet already exists")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Sync) getBlockchainDBPaths() (defaultDB, tempDB string, key *string, err error) {
|
||||||
|
lbryumDir := os.Getenv("LBRYUM_DIR")
|
||||||
|
if lbryumDir == "" {
|
||||||
|
if util.IsRegTest() {
|
||||||
|
lbryumDir = os.Getenv("HOME") + "/.lbryum_regtest"
|
||||||
|
} else {
|
||||||
|
lbryumDir = os.Getenv("HOME") + "/.lbryum"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
defaultDB = lbryumDir + "/lbc_mainnet/blockchain.db"
|
||||||
|
tempDB = lbryumDir + "/lbc_mainnet/tmp_blockchain.tar"
|
||||||
|
key = aws.String("/blockchain_dbs/" + s.DbChannelData.ChannelId + ".tar")
|
||||||
|
if util.IsRegTest() {
|
||||||
|
defaultDB = lbryumDir + "/lbc_regtest/blockchain.db"
|
||||||
|
tempDB = lbryumDir + "/lbc_regtest/tmp_blockchain.tar"
|
||||||
|
key = aws.String("/regtest_dbs/" + s.DbChannelData.ChannelId + ".tar")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Sync) uploadWallet() error {
|
||||||
|
defaultWalletDir := util.GetDefaultWalletPath()
|
||||||
|
key := aws.String("/wallets/" + s.DbChannelData.ChannelId)
|
||||||
|
if util.IsRegTest() {
|
||||||
|
key = aws.String("/regtest/" + s.DbChannelData.ChannelId)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := os.Stat(defaultWalletDir); os.IsNotExist(err) {
|
||||||
|
return errors.Err("default_wallet does not exist")
|
||||||
|
}
|
||||||
|
|
||||||
|
uploader, err := s.getS3Uploader(configs.Configuration.WalletS3Config.GetS3AWSConfig())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
file, err := os.Open(defaultWalletDir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
start := time.Now()
|
||||||
|
|
||||||
|
for time.Since(start) < 30*time.Minute {
|
||||||
|
_, err = uploader.Upload(&s3manager.UploadInput{
|
||||||
|
Bucket: aws.String(configs.Configuration.WalletS3Config.Bucket),
|
||||||
|
Key: key,
|
||||||
|
Body: file,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return errors.Prefix("there was a problem uploading the wallet to S3", errors.Err(err))
|
||||||
|
}
|
||||||
|
log.Println("wallet uploaded to S3")
|
||||||
|
|
||||||
|
return os.Remove(defaultWalletDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Sync) uploadBlockchainDB() error {
|
||||||
|
defaultBDBDir, _, key, err := s.getBlockchainDBPaths()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := os.Stat(defaultBDBDir); os.IsNotExist(err) {
|
||||||
|
return errors.Err("blockchain.db does not exist")
|
||||||
|
}
|
||||||
|
files, err := filepath.Glob(defaultBDBDir + "*")
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
tarPath := strings.Replace(defaultBDBDir, "blockchain.db", "", -1) + s.DbChannelData.ChannelId + ".tar"
|
||||||
|
err = util.CreateTarball(tarPath, files)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
uploader, err := s.getS3Uploader(configs.Configuration.BlockchaindbS3Config.GetS3AWSConfig())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
file, err := os.Open(tarPath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
_, err = uploader.Upload(&s3manager.UploadInput{
|
||||||
|
Bucket: aws.String(configs.Configuration.BlockchaindbS3Config.Bucket),
|
||||||
|
Key: key,
|
||||||
|
Body: file,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Println("blockchain.db files uploaded to S3")
|
||||||
|
err = os.Remove(tarPath)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
return os.Remove(defaultBDBDir)
|
||||||
|
}
|
387
manager/setup.go
387
manager/setup.go
|
@ -3,32 +3,41 @@ package manager
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"net/http"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/lbryio/lbry.go/extras/errors"
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||||
"github.com/lbryio/lbry.go/extras/jsonrpc"
|
"github.com/lbryio/lbry.go/v2/extras/jsonrpc"
|
||||||
"github.com/lbryio/lbry.go/extras/util"
|
"github.com/lbryio/lbry.go/v2/extras/util"
|
||||||
"github.com/lbryio/ytsync/tagsManager"
|
"github.com/lbryio/ytsync/v5/shared"
|
||||||
"github.com/lbryio/ytsync/thumbs"
|
"github.com/lbryio/ytsync/v5/timing"
|
||||||
logUtils "github.com/lbryio/ytsync/util"
|
logUtils "github.com/lbryio/ytsync/v5/util"
|
||||||
|
"github.com/lbryio/ytsync/v5/ytapi"
|
||||||
|
|
||||||
|
"github.com/lbryio/ytsync/v5/tags_manager"
|
||||||
|
"github.com/lbryio/ytsync/v5/thumbs"
|
||||||
|
|
||||||
"github.com/shopspring/decimal"
|
"github.com/shopspring/decimal"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"google.golang.org/api/googleapi/transport"
|
|
||||||
"google.golang.org/api/youtube/v3"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func (s *Sync) enableAddressReuse() error {
|
func (s *Sync) enableAddressReuse() error {
|
||||||
accountsResponse, err := s.daemon.AccountList()
|
accountsResponse, err := s.daemon.AccountList(1, 50)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
accounts := accountsResponse.LBCMainnet
|
accounts := make([]jsonrpc.Account, 0, len(accountsResponse.Items))
|
||||||
|
ledger := "lbc_mainnet"
|
||||||
if logUtils.IsRegTest() {
|
if logUtils.IsRegTest() {
|
||||||
accounts = accountsResponse.LBCRegtest
|
ledger = "lbc_regtest"
|
||||||
}
|
}
|
||||||
|
for _, a := range accountsResponse.Items {
|
||||||
|
if *a.Ledger == ledger {
|
||||||
|
accounts = append(accounts, a)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for _, a := range accounts {
|
for _, a := range accounts {
|
||||||
_, err = s.daemon.AccountSet(a.ID, jsonrpc.AccountSettings{
|
_, err = s.daemon.AccountSet(a.ID, jsonrpc.AccountSettings{
|
||||||
ChangeMaxUses: util.PtrToInt(1000),
|
ChangeMaxUses: util.PtrToInt(1000),
|
||||||
|
@ -41,6 +50,10 @@ func (s *Sync) enableAddressReuse() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
func (s *Sync) walletSetup() error {
|
func (s *Sync) walletSetup() error {
|
||||||
|
start := time.Now()
|
||||||
|
defer func(start time.Time) {
|
||||||
|
timing.TimedComponent("walletSetup").Add(time.Since(start))
|
||||||
|
}(start)
|
||||||
//prevent unnecessary concurrent execution and publishing while refilling/reallocating UTXOs
|
//prevent unnecessary concurrent execution and publishing while refilling/reallocating UTXOs
|
||||||
s.walletMux.Lock()
|
s.walletMux.Lock()
|
||||||
defer s.walletMux.Unlock()
|
defer s.walletMux.Unlock()
|
||||||
|
@ -55,17 +68,13 @@ func (s *Sync) walletSetup() error {
|
||||||
} else if balanceResp == nil {
|
} else if balanceResp == nil {
|
||||||
return errors.Err("no response")
|
return errors.Err("no response")
|
||||||
}
|
}
|
||||||
balance, err := strconv.ParseFloat((string)(*balanceResp), 64)
|
balance, err := strconv.ParseFloat(balanceResp.Available.String(), 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
log.Debugf("Starting balance is %.4f", balance)
|
log.Debugf("Starting balance is %.4f", balance)
|
||||||
|
|
||||||
n, err := s.CountVideos()
|
videosOnYoutube := int(s.DbChannelData.TotalVideos)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
videosOnYoutube := int(n)
|
|
||||||
|
|
||||||
log.Debugf("Source channel has %d videos", videosOnYoutube)
|
log.Debugf("Source channel has %d videos", videosOnYoutube)
|
||||||
if videosOnYoutube == 0 {
|
if videosOnYoutube == 0 {
|
||||||
|
@ -90,22 +99,30 @@ func (s *Sync) walletSetup() error {
|
||||||
|
|
||||||
log.Debugf("We already allocated credits for %d published videos and %d failed videos", publishedCount, failedCount)
|
log.Debugf("We already allocated credits for %d published videos and %d failed videos", publishedCount, failedCount)
|
||||||
|
|
||||||
if videosOnYoutube > s.Manager.videosLimit {
|
if videosOnYoutube > s.Manager.CliFlags.VideosToSync(s.DbChannelData.TotalSubscribers) {
|
||||||
videosOnYoutube = s.Manager.videosLimit
|
videosOnYoutube = s.Manager.CliFlags.VideosToSync(s.DbChannelData.TotalSubscribers)
|
||||||
}
|
}
|
||||||
unallocatedVideos := videosOnYoutube - (publishedCount + failedCount)
|
unallocatedVideos := videosOnYoutube - (publishedCount + failedCount)
|
||||||
requiredBalance := float64(unallocatedVideos)*(publishAmount+estimatedMaxTxFee) + channelClaimAmount
|
if unallocatedVideos < 0 {
|
||||||
if s.Manager.upgradeMetadata {
|
unallocatedVideos = 0
|
||||||
requiredBalance += float64(notUpgradedCount) * 0.001
|
}
|
||||||
|
channelFee := channelClaimAmount
|
||||||
|
channelAlreadyClaimed := s.DbChannelData.ChannelClaimID != ""
|
||||||
|
if channelAlreadyClaimed {
|
||||||
|
channelFee = 0.0
|
||||||
|
}
|
||||||
|
requiredBalance := float64(unallocatedVideos)*(publishAmount+estimatedMaxTxFee) + channelFee
|
||||||
|
if s.Manager.CliFlags.UpgradeMetadata {
|
||||||
|
requiredBalance += float64(notUpgradedCount) * estimatedMaxTxFee
|
||||||
}
|
}
|
||||||
|
|
||||||
refillAmount := 0.0
|
refillAmount := 0.0
|
||||||
if balance < requiredBalance || balance < minimumAccountBalance {
|
if balance < requiredBalance || balance < minimumAccountBalance {
|
||||||
refillAmount = math.Max(requiredBalance-balance, minimumRefillAmount)
|
refillAmount = math.Max(math.Max(requiredBalance-balance, minimumAccountBalance-balance), minimumRefillAmount)
|
||||||
}
|
}
|
||||||
|
|
||||||
if s.Refill > 0 {
|
if s.Manager.CliFlags.Refill > 0 {
|
||||||
refillAmount += float64(s.Refill)
|
refillAmount += float64(s.Manager.CliFlags.Refill)
|
||||||
}
|
}
|
||||||
|
|
||||||
if refillAmount > 0 {
|
if refillAmount > 0 {
|
||||||
|
@ -113,16 +130,25 @@ func (s *Sync) walletSetup() error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
|
} else if balance > requiredBalance {
|
||||||
|
extraLBC := balance - requiredBalance
|
||||||
|
if extraLBC > 5 {
|
||||||
|
sendBackAmount := extraLBC - 1
|
||||||
|
logUtils.SendInfoToSlack("channel %s has %.1f credits which is %.1f more than it requires (%.1f). We should send at least %.1f that back.", s.DbChannelData.ChannelId, balance, extraLBC, requiredBalance, sendBackAmount)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
claimAddress, err := s.daemon.AddressList(nil)
|
claimAddress, err := s.daemon.AddressList(nil, nil, 1, 20)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
} else if claimAddress == nil {
|
} else if claimAddress == nil {
|
||||||
return errors.Err("could not get unused address")
|
return errors.Err("could not get an address")
|
||||||
}
|
}
|
||||||
s.claimAddress = string((*claimAddress)[0]) //TODO: remove claimAddress completely
|
if s.DbChannelData.PublishAddress.Address == "" || !s.shouldTransfer() {
|
||||||
if s.claimAddress == "" {
|
s.DbChannelData.PublishAddress.Address = string(claimAddress.Items[0].Address)
|
||||||
|
s.DbChannelData.PublishAddress.IsMine = true
|
||||||
|
}
|
||||||
|
if s.DbChannelData.PublishAddress.Address == "" {
|
||||||
return errors.Err("found blank claim address")
|
return errors.Err("found blank claim address")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -134,27 +160,47 @@ func (s *Sync) walletSetup() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Sync) ensureEnoughUTXOs() error {
|
func (s *Sync) getDefaultAccount() (string, error) {
|
||||||
accounts, err := s.daemon.AccountList()
|
start := time.Now()
|
||||||
|
defer func(start time.Time) {
|
||||||
|
timing.TimedComponent("getDefaultAccount").Add(time.Since(start))
|
||||||
|
}(start)
|
||||||
|
if s.defaultAccountID == "" {
|
||||||
|
accountsResponse, err := s.daemon.AccountList(1, 50)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return "", errors.Err(err)
|
||||||
}
|
}
|
||||||
accountsNet := (*accounts).LBCMainnet
|
ledger := "lbc_mainnet"
|
||||||
if logUtils.IsRegTest() {
|
if logUtils.IsRegTest() {
|
||||||
accountsNet = (*accounts).LBCRegtest
|
ledger = "lbc_regtest"
|
||||||
}
|
}
|
||||||
defaultAccount := ""
|
for _, a := range accountsResponse.Items {
|
||||||
for _, account := range accountsNet {
|
if *a.Ledger == ledger {
|
||||||
if account.IsDefault {
|
if a.IsDefault {
|
||||||
defaultAccount = account.ID
|
s.defaultAccountID = a.ID
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if defaultAccount == "" {
|
|
||||||
return errors.Err("No default account found")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
utxolist, err := s.daemon.UTXOList(&defaultAccount)
|
if s.defaultAccountID == "" {
|
||||||
|
return "", errors.Err("No default account found")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s.defaultAccountID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Sync) ensureEnoughUTXOs() error {
|
||||||
|
start := time.Now()
|
||||||
|
defer func(start time.Time) {
|
||||||
|
timing.TimedComponent("ensureEnoughUTXOs").Add(time.Since(start))
|
||||||
|
}(start)
|
||||||
|
defaultAccount, err := s.getDefaultAccount()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
utxolist, err := s.daemon.UTXOList(&defaultAccount, 1, 10000)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
} else if utxolist == nil {
|
} else if utxolist == nil {
|
||||||
|
@ -166,9 +212,9 @@ func (s *Sync) ensureEnoughUTXOs() error {
|
||||||
count := 0
|
count := 0
|
||||||
confirmedCount := 0
|
confirmedCount := 0
|
||||||
|
|
||||||
for _, utxo := range *utxolist {
|
for _, utxo := range utxolist.Items {
|
||||||
amount, _ := strconv.ParseFloat(utxo.Amount, 64)
|
amount, _ := strconv.ParseFloat(utxo.Amount, 64)
|
||||||
if utxo.IsMine && utxo.Type == "payment" && amount > 0.001 {
|
if utxo.IsMyOutput && utxo.Type == "payment" && amount > 0.001 {
|
||||||
if utxo.Confirmations > 0 {
|
if utxo.Confirmations > 0 {
|
||||||
confirmedCount++
|
confirmedCount++
|
||||||
}
|
}
|
||||||
|
@ -185,16 +231,29 @@ func (s *Sync) ensureEnoughUTXOs() error {
|
||||||
return errors.Err("no response")
|
return errors.Err("no response")
|
||||||
}
|
}
|
||||||
|
|
||||||
balanceAmount, err := strconv.ParseFloat((string)(*balance), 64)
|
balanceAmount, err := strconv.ParseFloat(balance.Available.String(), 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
|
//this is dumb but sometimes the balance is negative and it breaks everything, so let's check again
|
||||||
|
if balanceAmount < 0 {
|
||||||
|
log.Infof("negative balance of %.2f found. Waiting to retry...", balanceAmount)
|
||||||
|
time.Sleep(10 * time.Second)
|
||||||
|
balanceAmount, err = strconv.ParseFloat(balance.Available.String(), 64)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
maxUTXOs := uint64(500)
|
maxUTXOs := uint64(500)
|
||||||
desiredUTXOCount := uint64(math.Floor((balanceAmount) / 0.1))
|
desiredUTXOCount := uint64(math.Floor((balanceAmount) / 0.1))
|
||||||
if desiredUTXOCount > maxUTXOs {
|
if desiredUTXOCount > maxUTXOs {
|
||||||
desiredUTXOCount = maxUTXOs
|
desiredUTXOCount = maxUTXOs
|
||||||
}
|
}
|
||||||
log.Infof("Splitting balance of %s evenly between %d UTXOs", *balance, desiredUTXOCount)
|
if desiredUTXOCount < uint64(confirmedCount) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
availableBalance, _ := balance.Available.Float64()
|
||||||
|
log.Infof("Splitting balance of %.3f evenly between %d UTXOs", availableBalance, desiredUTXOCount)
|
||||||
|
|
||||||
broadcastFee := 0.1
|
broadcastFee := 0.1
|
||||||
prefillTx, err := s.daemon.AccountFund(defaultAccount, defaultAccount, fmt.Sprintf("%.4f", balanceAmount-broadcastFee), desiredUTXOCount, false)
|
prefillTx, err := s.daemon.AccountFund(defaultAccount, defaultAccount, fmt.Sprintf("%.4f", balanceAmount-broadcastFee), desiredUTXOCount, false)
|
||||||
|
@ -221,16 +280,9 @@ func (s *Sync) ensureEnoughUTXOs() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Sync) waitForNewBlock() error {
|
func (s *Sync) waitForNewBlock() error {
|
||||||
if logUtils.IsRegTest() && logUtils.IsUsingDocker() {
|
defer func(start time.Time) { timing.TimedComponent("waitForNewBlock").Add(time.Since(start)) }(time.Now())
|
||||||
lbrycrd, err := logUtils.GetLbrycrdClient(s.LbrycrdString)
|
|
||||||
if err != nil {
|
log.Printf("regtest: %t, docker: %t", logUtils.IsRegTest(), logUtils.IsUsingDocker())
|
||||||
return errors.Prefix("error getting lbrycrd client: ", err)
|
|
||||||
}
|
|
||||||
txs, err := lbrycrd.Generate(1)
|
|
||||||
for _, tx := range txs {
|
|
||||||
log.Info("Generated tx: ", tx.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
status, err := s.daemon.Status()
|
status, err := s.daemon.Status()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -243,72 +295,91 @@ func (s *Sync) waitForNewBlock() error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
currentBlock := status.Wallet.Blocks
|
currentBlock := status.Wallet.Blocks
|
||||||
for i := 0; status.Wallet.Blocks <= currentBlock; i++ {
|
for i := 0; status.Wallet.Blocks <= currentBlock; i++ {
|
||||||
if i%3 == 0 {
|
if i%3 == 0 {
|
||||||
log.Printf("Waiting for new block (%d)...", currentBlock+1)
|
log.Printf("Waiting for new block (%d)...", currentBlock+1)
|
||||||
}
|
}
|
||||||
|
if logUtils.IsRegTest() && logUtils.IsUsingDocker() {
|
||||||
|
err = s.GenerateRegtestBlock()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
time.Sleep(10 * time.Second)
|
time.Sleep(10 * time.Second)
|
||||||
status, err = s.daemon.Status()
|
status, err = s.daemon.Status()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
time.Sleep(5 * time.Second)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Sync) GenerateRegtestBlock() error {
|
||||||
|
lbrycrd, err := logUtils.GetLbrycrdClient(s.Manager.LbrycrdDsn)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Prefix("error getting lbrycrd client", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
txs, err := lbrycrd.Generate(1)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Prefix("error generating new block", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tx := range txs {
|
||||||
|
log.Info("Generated tx: ", tx.String())
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Sync) ensureChannelOwnership() error {
|
func (s *Sync) ensureChannelOwnership() error {
|
||||||
if s.LbryChannelName == "" {
|
defer func(start time.Time) { timing.TimedComponent("ensureChannelOwnership").Add(time.Since(start)) }(time.Now())
|
||||||
|
|
||||||
|
if s.DbChannelData.DesiredChannelName == "" {
|
||||||
return errors.Err("no channel name set")
|
return errors.Err("no channel name set")
|
||||||
}
|
}
|
||||||
//@TODO: get rid of this when imported channels are supported
|
|
||||||
if s.YoutubeChannelID == "UCW-thz5HxE-goYq8yPds1Gw" {
|
channels, err := s.daemon.ChannelList(nil, 1, 500, nil)
|
||||||
return nil
|
|
||||||
}
|
|
||||||
channels, err := s.daemon.ChannelList(nil, 1, 50)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
} else if channels == nil {
|
} else if channels == nil {
|
||||||
return errors.Err("no channel response")
|
return errors.Err("no channel response")
|
||||||
}
|
}
|
||||||
//special case for wallets we don't retain full control anymore
|
|
||||||
if len((*channels).Items) > 1 {
|
var channelToUse *jsonrpc.Transaction
|
||||||
// This wallet is probably not under our control anymore but we still want to publish to it
|
if len((*channels).Items) > 0 {
|
||||||
// here we shall check if within all the channels there is one that was created by ytsync
|
if s.DbChannelData.ChannelClaimID == "" {
|
||||||
logUtils.SendInfoToSlack("we are dealing with a wallet that has multiple channels. This indicates that the wallet was probably transferred but we still want to sync their content. YoutubeID: %s", s.YoutubeChannelID)
|
|
||||||
if s.lbryChannelID == "" {
|
|
||||||
return errors.Err("this channel does not have a recorded claimID in the database. To prevent failures, updates are not supported until an entry is manually added in the database")
|
return errors.Err("this channel does not have a recorded claimID in the database. To prevent failures, updates are not supported until an entry is manually added in the database")
|
||||||
}
|
}
|
||||||
for _, c := range (*channels).Items {
|
for _, c := range (*channels).Items {
|
||||||
if c.ClaimID != s.lbryChannelID {
|
log.Debugf("checking listed channel %s (%s)", c.ClaimID, c.Name)
|
||||||
if c.Name != s.LbryChannelName {
|
if c.ClaimID != s.DbChannelData.ChannelClaimID {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if c.Name != s.DbChannelData.DesiredChannelName {
|
||||||
return errors.Err("the channel in the wallet is different than the channel in the database")
|
return errors.Err("the channel in the wallet is different than the channel in the database")
|
||||||
}
|
}
|
||||||
return nil // we have the ytsync channel and both the claimID and the channelName from the database are correct
|
channelToUse = &c
|
||||||
|
break
|
||||||
}
|
}
|
||||||
|
if channelToUse == nil {
|
||||||
|
return errors.Err("this wallet has channels but not a single one is ours! Expected claim_id: %s (%s)", s.DbChannelData.ChannelClaimID, s.DbChannelData.DesiredChannelName)
|
||||||
}
|
}
|
||||||
}
|
} else if s.DbChannelData.TransferState == shared.TransferStateComplete {
|
||||||
channelUsesOldMetadata := false
|
return errors.Err("the channel was transferred but appears to have been abandoned!")
|
||||||
if len((*channels).Items) == 1 {
|
} else if s.DbChannelData.ChannelClaimID != "" {
|
||||||
channel := ((*channels).Items)[0]
|
return errors.Err("the database has a channel recorded (%s) but nothing was found in our control", s.DbChannelData.ChannelClaimID)
|
||||||
if channel.Name == s.LbryChannelName {
|
|
||||||
channelUsesOldMetadata = channel.Value.GetThumbnail() == nil
|
|
||||||
//TODO: eventually get rid of this when the whole db is filled
|
|
||||||
if s.lbryChannelID == "" {
|
|
||||||
err = s.Manager.apiConfig.SetChannelClaimID(s.YoutubeChannelID, channel.ClaimID)
|
|
||||||
} else if channel.ClaimID != s.lbryChannelID {
|
|
||||||
return errors.Err("the channel in the wallet is different than the channel in the database")
|
|
||||||
}
|
|
||||||
s.lbryChannelID = channel.ClaimID
|
|
||||||
if !channelUsesOldMetadata {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return errors.Err("this channel does not belong to this wallet! Expected: %s, found: %s", s.LbryChannelName, channel.Name)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
channelBidAmount := channelClaimAmount
|
channelUsesOldMetadata := false
|
||||||
|
if channelToUse != nil {
|
||||||
|
channelUsesOldMetadata = channelToUse.Value.GetThumbnail() == nil || (len(channelToUse.Value.GetLanguages()) == 0 && s.DbChannelData.Language != "")
|
||||||
|
if !channelUsesOldMetadata {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
balanceResp, err := s.daemon.AccountBalance(nil)
|
balanceResp, err := s.daemon.AccountBalance(nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -316,47 +387,43 @@ func (s *Sync) ensureChannelOwnership() error {
|
||||||
} else if balanceResp == nil {
|
} else if balanceResp == nil {
|
||||||
return errors.Err("no response")
|
return errors.Err("no response")
|
||||||
}
|
}
|
||||||
balance, err := decimal.NewFromString((string)(*balanceResp))
|
balance, err := decimal.NewFromString(balanceResp.Available.String())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if balance.LessThan(decimal.NewFromFloat(channelBidAmount)) {
|
if balance.LessThan(decimal.NewFromFloat(channelClaimAmount)) {
|
||||||
err = s.addCredits(channelBidAmount + 0.1)
|
err = s.addCredits(channelClaimAmount + estimatedMaxTxFee*3)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
client := &http.Client{
|
|
||||||
Transport: &transport.APIKey{Key: s.APIConfig.YoutubeAPIKey},
|
|
||||||
}
|
|
||||||
|
|
||||||
service, err := youtube.New(client)
|
channelInfo, err := ytapi.ChannelInfo(s.DbChannelData.ChannelId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Prefix("error creating YouTube service", err)
|
if strings.Contains(err.Error(), "invalid character 'e' looking for beginning of value") {
|
||||||
}
|
logUtils.SendInfoToSlack("failed to get channel data for %s. Waiting 1 minute to retry", s.DbChannelData.ChannelId)
|
||||||
|
time.Sleep(1 * time.Minute)
|
||||||
response, err := service.Channels.List("snippet,brandingSettings").Id(s.YoutubeChannelID).Do()
|
channelInfo, err = ytapi.ChannelInfo(s.DbChannelData.ChannelId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Prefix("error getting channel details", err)
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(response.Items) < 1 {
|
thumbnail := channelInfo.Header.C4TabbedHeaderRenderer.Avatar.Thumbnails[len(channelInfo.Header.C4TabbedHeaderRenderer.Avatar.Thumbnails)-1].URL
|
||||||
return errors.Err("youtube channel not found")
|
thumbnailURL, err := thumbs.MirrorThumbnail(thumbnail, s.DbChannelData.ChannelId)
|
||||||
}
|
|
||||||
|
|
||||||
channelInfo := response.Items[0].Snippet
|
|
||||||
channelBranding := response.Items[0].BrandingSettings
|
|
||||||
|
|
||||||
thumbnail := thumbs.GetBestThumbnail(channelInfo.Thumbnails)
|
|
||||||
thumbnailURL, err := thumbs.MirrorThumbnail(thumbnail.Url, s.YoutubeChannelID, s.Manager.GetS3AWSConfig())
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var bannerURL *string
|
var bannerURL *string
|
||||||
if channelBranding.Image != nil && channelBranding.Image.BannerImageUrl != "" {
|
if channelInfo.Header.C4TabbedHeaderRenderer.Banner.Thumbnails != nil {
|
||||||
bURL, err := thumbs.MirrorThumbnail(channelBranding.Image.BannerImageUrl, "banner-"+s.YoutubeChannelID, s.Manager.GetS3AWSConfig())
|
bURL, err := thumbs.MirrorThumbnail(channelInfo.Header.C4TabbedHeaderRenderer.Banner.Thumbnails[len(channelInfo.Header.C4TabbedHeaderRenderer.Banner.Thumbnails)-1].URL,
|
||||||
|
"banner-"+s.DbChannelData.ChannelId,
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -364,72 +431,108 @@ func (s *Sync) ensureChannelOwnership() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
var languages []string = nil
|
var languages []string = nil
|
||||||
if channelInfo.DefaultLanguage != "" {
|
if s.DbChannelData.Language != "" {
|
||||||
languages = []string{channelInfo.DefaultLanguage}
|
languages = []string{s.DbChannelData.Language}
|
||||||
}
|
}
|
||||||
|
|
||||||
var locations []jsonrpc.Location = nil
|
var locations []jsonrpc.Location = nil
|
||||||
if channelInfo.Country != "" {
|
if channelInfo.Topbar.DesktopTopbarRenderer.CountryCode != "" {
|
||||||
locations = []jsonrpc.Location{{Country: util.PtrToString(channelInfo.Country)}}
|
locations = []jsonrpc.Location{{Country: &channelInfo.Topbar.DesktopTopbarRenderer.CountryCode}}
|
||||||
}
|
}
|
||||||
var c *jsonrpc.TransactionSummary
|
var c *jsonrpc.TransactionSummary
|
||||||
|
var recoveredChannelClaimID string
|
||||||
claimCreateOptions := jsonrpc.ClaimCreateOptions{
|
claimCreateOptions := jsonrpc.ClaimCreateOptions{
|
||||||
Title: &channelInfo.Title,
|
Title: &channelInfo.Microformat.MicroformatDataRenderer.Title,
|
||||||
Description: &channelInfo.Description,
|
Description: &channelInfo.Metadata.ChannelMetadataRenderer.Description,
|
||||||
Tags: tagsManager.GetTagsForChannel(s.YoutubeChannelID),
|
Tags: tags_manager.GetTagsForChannel(s.DbChannelData.ChannelId),
|
||||||
Languages: languages,
|
Languages: languages,
|
||||||
Locations: locations,
|
Locations: locations,
|
||||||
ThumbnailURL: &thumbnailURL,
|
ThumbnailURL: &thumbnailURL,
|
||||||
}
|
}
|
||||||
if channelUsesOldMetadata {
|
if channelUsesOldMetadata {
|
||||||
c, err = s.daemon.ChannelUpdate(s.lbryChannelID, jsonrpc.ChannelUpdateOptions{
|
da, err := s.getDefaultAccount()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if s.DbChannelData.TransferState <= 1 {
|
||||||
|
c, err = s.daemon.ChannelUpdate(s.DbChannelData.ChannelClaimID, jsonrpc.ChannelUpdateOptions{
|
||||||
ClearTags: util.PtrToBool(true),
|
ClearTags: util.PtrToBool(true),
|
||||||
ClearLocations: util.PtrToBool(true),
|
ClearLocations: util.PtrToBool(true),
|
||||||
ClearLanguages: util.PtrToBool(true),
|
ClearLanguages: util.PtrToBool(true),
|
||||||
ChannelCreateOptions: jsonrpc.ChannelCreateOptions{
|
ChannelCreateOptions: jsonrpc.ChannelCreateOptions{
|
||||||
|
AccountID: &da,
|
||||||
|
FundingAccountIDs: []string{
|
||||||
|
da,
|
||||||
|
},
|
||||||
ClaimCreateOptions: claimCreateOptions,
|
ClaimCreateOptions: claimCreateOptions,
|
||||||
CoverURL: bannerURL,
|
CoverURL: bannerURL,
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
c, err = s.daemon.ChannelCreate(s.LbryChannelName, channelBidAmount, jsonrpc.ChannelCreateOptions{
|
logUtils.SendInfoToSlack("%s (%s) has a channel with old metadata but isn't in our control anymore. Ignoring", s.DbChannelData.DesiredChannelName, s.DbChannelData.ChannelClaimID)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
c, err = s.daemon.ChannelCreate(s.DbChannelData.DesiredChannelName, channelClaimAmount, jsonrpc.ChannelCreateOptions{
|
||||||
ClaimCreateOptions: claimCreateOptions,
|
ClaimCreateOptions: claimCreateOptions,
|
||||||
CoverURL: bannerURL,
|
CoverURL: bannerURL,
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
|
claimId, err2 := s.getChannelClaimIDForTimedOutCreation()
|
||||||
|
if err2 != nil {
|
||||||
|
err = errors.Prefix(err2.Error(), err)
|
||||||
|
} else {
|
||||||
|
recoveredChannelClaimID = claimId
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
s.lbryChannelID = c.Outputs[0].ClaimID
|
if recoveredChannelClaimID != "" {
|
||||||
return s.Manager.apiConfig.SetChannelClaimID(s.YoutubeChannelID, s.lbryChannelID)
|
s.DbChannelData.ChannelClaimID = recoveredChannelClaimID
|
||||||
|
} else {
|
||||||
|
s.DbChannelData.ChannelClaimID = c.Outputs[0].ClaimID
|
||||||
|
}
|
||||||
|
return s.Manager.ApiConfig.SetChannelClaimID(s.DbChannelData.ChannelId, s.DbChannelData.ChannelClaimID)
|
||||||
}
|
}
|
||||||
|
|
||||||
func allUTXOsConfirmed(utxolist *jsonrpc.UTXOListResponse) bool {
|
//getChannelClaimIDForTimedOutCreation is a raw function that returns the only channel that exists in the wallet
|
||||||
if utxolist == nil {
|
// this is used because the SDK sucks and can't figure out when to return when creating a claim...
|
||||||
return false
|
func (s *Sync) getChannelClaimIDForTimedOutCreation() (string, error) {
|
||||||
|
channels, err := s.daemon.ChannelList(nil, 1, 500, nil)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if channels == nil {
|
||||||
|
return "", errors.Err("no channel response")
|
||||||
|
}
|
||||||
|
if len((*channels).Items) != 1 {
|
||||||
|
return "", errors.Err("more than one channel found when trying to recover from SDK failure in creating the channel")
|
||||||
|
}
|
||||||
|
desiredChannel := (*channels).Items[0]
|
||||||
|
if desiredChannel.Name != s.DbChannelData.DesiredChannelName {
|
||||||
|
return "", errors.Err("the channel found in the wallet has a different name than the one we expected")
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(*utxolist) < 1 {
|
return desiredChannel.ClaimID, nil
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, utxo := range *utxolist {
|
|
||||||
if utxo.Confirmations <= 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Sync) addCredits(amountToAdd float64) error {
|
func (s *Sync) addCredits(amountToAdd float64) error {
|
||||||
|
start := time.Now()
|
||||||
|
defer func(start time.Time) {
|
||||||
|
timing.TimedComponent("addCredits").Add(time.Since(start))
|
||||||
|
}(start)
|
||||||
log.Printf("Adding %f credits", amountToAdd)
|
log.Printf("Adding %f credits", amountToAdd)
|
||||||
lbrycrdd, err := logUtils.GetLbrycrdClient(s.LbrycrdString)
|
lbrycrdd, err := logUtils.GetLbrycrdClient(s.Manager.LbrycrdDsn)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
addressResp, err := s.daemon.AddressUnused(nil)
|
defaultAccount, err := s.getDefaultAccount()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
addressResp, err := s.daemon.AddressUnused(&defaultAccount)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
} else if addressResp == nil {
|
} else if addressResp == nil {
|
||||||
|
|
349
manager/transfer.go
Normal file
349
manager/transfer.go
Normal file
|
@ -0,0 +1,349 @@
|
||||||
|
package manager
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/jsonrpc"
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/stop"
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/util"
|
||||||
|
"github.com/lbryio/ytsync/v5/shared"
|
||||||
|
"github.com/lbryio/ytsync/v5/timing"
|
||||||
|
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func waitConfirmations(s *Sync) error {
|
||||||
|
start := time.Now()
|
||||||
|
defer func(start time.Time) {
|
||||||
|
timing.TimedComponent("waitConfirmations").Add(time.Since(start))
|
||||||
|
}(start)
|
||||||
|
defaultAccount, err := s.getDefaultAccount()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
allConfirmed := false
|
||||||
|
waitCount := 0
|
||||||
|
waiting:
|
||||||
|
for !allConfirmed && waitCount < 2 {
|
||||||
|
utxolist, err := s.daemon.UTXOList(&defaultAccount, 1, 10000)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
} else if utxolist == nil {
|
||||||
|
return errors.Err("no response")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, utxo := range utxolist.Items {
|
||||||
|
if utxo.Confirmations <= 0 {
|
||||||
|
err = s.waitForNewBlock()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
waitCount++
|
||||||
|
continue waiting
|
||||||
|
}
|
||||||
|
}
|
||||||
|
allConfirmed = true
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type abandonResponse struct {
|
||||||
|
ClaimID string
|
||||||
|
Error error
|
||||||
|
Amount float64
|
||||||
|
}
|
||||||
|
|
||||||
|
func abandonSupports(s *Sync) (float64, error) {
|
||||||
|
start := time.Now()
|
||||||
|
defer func(start time.Time) {
|
||||||
|
timing.TimedComponent("abandonSupports").Add(time.Since(start))
|
||||||
|
}(start)
|
||||||
|
totalPages := uint64(1)
|
||||||
|
var allSupports []jsonrpc.Claim
|
||||||
|
defaultAccount, err := s.getDefaultAccount()
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
for page := uint64(1); page <= totalPages; page++ {
|
||||||
|
supports, err := s.daemon.SupportList(&defaultAccount, page, 50)
|
||||||
|
if err != nil {
|
||||||
|
supports, err = s.daemon.SupportList(&defaultAccount, page, 50)
|
||||||
|
if err != nil {
|
||||||
|
return 0, errors.Prefix("cannot list supports", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
allSupports = append(allSupports, (*supports).Items...)
|
||||||
|
totalPages = (*supports).TotalPages
|
||||||
|
}
|
||||||
|
producerWG := &stop.Group{}
|
||||||
|
|
||||||
|
claimIDChan := make(chan string, len(allSupports))
|
||||||
|
abandonRspChan := make(chan abandonResponse, len(allSupports))
|
||||||
|
alreadyAbandoned := make(map[string]bool, len(allSupports))
|
||||||
|
producerWG.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer producerWG.Done()
|
||||||
|
for _, support := range allSupports {
|
||||||
|
_, ok := alreadyAbandoned[support.ClaimID]
|
||||||
|
if ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
alreadyAbandoned[support.ClaimID] = true
|
||||||
|
claimIDChan <- support.ClaimID
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
consumerWG := &stop.Group{}
|
||||||
|
//TODO: remove this once the SDK team fixes their RPC bugs....
|
||||||
|
s.daemon.SetRPCTimeout(60 * time.Second)
|
||||||
|
defer s.daemon.SetRPCTimeout(5 * time.Minute)
|
||||||
|
for i := 0; i < s.Manager.CliFlags.ConcurrentJobs; i++ {
|
||||||
|
consumerWG.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer consumerWG.Done()
|
||||||
|
outer:
|
||||||
|
for {
|
||||||
|
claimID, more := <-claimIDChan
|
||||||
|
if !more {
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
summary, err := s.daemon.TxoSpend(util.PtrToString("support"), &claimID, nil, nil, nil, &defaultAccount)
|
||||||
|
if err != nil {
|
||||||
|
if strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") {
|
||||||
|
log.Errorf("Support abandon for %s timed out, retrying...", claimID)
|
||||||
|
summary, err = s.daemon.TxoSpend(util.PtrToString("support"), &claimID, nil, nil, nil, &defaultAccount)
|
||||||
|
if err != nil {
|
||||||
|
//TODO GUESS HOW MUCH LBC WAS RELEASED THAT WE DON'T KNOW ABOUT, because screw you SDK
|
||||||
|
abandonRspChan <- abandonResponse{
|
||||||
|
ClaimID: claimID,
|
||||||
|
Error: err,
|
||||||
|
Amount: 0, // this is likely wrong, but oh well... there is literally nothing I can do about it
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
abandonRspChan <- abandonResponse{
|
||||||
|
ClaimID: claimID,
|
||||||
|
Error: err,
|
||||||
|
Amount: 0,
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if summary == nil || len(*summary) < 1 {
|
||||||
|
abandonRspChan <- abandonResponse{
|
||||||
|
ClaimID: claimID,
|
||||||
|
Error: errors.Err("error abandoning supports: no outputs while abandoning %s", claimID),
|
||||||
|
Amount: 0,
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
var outputAmount float64
|
||||||
|
for _, tx := range *summary {
|
||||||
|
amount, err := strconv.ParseFloat(tx.Outputs[0].Amount, 64)
|
||||||
|
if err != nil {
|
||||||
|
abandonRspChan <- abandonResponse{
|
||||||
|
ClaimID: claimID,
|
||||||
|
Error: errors.Err(err),
|
||||||
|
Amount: 0,
|
||||||
|
}
|
||||||
|
continue outer
|
||||||
|
}
|
||||||
|
outputAmount += amount
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
abandonRspChan <- abandonResponse{
|
||||||
|
ClaimID: claimID,
|
||||||
|
Error: errors.Err(err),
|
||||||
|
Amount: 0,
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
log.Infof("Abandoned supports of %.4f LBC for claim %s", outputAmount, claimID)
|
||||||
|
abandonRspChan <- abandonResponse{
|
||||||
|
ClaimID: claimID,
|
||||||
|
Error: nil,
|
||||||
|
Amount: outputAmount,
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
producerWG.Wait()
|
||||||
|
close(claimIDChan)
|
||||||
|
consumerWG.Wait()
|
||||||
|
close(abandonRspChan)
|
||||||
|
|
||||||
|
totalAbandoned := 0.0
|
||||||
|
for r := range abandonRspChan {
|
||||||
|
if r.Error != nil {
|
||||||
|
log.Errorf("Failed abandoning supports for %s: %s", r.ClaimID, r.Error.Error())
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
totalAbandoned += r.Amount
|
||||||
|
}
|
||||||
|
return totalAbandoned, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type updateInfo struct {
|
||||||
|
ClaimID string
|
||||||
|
streamUpdateOptions *jsonrpc.StreamUpdateOptions
|
||||||
|
videoStatus *shared.VideoStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
func transferVideos(s *Sync) error {
|
||||||
|
start := time.Now()
|
||||||
|
defer func(start time.Time) {
|
||||||
|
timing.TimedComponent("transferVideos").Add(time.Since(start))
|
||||||
|
}(start)
|
||||||
|
cleanTransfer := true
|
||||||
|
|
||||||
|
streamChan := make(chan updateInfo, s.Manager.CliFlags.ConcurrentJobs)
|
||||||
|
account, err := s.getDefaultAccount()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
streams, err := s.daemon.StreamList(&account, 1, 30000)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
producerWG := &stop.Group{}
|
||||||
|
producerWG.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer producerWG.Done()
|
||||||
|
for _, video := range s.syncedVideos {
|
||||||
|
if !video.Published || video.Transferred || video.MetadataVersion != shared.LatestMetadataVersion {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var stream *jsonrpc.Claim = nil
|
||||||
|
for _, c := range streams.Items {
|
||||||
|
if c.ClaimID != video.ClaimID || (c.SigningChannel != nil && c.SigningChannel.ClaimID != s.DbChannelData.ChannelClaimID) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
stream = &c
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if stream == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
streamUpdateOptions := jsonrpc.StreamUpdateOptions{
|
||||||
|
StreamCreateOptions: &jsonrpc.StreamCreateOptions{
|
||||||
|
ClaimCreateOptions: jsonrpc.ClaimCreateOptions{
|
||||||
|
ClaimAddress: &s.DbChannelData.PublishAddress.Address,
|
||||||
|
FundingAccountIDs: []string{
|
||||||
|
account,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Bid: util.PtrToString(fmt.Sprintf("%.5f", publishAmount/2.)),
|
||||||
|
}
|
||||||
|
videoStatus := shared.VideoStatus{
|
||||||
|
ChannelID: s.DbChannelData.ChannelId,
|
||||||
|
VideoID: video.VideoID,
|
||||||
|
ClaimID: video.ClaimID,
|
||||||
|
ClaimName: video.ClaimName,
|
||||||
|
Status: shared.VideoStatusPublished,
|
||||||
|
IsTransferred: util.PtrToBool(true),
|
||||||
|
}
|
||||||
|
streamChan <- updateInfo{
|
||||||
|
ClaimID: video.ClaimID,
|
||||||
|
streamUpdateOptions: &streamUpdateOptions,
|
||||||
|
videoStatus: &videoStatus,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
consumerWG := &stop.Group{}
|
||||||
|
for i := 0; i < s.Manager.CliFlags.ConcurrentJobs; i++ {
|
||||||
|
consumerWG.Add(1)
|
||||||
|
go func(worker int) {
|
||||||
|
defer consumerWG.Done()
|
||||||
|
for {
|
||||||
|
ui, more := <-streamChan
|
||||||
|
if !more {
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
err := s.streamUpdate(&ui)
|
||||||
|
if err != nil {
|
||||||
|
cleanTransfer = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
producerWG.Wait()
|
||||||
|
close(streamChan)
|
||||||
|
consumerWG.Wait()
|
||||||
|
|
||||||
|
if !cleanTransfer {
|
||||||
|
return errors.Err("A video has failed to transfer for the channel...skipping channel transfer")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Sync) streamUpdate(ui *updateInfo) error {
|
||||||
|
start := time.Now()
|
||||||
|
result, updateError := s.daemon.StreamUpdate(ui.ClaimID, *ui.streamUpdateOptions)
|
||||||
|
timing.TimedComponent("transferStreamUpdate").Add(time.Since(start))
|
||||||
|
if updateError != nil {
|
||||||
|
ui.videoStatus.FailureReason = updateError.Error()
|
||||||
|
ui.videoStatus.Status = shared.VideoStatusTransferFailed
|
||||||
|
ui.videoStatus.IsTransferred = util.PtrToBool(false)
|
||||||
|
} else {
|
||||||
|
ui.videoStatus.IsTransferred = util.PtrToBool(len(result.Outputs) != 0)
|
||||||
|
}
|
||||||
|
log.Infof("TRANSFERRED %t", *ui.videoStatus.IsTransferred)
|
||||||
|
statusErr := s.Manager.ApiConfig.MarkVideoStatus(*ui.videoStatus)
|
||||||
|
if statusErr != nil {
|
||||||
|
return errors.Prefix(statusErr.Error(), updateError)
|
||||||
|
}
|
||||||
|
return errors.Err(updateError)
|
||||||
|
}
|
||||||
|
|
||||||
|
func transferChannel(s *Sync) error {
|
||||||
|
start := time.Now()
|
||||||
|
defer func(start time.Time) {
|
||||||
|
timing.TimedComponent("transferChannel").Add(time.Since(start))
|
||||||
|
}(start)
|
||||||
|
account, err := s.getDefaultAccount()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
channelClaims, err := s.daemon.ChannelList(&account, 1, 50, nil)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
var channelClaim *jsonrpc.Transaction = nil
|
||||||
|
for _, c := range channelClaims.Items {
|
||||||
|
if c.ClaimID != s.DbChannelData.ChannelClaimID {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
channelClaim = &c
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if channelClaim == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
updateOptions := jsonrpc.ChannelUpdateOptions{
|
||||||
|
Bid: util.PtrToString(fmt.Sprintf("%.6f", channelClaimAmount-0.005)),
|
||||||
|
ChannelCreateOptions: jsonrpc.ChannelCreateOptions{
|
||||||
|
ClaimCreateOptions: jsonrpc.ClaimCreateOptions{
|
||||||
|
ClaimAddress: &s.DbChannelData.PublishAddress.Address,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
result, err := s.daemon.ChannelUpdate(s.DbChannelData.ChannelClaimID, updateOptions)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
log.Infof("TRANSFERRED %t", len(result.Outputs) != 0)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
File diff suppressed because it is too large
Load diff
17
metrics/metrics.go
Normal file
17
metrics/metrics.go
Normal file
|
@ -0,0 +1,17 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/lbryio/ytsync/v5/configs"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
Durations = promauto.NewHistogramVec(prometheus.HistogramOpts{
|
||||||
|
Namespace: "ytsync",
|
||||||
|
Subsystem: configs.Configuration.GetHostname(),
|
||||||
|
Name: "duration",
|
||||||
|
Help: "The durations of the individual modules",
|
||||||
|
}, []string{"path"})
|
||||||
|
)
|
|
@ -10,7 +10,7 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
var titleRegexp = regexp.MustCompile(`[^a-zA-Z0-9]+`)
|
var claimNameRegexp = regexp.MustCompile(`[=&#:$@%??;、\\"/<>%{}||^~\x60[\]\s]`)
|
||||||
|
|
||||||
type Namer struct {
|
type Namer struct {
|
||||||
mu *sync.Mutex
|
mu *sync.Mutex
|
||||||
|
@ -43,9 +43,16 @@ func (n *Namer) GetNextName(prefix string) string {
|
||||||
}
|
}
|
||||||
|
|
||||||
//if for some reasons the title can't be converted in a valid claim name (too short or not latin) then we use a hash
|
//if for some reasons the title can't be converted in a valid claim name (too short or not latin) then we use a hash
|
||||||
|
attempt = 1
|
||||||
if len(name) < 2 {
|
if len(name) < 2 {
|
||||||
sum := md5.Sum([]byte(prefix))
|
sum := md5.Sum([]byte(prefix))
|
||||||
|
for {
|
||||||
name = fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:])[:15], attempt)
|
name = fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:])[:15], attempt)
|
||||||
|
if _, exists := n.names[name]; !exists {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
attempt++
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
n.names[name] = true
|
n.names[name] = true
|
||||||
|
@ -61,18 +68,21 @@ func getClaimNameFromTitle(title string, attempt int) string {
|
||||||
}
|
}
|
||||||
maxLen := 40 - len(suffix)
|
maxLen := 40 - len(suffix)
|
||||||
|
|
||||||
chunks := strings.Split(strings.ToLower(strings.Trim(titleRegexp.ReplaceAllString(title, "-"), "-")), "-")
|
chunks := strings.Split(strings.ToLower(strings.Trim(claimNameRegexp.ReplaceAllString(title, "-"), "-")), "-")
|
||||||
|
|
||||||
name := chunks[0]
|
name := chunks[0]
|
||||||
if len(name) > maxLen {
|
if len(name) > maxLen {
|
||||||
return name[:maxLen]
|
return truncateUnicode(name, maxLen) + suffix
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, chunk := range chunks[1:] {
|
for _, chunk := range chunks[1:] {
|
||||||
|
if chunk == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
tmpName := name + "-" + chunk
|
tmpName := name + "-" + chunk
|
||||||
if len(tmpName) > maxLen {
|
if len(tmpName) > maxLen {
|
||||||
if len(name) < 20 {
|
if len(name) < 20 {
|
||||||
name = tmpName[:maxLen]
|
name = truncateUnicode(tmpName, maxLen-len(name))
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
@ -81,3 +91,18 @@ func getClaimNameFromTitle(title string, attempt int) string {
|
||||||
|
|
||||||
return name + suffix
|
return name + suffix
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func truncateUnicode(name string, limit int) string {
|
||||||
|
reNameBlacklist := regexp.MustCompile(`(&|>|<|\/|:|\n|\r)*`)
|
||||||
|
name = reNameBlacklist.ReplaceAllString(name, "")
|
||||||
|
result := name
|
||||||
|
chars := 0
|
||||||
|
for i := range name {
|
||||||
|
if chars >= limit {
|
||||||
|
result = name[:i]
|
||||||
|
break
|
||||||
|
}
|
||||||
|
chars++
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
28
namer/names_test.go
Normal file
28
namer/names_test.go
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
package namer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Test_getClaimNameFromTitle(t *testing.T) {
|
||||||
|
name := getClaimNameFromTitle("СтопХам - \"В ожидании ответа\"", 0)
|
||||||
|
assert.Equal(t, "стопхам-в-ожидании", name)
|
||||||
|
name = getClaimNameFromTitle("SADB - \"A Weak Woman With a Strong Hood\"", 0)
|
||||||
|
assert.Equal(t, "sadb-a-weak-woman-with-a-strong-hood", name)
|
||||||
|
name = getClaimNameFromTitle("錢包整理術 5 Tips、哪種錢包最NG?|有錢人默默在做的「錢包整理術」 ft.@SHIN LI", 0)
|
||||||
|
assert.Equal(t, "錢包整理術-5-tips-哪種錢包最ng", name)
|
||||||
|
name = getClaimNameFromTitle("اسرع-طريقة-لتختيم", 0)
|
||||||
|
assert.Equal(t, "اسرع-طريقة-لتختيم", name)
|
||||||
|
name = getClaimNameFromTitle("شكرا على 380 مشترك😍😍😍😍 لي يريد دعم ادا وصلنا المقطع 40 لايك وراح ادعم قناتين", 0)
|
||||||
|
assert.Equal(t, "شكرا-على-380-مشترك😍😍😍", name)
|
||||||
|
name = getClaimNameFromTitle("test-@", 0)
|
||||||
|
assert.Equal(t, "test", name)
|
||||||
|
name = getClaimNameFromTitle("『あなたはただの空の殻でした』", 0)
|
||||||
|
assert.Equal(t, "『あなたはただの空の殻でした』", name)
|
||||||
|
name = getClaimNameFromTitle("精靈樂章-這樣的夥伴沒問題嗎 幽暗隕石坑(夢魘) 王有無敵狀態...要會閃不然會被秒(無課)", 2)
|
||||||
|
assert.Equal(t, "精靈樂章-這樣的夥伴沒問題嗎-2", name)
|
||||||
|
name = getClaimNameFromTitle("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 50)
|
||||||
|
assert.Equal(t, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-50", name)
|
||||||
|
}
|
274
sdk/api.go
274
sdk/api.go
|
@ -11,8 +11,12 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/lbryio/lbry.go/extras/errors"
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||||
"github.com/lbryio/lbry.go/extras/null"
|
"github.com/lbryio/lbry.go/v2/extras/null"
|
||||||
|
"github.com/lbryio/ytsync/v5/configs"
|
||||||
|
"github.com/lbryio/ytsync/v5/shared"
|
||||||
|
|
||||||
|
"github.com/lbryio/ytsync/v5/util"
|
||||||
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
@ -22,52 +26,53 @@ const (
|
||||||
)
|
)
|
||||||
|
|
||||||
type APIConfig struct {
|
type APIConfig struct {
|
||||||
YoutubeAPIKey string
|
|
||||||
ApiURL string
|
ApiURL string
|
||||||
ApiToken string
|
ApiToken string
|
||||||
HostName string
|
HostName string
|
||||||
}
|
}
|
||||||
|
|
||||||
type SyncProperties struct {
|
var instance *APIConfig
|
||||||
SyncFrom int64
|
|
||||||
SyncUntil int64
|
func GetAPIsConfigs() *APIConfig {
|
||||||
YoutubeChannelID string
|
if instance == nil {
|
||||||
|
instance = &APIConfig{
|
||||||
|
ApiURL: configs.Configuration.InternalApisEndpoint,
|
||||||
|
ApiToken: configs.Configuration.InternalApisAuthToken,
|
||||||
|
HostName: configs.Configuration.GetHostname(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return instance
|
||||||
}
|
}
|
||||||
|
|
||||||
type Fee struct {
|
func (a *APIConfig) FetchChannels(status string, cliFlags *shared.SyncFlags) ([]shared.YoutubeChannel, error) {
|
||||||
Amount string `json:"amount"`
|
|
||||||
Address string `json:"address"`
|
|
||||||
Currency string `json:"currency"`
|
|
||||||
}
|
|
||||||
type YoutubeChannel struct {
|
|
||||||
ChannelId string `json:"channel_id"`
|
|
||||||
TotalVideos uint `json:"total_videos"`
|
|
||||||
DesiredChannelName string `json:"desired_channel_name"`
|
|
||||||
Fee *Fee `json:"fee"`
|
|
||||||
ChannelClaimID string `json:"channel_claim_id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *APIConfig) FetchChannels(status string, cp *SyncProperties) ([]YoutubeChannel, error) {
|
|
||||||
type apiJobsResponse struct {
|
type apiJobsResponse struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Error null.String `json:"error"`
|
Error null.String `json:"error"`
|
||||||
Data []YoutubeChannel `json:"data"`
|
Data []shared.YoutubeChannel `json:"data"`
|
||||||
}
|
}
|
||||||
endpoint := a.ApiURL + "/yt/jobs"
|
endpoint := a.ApiURL + "/yt/jobs"
|
||||||
res, err := http.PostForm(endpoint, url.Values{
|
res, err := http.PostForm(endpoint, url.Values{
|
||||||
"auth_token": {a.ApiToken},
|
"auth_token": {a.ApiToken},
|
||||||
"sync_status": {status},
|
"sync_status": {status},
|
||||||
"min_videos": {strconv.Itoa(1)},
|
"min_videos": {strconv.Itoa(1)},
|
||||||
"after": {strconv.Itoa(int(cp.SyncFrom))},
|
"after": {strconv.Itoa(int(cliFlags.SyncFrom))},
|
||||||
"before": {strconv.Itoa(int(cp.SyncUntil))},
|
"before": {strconv.Itoa(int(cliFlags.SyncUntil))},
|
||||||
"sync_server": {a.HostName},
|
"sync_server": {a.HostName},
|
||||||
"channel_id": {cp.YoutubeChannelID},
|
"channel_id": {cliFlags.ChannelID},
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Err(err)
|
util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
return a.FetchChannels(status, cliFlags)
|
||||||
}
|
}
|
||||||
defer res.Body.Close()
|
defer res.Body.Close()
|
||||||
body, _ := ioutil.ReadAll(res.Body)
|
body, _ := ioutil.ReadAll(res.Body)
|
||||||
|
if res.StatusCode != http.StatusOK {
|
||||||
|
util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
|
||||||
|
log.Debugln(string(body))
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
return a.FetchChannels(status, cliFlags)
|
||||||
|
}
|
||||||
var response apiJobsResponse
|
var response apiJobsResponse
|
||||||
err = json.Unmarshal(body, &response)
|
err = json.Unmarshal(body, &response)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -88,6 +93,8 @@ type SyncedVideo struct {
|
||||||
ClaimID string `json:"claim_id"`
|
ClaimID string `json:"claim_id"`
|
||||||
Size int64 `json:"size"`
|
Size int64 `json:"size"`
|
||||||
MetadataVersion int8 `json:"metadata_version"`
|
MetadataVersion int8 `json:"metadata_version"`
|
||||||
|
Transferred bool `json:"transferred"`
|
||||||
|
IsLbryFirst bool `json:"is_lbry_first"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func sanitizeFailureReason(s *string) {
|
func sanitizeFailureReason(s *string) {
|
||||||
|
@ -100,7 +107,6 @@ func sanitizeFailureReason(s *string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *APIConfig) SetChannelCert(certHex string, channelID string) error {
|
func (a *APIConfig) SetChannelCert(certHex string, channelID string) error {
|
||||||
|
|
||||||
type apiSetChannelCertResponse struct {
|
type apiSetChannelCertResponse struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Error null.String `json:"error"`
|
Error null.String `json:"error"`
|
||||||
|
@ -109,17 +115,26 @@ func (a *APIConfig) SetChannelCert(certHex string, channelID string) error {
|
||||||
|
|
||||||
endpoint := a.ApiURL + "/yt/channel_cert"
|
endpoint := a.ApiURL + "/yt/channel_cert"
|
||||||
|
|
||||||
res, _ := http.PostForm(endpoint, url.Values{
|
res, err := http.PostForm(endpoint, url.Values{
|
||||||
"channel_claim_id": {channelID},
|
"channel_claim_id": {channelID},
|
||||||
"channel_cert": {certHex},
|
"channel_cert": {certHex},
|
||||||
"auth_token": {a.ApiToken},
|
"auth_token": {a.ApiToken},
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
|
util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
return a.SetChannelCert(certHex, channelID)
|
||||||
|
}
|
||||||
defer res.Body.Close()
|
defer res.Body.Close()
|
||||||
|
|
||||||
body, _ := ioutil.ReadAll(res.Body)
|
body, _ := ioutil.ReadAll(res.Body)
|
||||||
|
if res.StatusCode != http.StatusOK {
|
||||||
|
util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
|
||||||
|
log.Debugln(string(body))
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
return a.SetChannelCert(certHex, channelID)
|
||||||
|
}
|
||||||
var response apiSetChannelCertResponse
|
var response apiSetChannelCertResponse
|
||||||
err := json.Unmarshal(body, &response)
|
err = json.Unmarshal(body, &response)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
|
@ -128,10 +143,9 @@ func (a *APIConfig) SetChannelCert(certHex string, channelID string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *APIConfig) SetChannelStatus(channelID string, status string, failureReason string) (map[string]SyncedVideo, map[string]bool, error) {
|
func (a *APIConfig) SetChannelStatus(channelID string, status string, failureReason string, transferState *int) (map[string]SyncedVideo, map[string]bool, error) {
|
||||||
type apiChannelStatusResponse struct {
|
type apiChannelStatusResponse struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Error null.String `json:"error"`
|
Error null.String `json:"error"`
|
||||||
|
@ -140,17 +154,32 @@ func (a *APIConfig) SetChannelStatus(channelID string, status string, failureRea
|
||||||
endpoint := a.ApiURL + "/yt/channel_status"
|
endpoint := a.ApiURL + "/yt/channel_status"
|
||||||
|
|
||||||
sanitizeFailureReason(&failureReason)
|
sanitizeFailureReason(&failureReason)
|
||||||
res, _ := http.PostForm(endpoint, url.Values{
|
params := url.Values{
|
||||||
"channel_id": {channelID},
|
"channel_id": {channelID},
|
||||||
"sync_server": {a.HostName},
|
"sync_server": {a.HostName},
|
||||||
"auth_token": {a.ApiToken},
|
"auth_token": {a.ApiToken},
|
||||||
"sync_status": {status},
|
"sync_status": {status},
|
||||||
"failure_reason": {failureReason},
|
"failure_reason": {failureReason},
|
||||||
})
|
}
|
||||||
|
if transferState != nil {
|
||||||
|
params.Add("transfer_state", strconv.Itoa(*transferState))
|
||||||
|
}
|
||||||
|
res, err := http.PostForm(endpoint, params)
|
||||||
|
if err != nil {
|
||||||
|
util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
return a.SetChannelStatus(channelID, status, failureReason, transferState)
|
||||||
|
}
|
||||||
defer res.Body.Close()
|
defer res.Body.Close()
|
||||||
body, _ := ioutil.ReadAll(res.Body)
|
body, _ := ioutil.ReadAll(res.Body)
|
||||||
|
if res.StatusCode >= http.StatusInternalServerError {
|
||||||
|
util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
|
||||||
|
log.Debugln(string(body))
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
return a.SetChannelStatus(channelID, status, failureReason, transferState)
|
||||||
|
}
|
||||||
var response apiChannelStatusResponse
|
var response apiChannelStatusResponse
|
||||||
err := json.Unmarshal(body, &response)
|
err = json.Unmarshal(body, &response)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, errors.Err(err)
|
return nil, nil, errors.Err(err)
|
||||||
}
|
}
|
||||||
|
@ -178,15 +207,26 @@ func (a *APIConfig) SetChannelClaimID(channelID string, channelClaimID string) e
|
||||||
Data string `json:"data"`
|
Data string `json:"data"`
|
||||||
}
|
}
|
||||||
endpoint := a.ApiURL + "/yt/set_channel_claim_id"
|
endpoint := a.ApiURL + "/yt/set_channel_claim_id"
|
||||||
res, _ := http.PostForm(endpoint, url.Values{
|
res, err := http.PostForm(endpoint, url.Values{
|
||||||
"channel_id": {channelID},
|
"channel_id": {channelID},
|
||||||
"auth_token": {a.ApiToken},
|
"auth_token": {a.ApiToken},
|
||||||
"channel_claim_id": {channelClaimID},
|
"channel_claim_id": {channelClaimID},
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
|
util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
return a.SetChannelClaimID(channelID, channelClaimID)
|
||||||
|
}
|
||||||
defer res.Body.Close()
|
defer res.Body.Close()
|
||||||
body, _ := ioutil.ReadAll(res.Body)
|
body, _ := ioutil.ReadAll(res.Body)
|
||||||
|
if res.StatusCode != http.StatusOK {
|
||||||
|
util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
|
||||||
|
log.Debugln(string(body))
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
return a.SetChannelClaimID(channelID, channelClaimID)
|
||||||
|
}
|
||||||
var response apiChannelStatusResponse
|
var response apiChannelStatusResponse
|
||||||
err := json.Unmarshal(body, &response)
|
err = json.Unmarshal(body, &response)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
|
@ -212,15 +252,26 @@ func (a *APIConfig) DeleteVideos(videos []string) error {
|
||||||
"video_ids": {videoIDs},
|
"video_ids": {videoIDs},
|
||||||
"auth_token": {a.ApiToken},
|
"auth_token": {a.ApiToken},
|
||||||
}
|
}
|
||||||
res, _ := http.PostForm(endpoint, vals)
|
res, err := http.PostForm(endpoint, vals)
|
||||||
|
if err != nil {
|
||||||
|
util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
return a.DeleteVideos(videos)
|
||||||
|
}
|
||||||
defer res.Body.Close()
|
defer res.Body.Close()
|
||||||
body, _ := ioutil.ReadAll(res.Body)
|
body, _ := ioutil.ReadAll(res.Body)
|
||||||
|
if res.StatusCode != http.StatusOK {
|
||||||
|
util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
|
||||||
|
log.Debugln(string(body))
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
return a.DeleteVideos(videos)
|
||||||
|
}
|
||||||
var response struct {
|
var response struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Error null.String `json:"error"`
|
Error null.String `json:"error"`
|
||||||
Data null.String `json:"data"`
|
Data null.String `json:"data"`
|
||||||
}
|
}
|
||||||
err := json.Unmarshal(body, &response)
|
err = json.Unmarshal(body, &response)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
|
@ -234,42 +285,56 @@ func (a *APIConfig) DeleteVideos(videos []string) error {
|
||||||
return errors.Err("invalid API response. Status code: %d", res.StatusCode)
|
return errors.Err("invalid API response. Status code: %d", res.StatusCode)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *APIConfig) MarkVideoStatus(channelID string, videoID string, status string, claimID string, claimName string, failureReason string, size *int64, metadataVersion uint) error {
|
func (a *APIConfig) MarkVideoStatus(status shared.VideoStatus) error {
|
||||||
endpoint := a.ApiURL + "/yt/video_status"
|
endpoint := a.ApiURL + "/yt/video_status"
|
||||||
|
|
||||||
sanitizeFailureReason(&failureReason)
|
sanitizeFailureReason(&status.FailureReason)
|
||||||
vals := url.Values{
|
vals := url.Values{
|
||||||
"youtube_channel_id": {channelID},
|
"youtube_channel_id": {status.ChannelID},
|
||||||
"video_id": {videoID},
|
"video_id": {status.VideoID},
|
||||||
"status": {status},
|
"status": {status.Status},
|
||||||
"auth_token": {a.ApiToken},
|
"auth_token": {a.ApiToken},
|
||||||
}
|
}
|
||||||
if status == VideoStatusPublished || status == VideoStatusUpgradeFailed {
|
if status.Status == VideoStatusPublished || status.Status == VideoStatusUpgradeFailed {
|
||||||
if claimID == "" || claimName == "" {
|
if status.ClaimID == "" || status.ClaimName == "" {
|
||||||
return errors.Err("claimID (%s) or claimName (%s) missing", claimID, claimName)
|
return errors.Err("claimID (%s) or claimName (%s) missing", status.ClaimID, status.ClaimName)
|
||||||
}
|
}
|
||||||
vals.Add("published_at", strconv.FormatInt(time.Now().Unix(), 10))
|
vals.Add("published_at", strconv.FormatInt(time.Now().Unix(), 10))
|
||||||
vals.Add("claim_id", claimID)
|
vals.Add("claim_id", status.ClaimID)
|
||||||
vals.Add("claim_name", claimName)
|
vals.Add("claim_name", status.ClaimName)
|
||||||
if metadataVersion > 0 {
|
if status.MetaDataVersion > 0 {
|
||||||
vals.Add("metadata_version", fmt.Sprintf("%d", metadataVersion))
|
vals.Add("metadata_version", fmt.Sprintf("%d", status.MetaDataVersion))
|
||||||
}
|
}
|
||||||
if size != nil {
|
if status.Size != nil {
|
||||||
vals.Add("size", strconv.FormatInt(*size, 10))
|
vals.Add("size", strconv.FormatInt(*status.Size, 10))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if failureReason != "" {
|
if status.FailureReason != "" {
|
||||||
vals.Add("failure_reason", failureReason)
|
vals.Add("failure_reason", status.FailureReason)
|
||||||
|
}
|
||||||
|
if status.IsTransferred != nil {
|
||||||
|
vals.Add("transferred", strconv.FormatBool(*status.IsTransferred))
|
||||||
|
}
|
||||||
|
res, err := http.PostForm(endpoint, vals)
|
||||||
|
if err != nil {
|
||||||
|
util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
return a.MarkVideoStatus(status)
|
||||||
}
|
}
|
||||||
res, _ := http.PostForm(endpoint, vals)
|
|
||||||
defer res.Body.Close()
|
defer res.Body.Close()
|
||||||
body, _ := ioutil.ReadAll(res.Body)
|
body, _ := ioutil.ReadAll(res.Body)
|
||||||
|
if res.StatusCode != http.StatusOK {
|
||||||
|
util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
|
||||||
|
log.Debugln(string(body))
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
return a.MarkVideoStatus(status)
|
||||||
|
}
|
||||||
var response struct {
|
var response struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Error null.String `json:"error"`
|
Error null.String `json:"error"`
|
||||||
Data null.String `json:"data"`
|
Data null.String `json:"data"`
|
||||||
}
|
}
|
||||||
err := json.Unmarshal(body, &response)
|
err = json.Unmarshal(body, &response)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -281,3 +346,96 @@ func (a *APIConfig) MarkVideoStatus(channelID string, videoID string, status str
|
||||||
}
|
}
|
||||||
return errors.Err("invalid API response. Status code: %d", res.StatusCode)
|
return errors.Err("invalid API response. Status code: %d", res.StatusCode)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (a *APIConfig) VideoState(videoID string) (string, error) {
|
||||||
|
endpoint := a.ApiURL + "/yt/video_state"
|
||||||
|
vals := url.Values{
|
||||||
|
"video_id": {videoID},
|
||||||
|
"auth_token": {a.ApiToken},
|
||||||
|
}
|
||||||
|
|
||||||
|
res, err := http.PostForm(endpoint, vals)
|
||||||
|
if err != nil {
|
||||||
|
util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
return a.VideoState(videoID)
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
body, _ := ioutil.ReadAll(res.Body)
|
||||||
|
if res.StatusCode == http.StatusNotFound {
|
||||||
|
return "not_found", nil
|
||||||
|
}
|
||||||
|
if res.StatusCode != http.StatusOK {
|
||||||
|
util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
|
||||||
|
log.Debugln(string(body))
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
return a.VideoState(videoID)
|
||||||
|
}
|
||||||
|
var response struct {
|
||||||
|
Success bool `json:"success"`
|
||||||
|
Error null.String `json:"error"`
|
||||||
|
Data null.String `json:"data"`
|
||||||
|
}
|
||||||
|
err = json.Unmarshal(body, &response)
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Err(err)
|
||||||
|
}
|
||||||
|
if !response.Error.IsNull() {
|
||||||
|
return "", errors.Err(response.Error.String)
|
||||||
|
}
|
||||||
|
if !response.Data.IsNull() {
|
||||||
|
return response.Data.String, nil
|
||||||
|
}
|
||||||
|
return "", errors.Err("invalid API response. Status code: %d", res.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
type VideoRelease struct {
|
||||||
|
ID uint64 `json:"id"`
|
||||||
|
YoutubeDataID uint64 `json:"youtube_data_id"`
|
||||||
|
VideoID string `json:"video_id"`
|
||||||
|
ReleaseTime string `json:"release_time"`
|
||||||
|
CreatedAt string `json:"created_at"`
|
||||||
|
UpdatedAt string `json:"updated_at"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *APIConfig) GetReleasedDate(videoID string) (*VideoRelease, error) {
|
||||||
|
endpoint := a.ApiURL + "/yt/released"
|
||||||
|
vals := url.Values{
|
||||||
|
"video_id": {videoID},
|
||||||
|
"auth_token": {a.ApiToken},
|
||||||
|
}
|
||||||
|
|
||||||
|
res, err := http.PostForm(endpoint, vals)
|
||||||
|
if err != nil {
|
||||||
|
util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
return a.GetReleasedDate(videoID)
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
body, _ := ioutil.ReadAll(res.Body)
|
||||||
|
if res.StatusCode == http.StatusNotFound {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
if res.StatusCode != http.StatusOK {
|
||||||
|
util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
|
||||||
|
log.Debugln(string(body))
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
return a.GetReleasedDate(videoID)
|
||||||
|
}
|
||||||
|
var response struct {
|
||||||
|
Success bool `json:"success"`
|
||||||
|
Error null.String `json:"error"`
|
||||||
|
Data VideoRelease `json:"data"`
|
||||||
|
}
|
||||||
|
err = json.Unmarshal(body, &response)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Err(err)
|
||||||
|
}
|
||||||
|
if !response.Error.IsNull() {
|
||||||
|
return nil, errors.Err(response.Error.String)
|
||||||
|
}
|
||||||
|
if response.Data.ReleaseTime != "" {
|
||||||
|
return &response.Data, nil
|
||||||
|
}
|
||||||
|
return nil, errors.Err("invalid API response. Status code: %d", res.StatusCode)
|
||||||
|
}
|
||||||
|
|
221
shared/shared.go
Normal file
221
shared/shared.go
Normal file
|
@ -0,0 +1,221 @@
|
||||||
|
package shared
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Fee struct {
|
||||||
|
Amount string `json:"amount"`
|
||||||
|
Address string `json:"address"`
|
||||||
|
Currency string `json:"currency"`
|
||||||
|
}
|
||||||
|
type YoutubeChannel struct {
|
||||||
|
ChannelId string `json:"channel_id"`
|
||||||
|
TotalVideos uint `json:"total_videos"`
|
||||||
|
TotalSubscribers uint `json:"total_subscribers"`
|
||||||
|
DesiredChannelName string `json:"desired_channel_name"`
|
||||||
|
Fee *Fee `json:"fee"`
|
||||||
|
ChannelClaimID string `json:"channel_claim_id"`
|
||||||
|
TransferState int `json:"transfer_state"`
|
||||||
|
PublishAddress PublishAddress `json:"publish_address"`
|
||||||
|
PublicKey string `json:"public_key"`
|
||||||
|
LengthLimit int `json:"length_limit"`
|
||||||
|
SizeLimit int `json:"size_limit"`
|
||||||
|
LastUploadedVideo string `json:"last_uploaded_video"`
|
||||||
|
WipeDB bool `json:"wipe_db"`
|
||||||
|
Language string `json:"language"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type PublishAddress struct {
|
||||||
|
Address string `json:"address"`
|
||||||
|
IsMine bool `json:"is_mine"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *PublishAddress) UnmarshalJSON(data []byte) error {
|
||||||
|
var s string
|
||||||
|
if err := json.Unmarshal(data, &s); err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
p.Address = s
|
||||||
|
p.IsMine = false
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var FatalErrors = []string{
|
||||||
|
":5279: read: connection reset by peer",
|
||||||
|
"no space left on device",
|
||||||
|
"NotEnoughFunds",
|
||||||
|
"Cannot publish using channel",
|
||||||
|
"cannot concatenate 'str' and 'NoneType' objects",
|
||||||
|
"more than 90% of the space has been used.",
|
||||||
|
"Couldn't find private key for id",
|
||||||
|
"You already have a stream claim published under the name",
|
||||||
|
"Missing inputs",
|
||||||
|
}
|
||||||
|
var ErrorsNoRetry = []string{
|
||||||
|
"Requested format is not available",
|
||||||
|
"non 200 status code received",
|
||||||
|
"This video contains content from",
|
||||||
|
"dont know which claim to update",
|
||||||
|
"uploader has not made this video available in your country",
|
||||||
|
"download error: AccessDenied: Access Denied",
|
||||||
|
"Playback on other websites has been disabled by the video owner",
|
||||||
|
"Error in daemon: Cannot publish empty file",
|
||||||
|
"Error extracting sts from embedded url response",
|
||||||
|
"Unable to extract signature tokens",
|
||||||
|
"Client.Timeout exceeded while awaiting headers",
|
||||||
|
"the video is too big to sync, skipping for now",
|
||||||
|
"video is too long to process",
|
||||||
|
"video is too short to process",
|
||||||
|
"no compatible format available for this video",
|
||||||
|
"Watch this video on YouTube.",
|
||||||
|
"have blocked it on copyright grounds",
|
||||||
|
"the video must be republished as we can't get the right size",
|
||||||
|
"HTTP Error 403",
|
||||||
|
"giving up after 0 fragment retries",
|
||||||
|
"Sorry about that",
|
||||||
|
"This video is not available",
|
||||||
|
"Video unavailable",
|
||||||
|
"requested format not available",
|
||||||
|
"interrupted by user",
|
||||||
|
"Sign in to confirm your age",
|
||||||
|
"This video is unavailable",
|
||||||
|
"video is a live stream and hasn't completed yet",
|
||||||
|
"Premieres in",
|
||||||
|
"Private video",
|
||||||
|
"This live event will begin in",
|
||||||
|
"This video has been removed by the uploader",
|
||||||
|
"Premiere will begin shortly",
|
||||||
|
"cannot unmarshal number 0.0",
|
||||||
|
"default youtube thumbnail found",
|
||||||
|
"livestream is likely bugged",
|
||||||
|
}
|
||||||
|
var WalletErrors = []string{
|
||||||
|
"Not enough funds to cover this transaction",
|
||||||
|
"failed: Not enough funds",
|
||||||
|
"Error in daemon: Insufficient funds, please deposit additional LBC",
|
||||||
|
//"Missing inputs",
|
||||||
|
}
|
||||||
|
var BlockchainErrors = []string{
|
||||||
|
"txn-mempool-conflict",
|
||||||
|
"too-long-mempool-chain",
|
||||||
|
}
|
||||||
|
var NeverRetryFailures = []string{
|
||||||
|
"Error extracting sts from embedded url response",
|
||||||
|
"Unable to extract signature tokens",
|
||||||
|
"the video is too big to sync, skipping for now",
|
||||||
|
"video is too long to process",
|
||||||
|
"video is too short to process",
|
||||||
|
"This video contains content from",
|
||||||
|
"no compatible format available for this video",
|
||||||
|
"Watch this video on YouTube.",
|
||||||
|
"have blocked it on copyright grounds",
|
||||||
|
"giving up after 0 fragment retries",
|
||||||
|
"Sign in to confirm your age",
|
||||||
|
"Playback on other websites has been disabled by the video owner",
|
||||||
|
"uploader has not made this video available in your country",
|
||||||
|
"This video has been removed by the uploader",
|
||||||
|
"Video unavailable",
|
||||||
|
"Video is not available - hardcoded fix",
|
||||||
|
}
|
||||||
|
|
||||||
|
type SyncFlags struct {
|
||||||
|
TakeOverExistingChannel bool
|
||||||
|
SkipSpaceCheck bool
|
||||||
|
SyncUpdate bool
|
||||||
|
SingleRun bool
|
||||||
|
RemoveDBUnpublished bool
|
||||||
|
UpgradeMetadata bool
|
||||||
|
DisableTransfers bool
|
||||||
|
QuickSync bool
|
||||||
|
MaxTries int
|
||||||
|
Refill int
|
||||||
|
Limit int
|
||||||
|
Status string
|
||||||
|
SecondaryStatus string
|
||||||
|
ChannelID string
|
||||||
|
SyncFrom int64
|
||||||
|
SyncUntil int64
|
||||||
|
ConcurrentJobs int
|
||||||
|
VideosLimit int
|
||||||
|
MaxVideoSize int
|
||||||
|
MaxVideoLength time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// VideosToSync dynamically figures out how many videos should be synced for a given subs count if nothing was otherwise specified
|
||||||
|
func (f *SyncFlags) VideosToSync(totalSubscribers uint) int {
|
||||||
|
if f.VideosLimit > 0 {
|
||||||
|
return f.VideosLimit
|
||||||
|
}
|
||||||
|
defaultVideosToSync := map[int]int{
|
||||||
|
10000: 1000,
|
||||||
|
5000: 500,
|
||||||
|
1000: 400,
|
||||||
|
800: 250,
|
||||||
|
600: 200,
|
||||||
|
200: 80,
|
||||||
|
100: 20,
|
||||||
|
1: 10,
|
||||||
|
}
|
||||||
|
videosToSync := 0
|
||||||
|
for s, r := range defaultVideosToSync {
|
||||||
|
if int(totalSubscribers) >= s && r > videosToSync {
|
||||||
|
videosToSync = r
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return videosToSync
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *SyncFlags) IsSingleChannelSync() bool {
|
||||||
|
return f.ChannelID != ""
|
||||||
|
}
|
||||||
|
|
||||||
|
type VideoStatus struct {
|
||||||
|
ChannelID string
|
||||||
|
VideoID string
|
||||||
|
Status string
|
||||||
|
ClaimID string
|
||||||
|
ClaimName string
|
||||||
|
FailureReason string
|
||||||
|
Size *int64
|
||||||
|
MetaDataVersion uint
|
||||||
|
IsTransferred *bool
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
StatusPending = "pending" // waiting for permission to sync
|
||||||
|
StatusPendingEmail = "pendingemail" // permission granted but missing email
|
||||||
|
StatusQueued = "queued" // in sync queue. will be synced soon
|
||||||
|
StatusPendingUpgrade = "pendingupgrade" // in sync queue. will be synced soon
|
||||||
|
StatusSyncing = "syncing" // syncing now
|
||||||
|
StatusSynced = "synced" // done
|
||||||
|
StatusWipeDb = "pendingdbwipe" // in sync queue. lbryum database will be pruned
|
||||||
|
StatusFailed = "failed"
|
||||||
|
StatusFinalized = "finalized" // no more changes allowed
|
||||||
|
StatusAbandoned = "abandoned" // deleted on youtube or banned
|
||||||
|
StatusAgeRestricted = "agerestricted" // one or more videos are age restricted and should be reprocessed with special keys
|
||||||
|
)
|
||||||
|
|
||||||
|
var SyncStatuses = []string{StatusPending, StatusPendingEmail, StatusPendingUpgrade, StatusQueued, StatusSyncing, StatusSynced, StatusFailed, StatusFinalized, StatusAbandoned, StatusWipeDb, StatusAgeRestricted}
|
||||||
|
|
||||||
|
const LatestMetadataVersion = 2
|
||||||
|
|
||||||
|
const (
|
||||||
|
VideoStatusPublished = "published"
|
||||||
|
VideoStatusFailed = "failed"
|
||||||
|
VideoStatusUpgradeFailed = "upgradefailed"
|
||||||
|
VideoStatusUnpublished = "unpublished"
|
||||||
|
VideoStatusTransferFailed = "transferfailed"
|
||||||
|
)
|
||||||
|
|
||||||
|
var VideoSyncStatuses = []string{VideoStatusPublished, VideoStatusFailed, VideoStatusUpgradeFailed, VideoStatusUnpublished, VideoStatusTransferFailed}
|
||||||
|
|
||||||
|
const (
|
||||||
|
TransferStateNotTouched = iota
|
||||||
|
TransferStatePending
|
||||||
|
TransferStateComplete
|
||||||
|
TransferStateManual
|
||||||
|
)
|
20
shared/shared_test.go
Normal file
20
shared/shared_test.go
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
package shared
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"gotest.tools/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSyncFlags_VideosToSync(t *testing.T) {
|
||||||
|
f := SyncFlags{}
|
||||||
|
assert.Equal(t, f.VideosToSync(0), 0)
|
||||||
|
assert.Equal(t, f.VideosToSync(1), 10)
|
||||||
|
assert.Equal(t, f.VideosToSync(5), 10)
|
||||||
|
assert.Equal(t, f.VideosToSync(10), 10)
|
||||||
|
assert.Equal(t, f.VideosToSync(101), 50)
|
||||||
|
assert.Equal(t, f.VideosToSync(500), 80)
|
||||||
|
assert.Equal(t, f.VideosToSync(21000), 1000)
|
||||||
|
f.VideosLimit = 1337
|
||||||
|
assert.Equal(t, f.VideosToSync(21), 1337)
|
||||||
|
}
|
|
@ -4,8 +4,8 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/lbryio/lbry.go/extras/jsonrpc"
|
"github.com/lbryio/lbry.go/v2/extras/jsonrpc"
|
||||||
"github.com/lbryio/ytsync/namer"
|
"github.com/lbryio/ytsync/v5/namer"
|
||||||
)
|
)
|
||||||
|
|
||||||
type SyncSummary struct {
|
type SyncSummary struct {
|
||||||
|
|
|
@ -1,34 +1,43 @@
|
||||||
package sources
|
package sources
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"math"
|
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/lbryio/lbry.go/extras/errors"
|
"github.com/lbryio/ytsync/v5/downloader"
|
||||||
"github.com/lbryio/lbry.go/extras/jsonrpc"
|
"github.com/lbryio/ytsync/v5/downloader/ytdl"
|
||||||
"github.com/lbryio/lbry.go/extras/stop"
|
"github.com/lbryio/ytsync/v5/ip_manager"
|
||||||
"github.com/lbryio/lbry.go/extras/util"
|
"github.com/lbryio/ytsync/v5/namer"
|
||||||
|
"github.com/lbryio/ytsync/v5/sdk"
|
||||||
|
"github.com/lbryio/ytsync/v5/shared"
|
||||||
|
"github.com/lbryio/ytsync/v5/tags_manager"
|
||||||
|
"github.com/lbryio/ytsync/v5/thumbs"
|
||||||
|
"github.com/lbryio/ytsync/v5/timing"
|
||||||
|
logUtils "github.com/lbryio/ytsync/v5/util"
|
||||||
|
|
||||||
"github.com/lbryio/ytsync/ipManager"
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||||
"github.com/lbryio/ytsync/namer"
|
"github.com/lbryio/lbry.go/v2/extras/jsonrpc"
|
||||||
"github.com/lbryio/ytsync/sdk"
|
"github.com/lbryio/lbry.go/v2/extras/stop"
|
||||||
"github.com/lbryio/ytsync/tagsManager"
|
"github.com/lbryio/lbry.go/v2/extras/util"
|
||||||
"github.com/lbryio/ytsync/thumbs"
|
|
||||||
|
|
||||||
duration "github.com/ChannelMeter/iso8601duration"
|
"github.com/abadojack/whatlanggo"
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/shopspring/decimal"
|
"github.com/shopspring/decimal"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"google.golang.org/api/youtube/v3"
|
"github.com/vbauerster/mpb/v7"
|
||||||
|
"github.com/vbauerster/mpb/v7/decor"
|
||||||
|
"gopkg.in/vansante/go-ffprobe.v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
type YoutubeVideo struct {
|
type YoutubeVideo struct {
|
||||||
|
@ -38,18 +47,20 @@ type YoutubeVideo struct {
|
||||||
playlistPosition int64
|
playlistPosition int64
|
||||||
size *int64
|
size *int64
|
||||||
maxVideoSize int64
|
maxVideoSize int64
|
||||||
maxVideoLength float64
|
maxVideoLength time.Duration
|
||||||
publishedAt time.Time
|
publishedAt time.Time
|
||||||
dir string
|
dir string
|
||||||
youtubeInfo *youtube.Video
|
youtubeInfo *ytdl.YtdlVideo
|
||||||
youtubeChannelID string
|
youtubeChannelID string
|
||||||
tags []string
|
tags []string
|
||||||
awsConfig aws.Config
|
|
||||||
thumbnailURL string
|
thumbnailURL string
|
||||||
lbryChannelID string
|
lbryChannelID string
|
||||||
mocked bool
|
mocked bool
|
||||||
walletLock *sync.RWMutex
|
walletLock *sync.RWMutex
|
||||||
stopGroup *stop.Group
|
stopGroup *stop.Group
|
||||||
|
pool *ip_manager.IPPool
|
||||||
|
progressBars *mpb.Progress
|
||||||
|
progressBarWg *sync.WaitGroup
|
||||||
}
|
}
|
||||||
|
|
||||||
var youtubeCategories = map[string]string{
|
var youtubeCategories = map[string]string{
|
||||||
|
@ -87,31 +98,33 @@ var youtubeCategories = map[string]string{
|
||||||
"44": "trailers",
|
"44": "trailers",
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewYoutubeVideo(directory string, videoData *youtube.Video, playlistPosition int64, awsConfig aws.Config, stopGroup *stop.Group) *YoutubeVideo {
|
func NewYoutubeVideo(directory string, videoData *ytdl.YtdlVideo, playlistPosition int64, stopGroup *stop.Group, pool *ip_manager.IPPool) (*YoutubeVideo, error) {
|
||||||
publishedAt, _ := time.Parse(time.RFC3339Nano, videoData.Snippet.PublishedAt) // ignore parse errors
|
// youtube-dl returns times in local timezone sometimes. this could break in the future
|
||||||
|
// maybe we can file a PR to choose the timezone we want from youtube-dl
|
||||||
return &YoutubeVideo{
|
return &YoutubeVideo{
|
||||||
id: videoData.Id,
|
id: videoData.ID,
|
||||||
title: videoData.Snippet.Title,
|
title: videoData.Title,
|
||||||
description: videoData.Snippet.Description,
|
description: videoData.Description,
|
||||||
playlistPosition: playlistPosition,
|
playlistPosition: playlistPosition,
|
||||||
publishedAt: publishedAt,
|
publishedAt: videoData.GetUploadTime(),
|
||||||
dir: directory,
|
dir: directory,
|
||||||
youtubeInfo: videoData,
|
youtubeInfo: videoData,
|
||||||
awsConfig: awsConfig,
|
|
||||||
mocked: false,
|
mocked: false,
|
||||||
youtubeChannelID: videoData.Snippet.ChannelId,
|
youtubeChannelID: videoData.ChannelID,
|
||||||
stopGroup: stopGroup,
|
stopGroup: stopGroup,
|
||||||
|
pool: pool,
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
}
|
|
||||||
func NewMockedVideo(directory string, videoID string, youtubeChannelID string, awsConfig aws.Config, stopGroup *stop.Group) *YoutubeVideo {
|
func NewMockedVideo(directory string, videoID string, youtubeChannelID string, stopGroup *stop.Group, pool *ip_manager.IPPool) *YoutubeVideo {
|
||||||
return &YoutubeVideo{
|
return &YoutubeVideo{
|
||||||
id: videoID,
|
id: videoID,
|
||||||
playlistPosition: 0,
|
playlistPosition: 0,
|
||||||
dir: directory,
|
dir: directory,
|
||||||
awsConfig: awsConfig,
|
|
||||||
mocked: true,
|
mocked: true,
|
||||||
youtubeChannelID: youtubeChannelID,
|
youtubeChannelID: youtubeChannelID,
|
||||||
stopGroup: stopGroup,
|
stopGroup: stopGroup,
|
||||||
|
pool: pool,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -162,7 +175,7 @@ func (v *YoutubeVideo) getFullPath() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (v *YoutubeVideo) getAbbrevDescription() string {
|
func (v *YoutubeVideo) getAbbrevDescription() string {
|
||||||
maxLength := 2800
|
maxLength := 6500
|
||||||
description := strings.TrimSpace(v.description)
|
description := strings.TrimSpace(v.description)
|
||||||
additionalDescription := "\nhttps://www.youtube.com/watch?v=" + v.id
|
additionalDescription := "\nhttps://www.youtube.com/watch?v=" + v.id
|
||||||
khanAcademyClaimID := "5fc52291980268b82413ca4c0ace1b8d749f3ffb"
|
khanAcademyClaimID := "5fc52291980268b82413ca4c0ace1b8d749f3ffb"
|
||||||
|
@ -174,8 +187,32 @@ func (v *YoutubeVideo) getAbbrevDescription() string {
|
||||||
}
|
}
|
||||||
return description + "\n..." + additionalDescription
|
return description + "\n..." + additionalDescription
|
||||||
}
|
}
|
||||||
|
func checkCookiesIntegrity() error {
|
||||||
|
fi, err := os.Stat("cookies.txt")
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
if fi.Size() == 0 {
|
||||||
|
log.Errorf("cookies were cleared out. Attempting a restore from cookies-backup.txt")
|
||||||
|
input, err := ioutil.ReadFile("cookies-backup.txt")
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile("cookies.txt", input, 0644)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *YoutubeVideo) download() error {
|
||||||
|
start := time.Now()
|
||||||
|
defer func(start time.Time) {
|
||||||
|
timing.TimedComponent("download").Add(time.Since(start))
|
||||||
|
}(start)
|
||||||
|
|
||||||
func (v *YoutubeVideo) download(useIPv6 bool) error {
|
|
||||||
videoPath := v.getFullPath()
|
videoPath := v.getFullPath()
|
||||||
|
|
||||||
err := os.Mkdir(v.videoDir(), 0777)
|
err := os.Mkdir(v.videoDir(), 0777)
|
||||||
|
@ -194,20 +231,54 @@ func (v *YoutubeVideo) download(useIPv6 bool) error {
|
||||||
"1080",
|
"1080",
|
||||||
"720",
|
"720",
|
||||||
"480",
|
"480",
|
||||||
"320",
|
"360",
|
||||||
}
|
}
|
||||||
qualityIndex := 0
|
dur := time.Duration(v.youtubeInfo.Duration) * time.Second
|
||||||
|
if dur.Hours() > 1 { //for videos longer than 1 hour only sync up to 720p
|
||||||
|
qualities = []string{
|
||||||
|
"720",
|
||||||
|
"480",
|
||||||
|
"360",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
metadataPath := path.Join(logUtils.GetVideoMetadataDir(), v.id+".info.json")
|
||||||
|
_, err = os.Stat(metadataPath)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return errors.Err("metadata information for video %s is missing! Why?", v.id)
|
||||||
|
}
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata, err := parseVideoMetadata(metadataPath)
|
||||||
|
|
||||||
|
err = checkCookiesIntegrity()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
ytdlArgs := []string{
|
ytdlArgs := []string{
|
||||||
"--no-progress",
|
"--no-progress",
|
||||||
"-o" + strings.TrimSuffix(v.getFullPath(), ".mp4"),
|
"-o" + strings.TrimSuffix(v.getFullPath(), ".mp4"),
|
||||||
"--merge-output-format",
|
"--merge-output-format",
|
||||||
"mp4",
|
"mp4",
|
||||||
"--postprocessor-args",
|
"--postprocessor-args",
|
||||||
"-movflags faststart",
|
"ffmpeg:-movflags faststart",
|
||||||
"--abort-on-unavailable-fragment",
|
"--abort-on-unavailable-fragment",
|
||||||
"--fragment-retries",
|
"--fragment-retries",
|
||||||
"0",
|
"1",
|
||||||
|
"--cookies",
|
||||||
|
"cookies.txt",
|
||||||
|
"--extractor-args",
|
||||||
|
"youtube:player_client=android",
|
||||||
|
//"--concurrent-fragments",
|
||||||
|
//"2",
|
||||||
|
"--load-info-json",
|
||||||
|
metadataPath,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
userAgent := []string{"--user-agent", downloader.ChromeUA}
|
||||||
if v.maxVideoSize > 0 {
|
if v.maxVideoSize > 0 {
|
||||||
ytdlArgs = append(ytdlArgs,
|
ytdlArgs = append(ytdlArgs,
|
||||||
"--max-filesize",
|
"--max-filesize",
|
||||||
|
@ -217,51 +288,46 @@ func (v *YoutubeVideo) download(useIPv6 bool) error {
|
||||||
if v.maxVideoLength > 0 {
|
if v.maxVideoLength > 0 {
|
||||||
ytdlArgs = append(ytdlArgs,
|
ytdlArgs = append(ytdlArgs,
|
||||||
"--match-filter",
|
"--match-filter",
|
||||||
fmt.Sprintf("duration <= %d", int(math.Round(v.maxVideoLength*3600))),
|
fmt.Sprintf("duration <= %d", int(v.maxVideoLength.Seconds())),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
sourceAddress, err := ipManager.GetNextIP(useIPv6)
|
|
||||||
if err != nil {
|
var sourceAddress string
|
||||||
if sourceAddress == "throttled" {
|
|
||||||
for {
|
for {
|
||||||
|
sourceAddress, err = v.pool.GetIP(v.id)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, ip_manager.ErrAllThrottled) {
|
||||||
select {
|
select {
|
||||||
case <-v.stopGroup.Ch():
|
case <-v.stopGroup.Ch():
|
||||||
return errors.Err("interrupted by user")
|
return errors.Err("interrupted by user")
|
||||||
default:
|
default:
|
||||||
|
time.Sleep(ip_manager.IPCooldownPeriod)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
time.Sleep(ipManager.IPCooldownPeriod)
|
|
||||||
sourceAddress, err = ipManager.GetNextIP(useIPv6)
|
|
||||||
if err == nil {
|
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
defer v.pool.ReleaseIP(sourceAddress)
|
||||||
} else {
|
|
||||||
return errors.Err(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
defer ipManager.ReleaseIP(sourceAddress)
|
|
||||||
if useIPv6 {
|
|
||||||
log.Infof("using IPv6: %s", sourceAddress)
|
|
||||||
ytdlArgs = append(ytdlArgs,
|
|
||||||
"-6",
|
|
||||||
"--source-address",
|
|
||||||
sourceAddress,
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
log.Infof("using IPv4: %s", sourceAddress)
|
|
||||||
ytdlArgs = append(ytdlArgs,
|
|
||||||
"-4",
|
|
||||||
"--source-address",
|
|
||||||
sourceAddress,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
ytdlArgs = append(ytdlArgs, "https://www.youtube.com/watch?v="+v.ID())
|
|
||||||
runcmd:
|
|
||||||
argsWithFilters := append(ytdlArgs, "-fbestvideo[ext=mp4][height<="+qualities[qualityIndex]+"]+bestaudio[ext!=webm]")
|
|
||||||
cmd := exec.Command("youtube-dl", argsWithFilters...)
|
|
||||||
|
|
||||||
log.Printf("Running command youtube-dl %s", strings.Join(argsWithFilters, " "))
|
ytdlArgs = append(ytdlArgs,
|
||||||
|
"--source-address",
|
||||||
|
sourceAddress,
|
||||||
|
fmt.Sprintf("https://www.youtube.com/watch?v=%s", v.id),
|
||||||
|
)
|
||||||
|
//speedThrottleRetries := 3
|
||||||
|
for i := 0; i < len(qualities); i++ {
|
||||||
|
quality := qualities[i]
|
||||||
|
argsWithFilters := append(ytdlArgs, "-fbestvideo[ext=mp4][vcodec!*=av01][height<="+quality+"]+bestaudio[ext!=webm][format_id!=258][format_id!=380][format_id!=251][format_id!=256][format_id!=327][format_id!=328]")
|
||||||
|
argsWithFilters = append(argsWithFilters, userAgent...)
|
||||||
|
//if speedThrottleRetries > 0 {
|
||||||
|
// speedThrottleRetries--
|
||||||
|
// argsWithFilters = append(argsWithFilters, "--throttled-rate", "180K")
|
||||||
|
//}
|
||||||
|
cmd := exec.Command("yt-dlp", argsWithFilters...)
|
||||||
|
log.Printf("Running command yt-dlp %s", strings.Join(argsWithFilters, " "))
|
||||||
|
|
||||||
stderr, err := cmd.StderrPipe()
|
stderr, err := cmd.StderrPipe()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -276,16 +342,40 @@ runcmd:
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
dlStopGrp := stop.New()
|
||||||
|
|
||||||
|
ticker := time.NewTicker(400 * time.Millisecond)
|
||||||
|
go v.trackProgressBar(argsWithFilters, ticker, metadata, dlStopGrp, sourceAddress)
|
||||||
|
|
||||||
|
//ticker2 := time.NewTicker(10 * time.Second)
|
||||||
|
//v.monitorSlowDownload(ticker, dlStopGrp, sourceAddress, cmd)
|
||||||
|
|
||||||
errorLog, _ := ioutil.ReadAll(stderr)
|
errorLog, _ := ioutil.ReadAll(stderr)
|
||||||
outLog, _ := ioutil.ReadAll(stdout)
|
outLog, _ := ioutil.ReadAll(stdout)
|
||||||
|
err = cmd.Wait()
|
||||||
|
|
||||||
if err = cmd.Wait(); err != nil {
|
//stop the progress bar
|
||||||
|
ticker.Stop()
|
||||||
|
dlStopGrp.Stop()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
if strings.Contains(err.Error(), "exit status 1") {
|
if strings.Contains(err.Error(), "exit status 1") {
|
||||||
if strings.Contains(string(errorLog), "HTTP Error 429") {
|
if strings.Contains(string(errorLog), "HTTP Error 429") || strings.Contains(string(errorLog), "returned non-zero exit status 8") {
|
||||||
ipManager.SetIpThrottled(sourceAddress, v.stopGroup)
|
v.pool.SetThrottled(sourceAddress)
|
||||||
} else if strings.Contains(string(errorLog), "giving up after 0 fragment retries") && qualityIndex < len(qualities)-1 {
|
} else if strings.Contains(string(errorLog), "giving up after 0 fragment retries") {
|
||||||
qualityIndex++
|
if i == (len(qualities) - 1) {
|
||||||
goto runcmd
|
return errors.Err(string(errorLog))
|
||||||
|
}
|
||||||
|
continue //this bypasses the yt throttling IP redistribution... TODO: don't
|
||||||
|
} else if strings.Contains(string(errorLog), "YouTube said: Unable to extract video data") && !strings.Contains(userAgent[1], "Googlebot") {
|
||||||
|
i-- //do not lower quality when trying a different user agent
|
||||||
|
userAgent = []string{downloader.GoogleBotUA}
|
||||||
|
log.Infof("trying different user agent for video %s", v.ID())
|
||||||
|
continue
|
||||||
|
//} else if strings.Contains(string(errorLog), "yt_dlp.utils.ThrottledDownload") {
|
||||||
|
// log.Infof("throttled download speed for video %s. Retrying", v.ID())
|
||||||
|
// i-- //do not lower quality when we're retrying a throttled download
|
||||||
|
// continue
|
||||||
}
|
}
|
||||||
return errors.Err(string(errorLog))
|
return errors.Err(string(errorLog))
|
||||||
}
|
}
|
||||||
|
@ -302,6 +392,9 @@ runcmd:
|
||||||
return errors.Err("the video is too big to sync, skipping for now")
|
return errors.Err("the video is too big to sync, skipping for now")
|
||||||
}
|
}
|
||||||
if string(errorLog) != "" {
|
if string(errorLog) != "" {
|
||||||
|
if strings.Contains(string(errorLog), "HTTP Error 429") {
|
||||||
|
v.pool.SetThrottled(sourceAddress)
|
||||||
|
}
|
||||||
log.Printf("Command finished with error: %v", errors.Err(string(errorLog)))
|
log.Printf("Command finished with error: %v", errors.Err(string(errorLog)))
|
||||||
_ = v.delete("due to error")
|
_ = v.delete("due to error")
|
||||||
return errors.Err(string(errorLog))
|
return errors.Err(string(errorLog))
|
||||||
|
@ -316,12 +409,245 @@ runcmd:
|
||||||
}
|
}
|
||||||
videoSize := fi.Size()
|
videoSize := fi.Size()
|
||||||
v.size = &videoSize
|
v.size = &videoSize
|
||||||
|
break
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
func (v *YoutubeVideo) monitorSlowDownload(ticker *time.Ticker, stop *stop.Group, address string, cmd *exec.Cmd) {
|
||||||
|
count := 0
|
||||||
|
lastSize := int64(0)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-stop.Ch():
|
||||||
|
return
|
||||||
|
case <-ticker.C:
|
||||||
|
size, err := logUtils.DirSize(v.videoDir())
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error while getting size of download directory: %s", errors.FullTrace(err))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
delta := size - lastSize
|
||||||
|
avgSpeed := delta / 10
|
||||||
|
if avgSpeed < 200*1024 { //200 KB/s
|
||||||
|
count++
|
||||||
|
} else {
|
||||||
|
count--
|
||||||
|
}
|
||||||
|
if count > 3 {
|
||||||
|
err := cmd.Process.Signal(syscall.SIGKILL)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("failure in killing slow download: %s", errors.Err(err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *YoutubeVideo) trackProgressBar(argsWithFilters []string, ticker *time.Ticker, metadata *ytMetadata, done *stop.Group, sourceAddress string) {
|
||||||
|
v.progressBarWg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer v.progressBarWg.Done()
|
||||||
|
//get size of the video before downloading
|
||||||
|
cmd := exec.Command("yt-dlp", append(argsWithFilters, "-s")...)
|
||||||
|
stdout, err := cmd.StdoutPipe()
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error while getting final file size: %s", errors.FullTrace(err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cmd.Start(); err != nil {
|
||||||
|
log.Errorf("error while getting final file size: %s", errors.FullTrace(err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
outLog, _ := ioutil.ReadAll(stdout)
|
||||||
|
err = cmd.Wait()
|
||||||
|
output := string(outLog)
|
||||||
|
parts := strings.Split(output, ": ")
|
||||||
|
if len(parts) != 3 {
|
||||||
|
log.Errorf("couldn't parse audio and video parts from the output (%s)", output)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
formats := strings.Split(parts[2], "+")
|
||||||
|
if len(formats) != 2 {
|
||||||
|
log.Errorf("couldn't parse formats from the output (%s)", output)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Debugf("'%s'", output)
|
||||||
|
videoFormat := formats[0]
|
||||||
|
audioFormat := strings.Replace(formats[1], "\n", "", -1)
|
||||||
|
|
||||||
|
videoSize := 0
|
||||||
|
audioSize := 0
|
||||||
|
if metadata != nil {
|
||||||
|
for _, f := range metadata.Formats {
|
||||||
|
if f.FormatID == videoFormat {
|
||||||
|
videoSize = f.Filesize
|
||||||
|
}
|
||||||
|
if f.FormatID == audioFormat {
|
||||||
|
audioSize = f.Filesize
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("(%s) - videoSize: %d (%s), audiosize: %d (%s)", v.id, videoSize, videoFormat, audioSize, audioFormat)
|
||||||
|
bar := v.progressBars.AddBar(int64(videoSize+audioSize),
|
||||||
|
mpb.PrependDecorators(
|
||||||
|
decor.CountersKibiByte("% .2f / % .2f "),
|
||||||
|
// simple name decorator
|
||||||
|
decor.Name(fmt.Sprintf("id: %s src-ip: (%s)", v.id, sourceAddress)),
|
||||||
|
// decor.DSyncWidth bit enables column width synchronization
|
||||||
|
decor.Percentage(decor.WCSyncSpace),
|
||||||
|
),
|
||||||
|
mpb.AppendDecorators(
|
||||||
|
decor.EwmaETA(decor.ET_STYLE_GO, 90),
|
||||||
|
decor.Name(" ] "),
|
||||||
|
decor.EwmaSpeed(decor.UnitKiB, "% .2f ", 60),
|
||||||
|
decor.OnComplete(
|
||||||
|
// ETA decorator with ewma age of 60
|
||||||
|
decor.EwmaETA(decor.ET_STYLE_GO, 60), "done",
|
||||||
|
),
|
||||||
|
),
|
||||||
|
mpb.BarRemoveOnComplete(),
|
||||||
|
)
|
||||||
|
defer func() {
|
||||||
|
bar.Completed()
|
||||||
|
bar.Abort(true)
|
||||||
|
}()
|
||||||
|
origSize := int64(0)
|
||||||
|
lastUpdate := time.Now()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-done.Ch():
|
||||||
|
return
|
||||||
|
case <-ticker.C:
|
||||||
|
var err error
|
||||||
|
size, err := logUtils.DirSize(v.videoDir())
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error while getting size of download directory: %s", errors.FullTrace(err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if size > origSize {
|
||||||
|
origSize = size
|
||||||
|
bar.SetCurrent(size)
|
||||||
|
if size > int64(videoSize+audioSize) {
|
||||||
|
bar.SetTotal(size+2048, false)
|
||||||
|
}
|
||||||
|
bar.DecoratorEwmaUpdate(time.Since(lastUpdate))
|
||||||
|
lastUpdate = time.Now()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
type ytMetadata struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
Title string `json:"title"`
|
||||||
|
Formats []struct {
|
||||||
|
Asr int `json:"asr"`
|
||||||
|
Filesize int `json:"filesize"`
|
||||||
|
FormatID string `json:"format_id"`
|
||||||
|
FormatNote string `json:"format_note"`
|
||||||
|
Fps interface{} `json:"fps"`
|
||||||
|
Height interface{} `json:"height"`
|
||||||
|
Quality int `json:"quality"`
|
||||||
|
Tbr float64 `json:"tbr"`
|
||||||
|
URL string `json:"url"`
|
||||||
|
Width interface{} `json:"width"`
|
||||||
|
Ext string `json:"ext"`
|
||||||
|
Vcodec string `json:"vcodec"`
|
||||||
|
Acodec string `json:"acodec"`
|
||||||
|
Abr float64 `json:"abr,omitempty"`
|
||||||
|
DownloaderOptions struct {
|
||||||
|
HTTPChunkSize int `json:"http_chunk_size"`
|
||||||
|
} `json:"downloader_options,omitempty"`
|
||||||
|
Container string `json:"container,omitempty"`
|
||||||
|
Format string `json:"format"`
|
||||||
|
Protocol string `json:"protocol"`
|
||||||
|
HTTPHeaders struct {
|
||||||
|
UserAgent string `json:"User-Agent"`
|
||||||
|
AcceptCharset string `json:"Accept-Charset"`
|
||||||
|
Accept string `json:"Accept"`
|
||||||
|
AcceptEncoding string `json:"Accept-Encoding"`
|
||||||
|
AcceptLanguage string `json:"Accept-Language"`
|
||||||
|
} `json:"http_headers"`
|
||||||
|
Vbr float64 `json:"vbr,omitempty"`
|
||||||
|
} `json:"formats"`
|
||||||
|
Thumbnails []struct {
|
||||||
|
Height int `json:"height"`
|
||||||
|
URL string `json:"url"`
|
||||||
|
Width int `json:"width"`
|
||||||
|
Resolution string `json:"resolution"`
|
||||||
|
ID string `json:"id"`
|
||||||
|
} `json:"thumbnails"`
|
||||||
|
Description string `json:"description"`
|
||||||
|
UploadDate string `json:"upload_date"`
|
||||||
|
Uploader string `json:"uploader"`
|
||||||
|
UploaderID string `json:"uploader_id"`
|
||||||
|
UploaderURL string `json:"uploader_url"`
|
||||||
|
ChannelID string `json:"channel_id"`
|
||||||
|
ChannelURL string `json:"channel_url"`
|
||||||
|
Duration int `json:"duration"`
|
||||||
|
ViewCount int `json:"view_count"`
|
||||||
|
AverageRating float64 `json:"average_rating"`
|
||||||
|
AgeLimit int `json:"age_limit"`
|
||||||
|
WebpageURL string `json:"webpage_url"`
|
||||||
|
Categories []string `json:"categories"`
|
||||||
|
Tags []interface{} `json:"tags"`
|
||||||
|
IsLive interface{} `json:"is_live"`
|
||||||
|
LikeCount int `json:"like_count"`
|
||||||
|
DislikeCount int `json:"dislike_count"`
|
||||||
|
Channel string `json:"channel"`
|
||||||
|
Extractor string `json:"extractor"`
|
||||||
|
WebpageURLBasename string `json:"webpage_url_basename"`
|
||||||
|
ExtractorKey string `json:"extractor_key"`
|
||||||
|
Playlist interface{} `json:"playlist"`
|
||||||
|
PlaylistIndex interface{} `json:"playlist_index"`
|
||||||
|
Thumbnail string `json:"thumbnail"`
|
||||||
|
DisplayID string `json:"display_id"`
|
||||||
|
Format string `json:"format"`
|
||||||
|
FormatID string `json:"format_id"`
|
||||||
|
Width int `json:"width"`
|
||||||
|
Height int `json:"height"`
|
||||||
|
Resolution interface{} `json:"resolution"`
|
||||||
|
Fps int `json:"fps"`
|
||||||
|
Vcodec string `json:"vcodec"`
|
||||||
|
Vbr float64 `json:"vbr"`
|
||||||
|
StretchedRatio interface{} `json:"stretched_ratio"`
|
||||||
|
Acodec string `json:"acodec"`
|
||||||
|
Abr float64 `json:"abr"`
|
||||||
|
Ext string `json:"ext"`
|
||||||
|
Fulltitle string `json:"fulltitle"`
|
||||||
|
Filename string `json:"_filename"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseVideoMetadata(metadataPath string) (*ytMetadata, error) {
|
||||||
|
f, err := os.Open(metadataPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Err(err)
|
||||||
|
}
|
||||||
|
// defer the closing of our jsonFile so that we can parse it later on
|
||||||
|
defer f.Close()
|
||||||
|
// read our opened jsonFile as a byte array.
|
||||||
|
byteValue, _ := ioutil.ReadAll(f)
|
||||||
|
|
||||||
|
// we initialize our Users array
|
||||||
|
var m ytMetadata
|
||||||
|
|
||||||
|
// we unmarshal our byteArray which contains our
|
||||||
|
// jsonFile's content into 'users' which we defined above
|
||||||
|
err = json.Unmarshal(byteValue, &m)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Err(err)
|
||||||
|
}
|
||||||
|
return &m, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (v *YoutubeVideo) videoDir() string {
|
func (v *YoutubeVideo) videoDir() string {
|
||||||
return v.dir + "/" + v.id
|
return path.Join(v.dir, v.id)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (v *YoutubeVideo) getDownloadedPath() (string, error) {
|
func (v *YoutubeVideo) getDownloadedPath() (string, error) {
|
||||||
files, err := ioutil.ReadDir(v.videoDir())
|
files, err := ioutil.ReadDir(v.videoDir())
|
||||||
log.Infoln(v.videoDir())
|
log.Infoln(v.videoDir())
|
||||||
|
@ -336,7 +662,7 @@ func (v *YoutubeVideo) getDownloadedPath() (string, error) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if strings.Contains(v.getFullPath(), strings.TrimSuffix(f.Name(), filepath.Ext(f.Name()))) {
|
if strings.Contains(v.getFullPath(), strings.TrimSuffix(f.Name(), filepath.Ext(f.Name()))) {
|
||||||
return v.videoDir() + "/" + f.Name(), nil
|
return path.Join(v.videoDir(), f.Name()), nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return "", errors.Err("could not find any downloaded videos")
|
return "", errors.Err("could not find any downloaded videos")
|
||||||
|
@ -361,14 +687,20 @@ func (v *YoutubeVideo) delete(reason string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (v *YoutubeVideo) triggerThumbnailSave() (err error) {
|
func (v *YoutubeVideo) triggerThumbnailSave() (err error) {
|
||||||
thumbnail := thumbs.GetBestThumbnail(v.youtubeInfo.Snippet.Thumbnails)
|
thumbnail := thumbs.GetBestThumbnail(v.youtubeInfo.Thumbnails)
|
||||||
v.thumbnailURL, err = thumbs.MirrorThumbnail(thumbnail.Url, v.ID(), v.awsConfig)
|
if thumbnail.Width == 0 {
|
||||||
|
return errors.Err("default youtube thumbnail found")
|
||||||
|
}
|
||||||
|
v.thumbnailURL, err = thumbs.MirrorThumbnail(thumbnail.URL, v.ID())
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (v *YoutubeVideo) publish(daemon *jsonrpc.Client, params SyncParams) (*SyncSummary, error) {
|
func (v *YoutubeVideo) publish(daemon *jsonrpc.Client, params SyncParams) (*SyncSummary, error) {
|
||||||
|
start := time.Now()
|
||||||
|
defer func(start time.Time) {
|
||||||
|
timing.TimedComponent("publish").Add(time.Since(start))
|
||||||
|
}(start)
|
||||||
languages, locations, tags := v.getMetadata()
|
languages, locations, tags := v.getMetadata()
|
||||||
|
|
||||||
var fee *jsonrpc.Fee
|
var fee *jsonrpc.Fee
|
||||||
if params.Fee != nil {
|
if params.Fee != nil {
|
||||||
feeAmount, err := decimal.NewFromString(params.Fee.Amount)
|
feeAmount, err := decimal.NewFromString(params.Fee.Amount)
|
||||||
|
@ -381,7 +713,17 @@ func (v *YoutubeVideo) publish(daemon *jsonrpc.Client, params SyncParams) (*Sync
|
||||||
FeeCurrency: jsonrpc.Currency(params.Fee.Currency),
|
FeeCurrency: jsonrpc.Currency(params.Fee.Currency),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
urlsRegex := regexp.MustCompile(`(?m) ?(f|ht)(tp)(s?)(://)(.*)[.|/](.*)`)
|
||||||
|
descriptionSample := urlsRegex.ReplaceAllString(v.description, "")
|
||||||
|
info := whatlanggo.Detect(descriptionSample)
|
||||||
|
info2 := whatlanggo.Detect(v.title)
|
||||||
|
if info.IsReliable() && info.Lang.Iso6391() != "" {
|
||||||
|
language := info.Lang.Iso6391()
|
||||||
|
languages = []string{language}
|
||||||
|
} else if info2.IsReliable() && info2.Lang.Iso6391() != "" {
|
||||||
|
language := info2.Lang.Iso6391()
|
||||||
|
languages = []string{language}
|
||||||
|
}
|
||||||
options := jsonrpc.StreamCreateOptions{
|
options := jsonrpc.StreamCreateOptions{
|
||||||
ClaimCreateOptions: jsonrpc.ClaimCreateOptions{
|
ClaimCreateOptions: jsonrpc.ClaimCreateOptions{
|
||||||
Title: &v.title,
|
Title: &v.title,
|
||||||
|
@ -391,6 +733,9 @@ func (v *YoutubeVideo) publish(daemon *jsonrpc.Client, params SyncParams) (*Sync
|
||||||
ThumbnailURL: &v.thumbnailURL,
|
ThumbnailURL: &v.thumbnailURL,
|
||||||
Tags: tags,
|
Tags: tags,
|
||||||
Locations: locations,
|
Locations: locations,
|
||||||
|
FundingAccountIDs: []string{
|
||||||
|
params.DefaultAccount,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Fee: fee,
|
Fee: fee,
|
||||||
License: util.PtrToString("Copyrighted (contact publisher)"),
|
License: util.PtrToString("Copyrighted (contact publisher)"),
|
||||||
|
@ -414,15 +759,18 @@ type SyncParams struct {
|
||||||
ChannelID string
|
ChannelID string
|
||||||
MaxVideoSize int
|
MaxVideoSize int
|
||||||
Namer *namer.Namer
|
Namer *namer.Namer
|
||||||
MaxVideoLength float64
|
MaxVideoLength time.Duration
|
||||||
Fee *sdk.Fee
|
Fee *shared.Fee
|
||||||
|
DefaultAccount string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (v *YoutubeVideo) Sync(daemon *jsonrpc.Client, params SyncParams, existingVideoData *sdk.SyncedVideo, reprocess bool, walletLock *sync.RWMutex) (*SyncSummary, error) {
|
func (v *YoutubeVideo) Sync(daemon *jsonrpc.Client, params SyncParams, existingVideoData *sdk.SyncedVideo, reprocess bool, walletLock *sync.RWMutex, pbWg *sync.WaitGroup, pb *mpb.Progress) (*SyncSummary, error) {
|
||||||
v.maxVideoSize = int64(params.MaxVideoSize)
|
v.maxVideoSize = int64(params.MaxVideoSize)
|
||||||
v.maxVideoLength = params.MaxVideoLength
|
v.maxVideoLength = params.MaxVideoLength
|
||||||
v.lbryChannelID = params.ChannelID
|
v.lbryChannelID = params.ChannelID
|
||||||
v.walletLock = walletLock
|
v.walletLock = walletLock
|
||||||
|
v.progressBars = pb
|
||||||
|
v.progressBarWg = pbWg
|
||||||
if reprocess && existingVideoData != nil && existingVideoData.Published {
|
if reprocess && existingVideoData != nil && existingVideoData.Published {
|
||||||
summary, err := v.reprocess(daemon, params, existingVideoData)
|
summary, err := v.reprocess(daemon, params, existingVideoData)
|
||||||
return summary, errors.Prefix("upgrade failed", err)
|
return summary, errors.Prefix("upgrade failed", err)
|
||||||
|
@ -430,23 +778,55 @@ func (v *YoutubeVideo) Sync(daemon *jsonrpc.Client, params SyncParams, existingV
|
||||||
return v.downloadAndPublish(daemon, params)
|
return v.downloadAndPublish(daemon, params)
|
||||||
}
|
}
|
||||||
|
|
||||||
var isThrottled bool
|
|
||||||
|
|
||||||
func (v *YoutubeVideo) downloadAndPublish(daemon *jsonrpc.Client, params SyncParams) (*SyncSummary, error) {
|
func (v *YoutubeVideo) downloadAndPublish(daemon *jsonrpc.Client, params SyncParams) (*SyncSummary, error) {
|
||||||
err := v.download(isThrottled)
|
var err error
|
||||||
if err != nil {
|
if v.youtubeInfo == nil {
|
||||||
if strings.Contains(err.Error(), "HTTP Error 429") && !isThrottled {
|
return nil, errors.Err("Video is not available - hardcoded fix")
|
||||||
isThrottled = true
|
|
||||||
err = v.download(isThrottled)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Prefix("download error", err)
|
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
return nil, errors.Prefix("download error", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
log.Debugln("Downloaded " + v.id)
|
|
||||||
|
|
||||||
|
dur := time.Duration(v.youtubeInfo.Duration) * time.Second
|
||||||
|
minDuration := 7 * time.Second
|
||||||
|
|
||||||
|
if v.youtubeInfo.IsLive == true {
|
||||||
|
return nil, errors.Err("video is a live stream and hasn't completed yet")
|
||||||
|
}
|
||||||
|
if v.youtubeInfo.Availability != "public" {
|
||||||
|
return nil, errors.Err("video is not public")
|
||||||
|
}
|
||||||
|
if dur > v.maxVideoLength {
|
||||||
|
logUtils.SendErrorToSlack("%s is %s long and the limit is %s", v.id, dur.String(), v.maxVideoLength.String())
|
||||||
|
return nil, errors.Err("video is too long to process")
|
||||||
|
}
|
||||||
|
if dur < minDuration {
|
||||||
|
logUtils.SendErrorToSlack("%s is %s long and the minimum is %s", v.id, dur.String(), minDuration.String())
|
||||||
|
return nil, errors.Err("video is too short to process")
|
||||||
|
}
|
||||||
|
|
||||||
|
buggedLivestream := v.youtubeInfo.LiveStatus == "post_live"
|
||||||
|
if buggedLivestream && dur >= 2*time.Hour {
|
||||||
|
return nil, errors.Err("livestream is likely bugged as it was recently published and has a length of %s which is more than 2 hours", dur.String())
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
err = v.download()
|
||||||
|
if err != nil && strings.Contains(err.Error(), "HTTP Error 429") {
|
||||||
|
continue
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, errors.Prefix("download error", err)
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
|
defer cancelFn()
|
||||||
|
|
||||||
|
data, err := ffprobe.ProbeURL(ctx, v.getFullPath())
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("failure in probing downloaded video: %s", err.Error())
|
||||||
|
} else {
|
||||||
|
if data.Format.Duration() < minDuration {
|
||||||
|
return nil, errors.Err("video is too short to process")
|
||||||
|
}
|
||||||
|
}
|
||||||
err = v.triggerThumbnailSave()
|
err = v.triggerThumbnailSave()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Prefix("thumbnail error", err)
|
return nil, errors.Prefix("thumbnail error", err)
|
||||||
|
@ -465,31 +845,41 @@ func (v *YoutubeVideo) getMetadata() (languages []string, locations []jsonrpc.Lo
|
||||||
locations = nil
|
locations = nil
|
||||||
tags = nil
|
tags = nil
|
||||||
if !v.mocked {
|
if !v.mocked {
|
||||||
|
/*
|
||||||
if v.youtubeInfo.Snippet.DefaultLanguage != "" {
|
if v.youtubeInfo.Snippet.DefaultLanguage != "" {
|
||||||
languages = []string{v.youtubeInfo.Snippet.DefaultLanguage}
|
if v.youtubeInfo.Snippet.DefaultLanguage == "iw" {
|
||||||
|
v.youtubeInfo.Snippet.DefaultLanguage = "he"
|
||||||
}
|
}
|
||||||
|
languages = []string{v.youtubeInfo.Snippet.DefaultLanguage}
|
||||||
|
}*/
|
||||||
|
|
||||||
if v.youtubeInfo.RecordingDetails != nil && v.youtubeInfo.RecordingDetails.Location != nil {
|
/*if v.youtubeInfo.!= nil && v.youtubeInfo.RecordingDetails.Location != nil {
|
||||||
locations = []jsonrpc.Location{{
|
locations = []jsonrpc.Location{{
|
||||||
Latitude: util.PtrToString(fmt.Sprintf("%.7f", v.youtubeInfo.RecordingDetails.Location.Latitude)),
|
Latitude: util.PtrToString(fmt.Sprintf("%.7f", v.youtubeInfo.RecordingDetails.Location.Latitude)),
|
||||||
Longitude: util.PtrToString(fmt.Sprintf("%.7f", v.youtubeInfo.RecordingDetails.Location.Longitude)),
|
Longitude: util.PtrToString(fmt.Sprintf("%.7f", v.youtubeInfo.RecordingDetails.Location.Longitude)),
|
||||||
}}
|
}}
|
||||||
|
}*/
|
||||||
|
tags = v.youtubeInfo.Tags
|
||||||
}
|
}
|
||||||
tags = v.youtubeInfo.Snippet.Tags
|
tags, err := tags_manager.SanitizeTags(tags, v.youtubeChannelID)
|
||||||
}
|
|
||||||
tags, err := tagsManager.SanitizeTags(tags, v.youtubeChannelID)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorln(err.Error())
|
log.Errorln(err.Error())
|
||||||
}
|
}
|
||||||
if !v.mocked {
|
if !v.mocked {
|
||||||
tags = append(tags, youtubeCategories[v.youtubeInfo.Snippet.CategoryId])
|
for _, category := range v.youtubeInfo.Categories {
|
||||||
|
tags = append(tags, youtubeCategories[category])
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return languages, locations, tags
|
return languages, locations, tags
|
||||||
}
|
}
|
||||||
|
|
||||||
func (v *YoutubeVideo) reprocess(daemon *jsonrpc.Client, params SyncParams, existingVideoData *sdk.SyncedVideo) (*SyncSummary, error) {
|
func (v *YoutubeVideo) reprocess(daemon *jsonrpc.Client, params SyncParams, existingVideoData *sdk.SyncedVideo) (*SyncSummary, error) {
|
||||||
c, err := daemon.ClaimSearch(nil, &existingVideoData.ClaimID, nil, nil)
|
c, err := daemon.ClaimSearch(jsonrpc.ClaimSearchArgs{
|
||||||
|
ClaimID: &existingVideoData.ClaimID,
|
||||||
|
Page: 1,
|
||||||
|
PageSize: 20,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Err(err)
|
return nil, errors.Err(err)
|
||||||
}
|
}
|
||||||
|
@ -507,8 +897,8 @@ func (v *YoutubeVideo) reprocess(daemon *jsonrpc.Client, params SyncParams, exis
|
||||||
if v.mocked {
|
if v.mocked {
|
||||||
return nil, errors.Err("could not find thumbnail for mocked video")
|
return nil, errors.Err("could not find thumbnail for mocked video")
|
||||||
}
|
}
|
||||||
thumbnail := thumbs.GetBestThumbnail(v.youtubeInfo.Snippet.Thumbnails)
|
thumbnail := thumbs.GetBestThumbnail(v.youtubeInfo.Thumbnails)
|
||||||
thumbnailURL, err = thumbs.MirrorThumbnail(thumbnail.Url, v.ID(), v.awsConfig)
|
thumbnailURL, err = thumbs.MirrorThumbnail(thumbnail.URL, v.ID())
|
||||||
} else {
|
} else {
|
||||||
thumbnailURL = thumbs.ThumbnailEndpoint + v.ID()
|
thumbnailURL = thumbs.ThumbnailEndpoint + v.ID()
|
||||||
}
|
}
|
||||||
|
@ -519,8 +909,14 @@ func (v *YoutubeVideo) reprocess(daemon *jsonrpc.Client, params SyncParams, exis
|
||||||
videoSize = uint64(existingVideoData.Size)
|
videoSize = uint64(existingVideoData.Size)
|
||||||
} else {
|
} else {
|
||||||
log.Infof("%s: the video must be republished as we can't get the right size", v.ID())
|
log.Infof("%s: the video must be republished as we can't get the right size", v.ID())
|
||||||
//return v.downloadAndPublish(daemon, params) //TODO: actually republish the video. NB: the current claim should be abandoned first
|
if !v.mocked {
|
||||||
return nil, errors.Err("the video must be republished as we can't get the right size")
|
_, err = daemon.StreamAbandon(currentClaim.Txid, currentClaim.Nout, nil, true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Err(err)
|
||||||
|
}
|
||||||
|
return v.downloadAndPublish(daemon, params)
|
||||||
|
}
|
||||||
|
return nil, errors.Prefix("the video must be republished as we can't get the right size and it doesn't exist on youtube anymore", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
v.size = util.PtrToInt64(int64(videoSize))
|
v.size = util.PtrToInt64(int64(videoSize))
|
||||||
|
@ -542,6 +938,9 @@ func (v *YoutubeVideo) reprocess(daemon *jsonrpc.Client, params SyncParams, exis
|
||||||
ThumbnailURL: &thumbnailURL,
|
ThumbnailURL: &thumbnailURL,
|
||||||
Languages: languages,
|
Languages: languages,
|
||||||
Locations: locations,
|
Locations: locations,
|
||||||
|
FundingAccountIDs: []string{
|
||||||
|
params.DefaultAccount,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Author: util.PtrToString(""),
|
Author: util.PtrToString(""),
|
||||||
License: util.PtrToString("Copyrighted (contact publisher)"),
|
License: util.PtrToString("Copyrighted (contact publisher)"),
|
||||||
|
@ -549,15 +948,18 @@ func (v *YoutubeVideo) reprocess(daemon *jsonrpc.Client, params SyncParams, exis
|
||||||
Height: util.PtrToUint(720),
|
Height: util.PtrToUint(720),
|
||||||
Width: util.PtrToUint(1280),
|
Width: util.PtrToUint(1280),
|
||||||
Fee: fee,
|
Fee: fee,
|
||||||
|
ReleaseTime: util.PtrToInt64(v.publishedAt.Unix()),
|
||||||
}
|
}
|
||||||
|
|
||||||
v.walletLock.RLock()
|
v.walletLock.RLock()
|
||||||
defer v.walletLock.RUnlock()
|
defer v.walletLock.RUnlock()
|
||||||
if v.mocked {
|
if v.mocked {
|
||||||
|
start := time.Now()
|
||||||
pr, err := daemon.StreamUpdate(existingVideoData.ClaimID, jsonrpc.StreamUpdateOptions{
|
pr, err := daemon.StreamUpdate(existingVideoData.ClaimID, jsonrpc.StreamUpdateOptions{
|
||||||
StreamCreateOptions: streamCreateOptions,
|
StreamCreateOptions: streamCreateOptions,
|
||||||
FileSize: &videoSize,
|
FileSize: &videoSize,
|
||||||
})
|
})
|
||||||
|
timing.TimedComponent("StreamUpdate").Add(time.Since(start))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -568,15 +970,11 @@ func (v *YoutubeVideo) reprocess(daemon *jsonrpc.Client, params SyncParams, exis
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
videoDuration, err := duration.FromString(v.youtubeInfo.ContentDetails.Duration)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Err(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
streamCreateOptions.ClaimCreateOptions.Title = &v.title
|
streamCreateOptions.ClaimCreateOptions.Title = &v.title
|
||||||
streamCreateOptions.ClaimCreateOptions.Description = util.PtrToString(v.getAbbrevDescription())
|
streamCreateOptions.ClaimCreateOptions.Description = util.PtrToString(v.getAbbrevDescription())
|
||||||
streamCreateOptions.Duration = util.PtrToUint64(uint64(math.Ceil(videoDuration.ToDuration().Seconds())))
|
streamCreateOptions.Duration = util.PtrToUint64(uint64(v.youtubeInfo.Duration))
|
||||||
streamCreateOptions.ReleaseTime = util.PtrToInt64(v.publishedAt.Unix())
|
streamCreateOptions.ReleaseTime = util.PtrToInt64(v.publishedAt.Unix())
|
||||||
|
start := time.Now()
|
||||||
pr, err := daemon.StreamUpdate(existingVideoData.ClaimID, jsonrpc.StreamUpdateOptions{
|
pr, err := daemon.StreamUpdate(existingVideoData.ClaimID, jsonrpc.StreamUpdateOptions{
|
||||||
ClearLanguages: util.PtrToBool(true),
|
ClearLanguages: util.PtrToBool(true),
|
||||||
ClearLocations: util.PtrToBool(true),
|
ClearLocations: util.PtrToBool(true),
|
||||||
|
@ -584,6 +982,7 @@ func (v *YoutubeVideo) reprocess(daemon *jsonrpc.Client, params SyncParams, exis
|
||||||
StreamCreateOptions: streamCreateOptions,
|
StreamCreateOptions: streamCreateOptions,
|
||||||
FileSize: &videoSize,
|
FileSize: &videoSize,
|
||||||
})
|
})
|
||||||
|
timing.TimedComponent("StreamUpdate").Add(time.Since(start))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
53
sources/youtubeVideo_test.go
Normal file
53
sources/youtubeVideo_test.go
Normal file
|
@ -0,0 +1,53 @@
|
||||||
|
package sources
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/abadojack/whatlanggo"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestLanguageDetection(t *testing.T) {
|
||||||
|
description := `Om lättkränkta muslimer, och den bristande logiken i vad som anses vara att vanära profeten. Från Moderata riksdagspolitikern Hanif Balis podcast "God Ton", avsnitt 108, från oktober 2020, efter terrordådet där en fransk lärare fick huvudet avskuret efter att undervisat sin mångkulturella klass om frihet.`
|
||||||
|
info := whatlanggo.Detect(description)
|
||||||
|
logrus.Infof("confidence: %.2f", info.Confidence)
|
||||||
|
assert.True(t, info.IsReliable())
|
||||||
|
assert.True(t, info.Lang.Iso6391() != "")
|
||||||
|
assert.Equal(t, "sv", info.Lang.Iso6391())
|
||||||
|
|
||||||
|
description = `🥳週四直播 | 晚上來開個賽車🔰歡迎各位一起來玩! - PonPonLin蹦蹦林`
|
||||||
|
info = whatlanggo.Detect(description)
|
||||||
|
logrus.Infof("confidence: %.2f", info.Confidence)
|
||||||
|
assert.True(t, info.IsReliable())
|
||||||
|
assert.True(t, info.Lang.Iso6391() != "")
|
||||||
|
assert.Equal(t, "zh", info.Lang.Iso6391())
|
||||||
|
|
||||||
|
description = `成為這個頻道的會員並獲得獎勵:
|
||||||
|
https://www.youtube.com/channel/UCOQFrooz-YGHjYb7s3-MrsQ/join
|
||||||
|
_____________________________________________
|
||||||
|
想聽我既音樂作品可以去下面LINK
|
||||||
|
streetvoice 街聲:
|
||||||
|
https://streetvoice.com/CTLam331/
|
||||||
|
_____________________________________________
|
||||||
|
想學結他、鋼琴
|
||||||
|
有關音樂制作工作
|
||||||
|
都可以搵我~
|
||||||
|
大家快D訂閱喇
|
||||||
|
不定期出片
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
Website: http://ctlam331.wixsite.com/ctlamusic
|
||||||
|
FB PAGE:https://www.facebook.com/ctlam331
|
||||||
|
IG:ctlamusic`
|
||||||
|
urlsRegex := regexp.MustCompile(`(?m) ?(f|ht)(tp)(s?)(://)(.*)[.|/](.*)`)
|
||||||
|
descriptionSample := urlsRegex.ReplaceAllString(description, "")
|
||||||
|
info = whatlanggo.Detect(descriptionSample)
|
||||||
|
logrus.Infof("confidence: %.2f", info.Confidence)
|
||||||
|
assert.True(t, info.IsReliable())
|
||||||
|
assert.True(t, info.Lang.Iso6391() != "")
|
||||||
|
assert.Equal(t, "zh", info.Lang.Iso6391())
|
||||||
|
}
|
|
@ -1,4 +1,4 @@
|
||||||
package tagsManager
|
package tags_manager
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"regexp"
|
"regexp"
|
|
@ -1,4 +1,4 @@
|
||||||
package tagsManager
|
package tags_manager
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
|
@ -4,14 +4,17 @@ import (
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/lbryio/errors.go"
|
"github.com/lbryio/ytsync/v5/configs"
|
||||||
|
"github.com/lbryio/ytsync/v5/downloader/ytdl"
|
||||||
|
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
"github.com/aws/aws-sdk-go/aws/session"
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"google.golang.org/api/youtube/v3"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type thumbnailUploader struct {
|
type thumbnailUploader struct {
|
||||||
|
@ -31,7 +34,9 @@ func (u *thumbnailUploader) downloadThumbnail() error {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
defer img.Close()
|
defer img.Close()
|
||||||
|
if strings.HasPrefix(u.originalUrl, "//") {
|
||||||
|
u.originalUrl = "https:" + u.originalUrl
|
||||||
|
}
|
||||||
resp, err := http.Get(u.originalUrl)
|
resp, err := http.Get(u.originalUrl)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
|
@ -66,7 +71,9 @@ func (u *thumbnailUploader) uploadThumbnail() error {
|
||||||
Body: thumb,
|
Body: thumb,
|
||||||
ACL: aws.String("public-read"),
|
ACL: aws.String("public-read"),
|
||||||
ContentType: aws.String("image/jpeg"),
|
ContentType: aws.String("image/jpeg"),
|
||||||
|
CacheControl: aws.String("public, max-age=2592000"),
|
||||||
})
|
})
|
||||||
|
|
||||||
u.mirroredUrl = ThumbnailEndpoint + u.name
|
u.mirroredUrl = ThumbnailEndpoint + u.name
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
|
@ -77,11 +84,11 @@ func (u *thumbnailUploader) deleteTmpFile() {
|
||||||
log.Infof("failed to delete local thumbnail file: %s", err.Error())
|
log.Infof("failed to delete local thumbnail file: %s", err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
func MirrorThumbnail(url string, name string, s3Config aws.Config) (string, error) {
|
func MirrorThumbnail(url string, name string) (string, error) {
|
||||||
tu := thumbnailUploader{
|
tu := thumbnailUploader{
|
||||||
originalUrl: url,
|
originalUrl: url,
|
||||||
name: name,
|
name: name,
|
||||||
s3Config: s3Config,
|
s3Config: *configs.Configuration.AWSThumbnailsS3Config.GetS3AWSConfig(),
|
||||||
}
|
}
|
||||||
err := tu.downloadThumbnail()
|
err := tu.downloadThumbnail()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -94,18 +101,26 @@ func MirrorThumbnail(url string, name string, s3Config aws.Config) (string, erro
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//this is our own S3 storage
|
||||||
|
tu2 := thumbnailUploader{
|
||||||
|
originalUrl: url,
|
||||||
|
name: name,
|
||||||
|
s3Config: *configs.Configuration.ThumbnailsS3Config.GetS3AWSConfig(),
|
||||||
|
}
|
||||||
|
err = tu2.uploadThumbnail()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
return tu.mirroredUrl, nil
|
return tu.mirroredUrl, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetBestThumbnail(thumbnails *youtube.ThumbnailDetails) *youtube.Thumbnail {
|
func GetBestThumbnail(thumbnails []ytdl.Thumbnail) *ytdl.Thumbnail {
|
||||||
if thumbnails.Maxres != nil {
|
var bestWidth ytdl.Thumbnail
|
||||||
return thumbnails.Maxres
|
for _, thumbnail := range thumbnails {
|
||||||
} else if thumbnails.High != nil {
|
if bestWidth.Width < thumbnail.Width {
|
||||||
return thumbnails.High
|
bestWidth = thumbnail
|
||||||
} else if thumbnails.Medium != nil {
|
|
||||||
return thumbnails.Medium
|
|
||||||
} else if thumbnails.Standard != nil {
|
|
||||||
return thumbnails.Standard
|
|
||||||
}
|
}
|
||||||
return thumbnails.Default
|
}
|
||||||
|
return &bestWidth
|
||||||
}
|
}
|
||||||
|
|
114
timing/timing.go
Normal file
114
timing/timing.go
Normal file
|
@ -0,0 +1,114 @@
|
||||||
|
package timing
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/lbryio/ytsync/v5/metrics"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Timing struct {
|
||||||
|
component string
|
||||||
|
milliseconds int64
|
||||||
|
min int64
|
||||||
|
max int64
|
||||||
|
invocations int32
|
||||||
|
}
|
||||||
|
|
||||||
|
var timings *sync.Map
|
||||||
|
|
||||||
|
func TimedComponent(component string) *Timing {
|
||||||
|
if timings == nil {
|
||||||
|
timings = &sync.Map{}
|
||||||
|
}
|
||||||
|
stored, _ := timings.LoadOrStore(component, &Timing{
|
||||||
|
component: component,
|
||||||
|
milliseconds: 0,
|
||||||
|
min: int64(99999999),
|
||||||
|
})
|
||||||
|
t, _ := stored.(*Timing)
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
func ClearTimings() {
|
||||||
|
if timings == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
timings.Range(func(key interface{}, value interface{}) bool {
|
||||||
|
timings.Delete(key)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func Report() {
|
||||||
|
var totalTime time.Duration
|
||||||
|
timings.Range(func(key interface{}, value interface{}) bool {
|
||||||
|
totalTime += value.(*Timing).Get()
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
timings.Range(func(key interface{}, value interface{}) bool {
|
||||||
|
component := key
|
||||||
|
componentRuntime := value.(*Timing).Get().String()
|
||||||
|
percentTime := float64(value.(*Timing).Get()) / float64(totalTime) * 100
|
||||||
|
invocations := value.(*Timing).Invocations()
|
||||||
|
avgTime := (time.Duration(int64(float64(value.(*Timing).Get()) / float64(value.(*Timing).Invocations())))).String()
|
||||||
|
minRuntime := value.(*Timing).Min().String()
|
||||||
|
maxRuntime := value.(*Timing).Max().String()
|
||||||
|
logrus.Printf("component %s ran for %s (%.2f%% of the total time) - invoked %d times with an average of %s per call, a minimum of %s and a maximum of %s",
|
||||||
|
component,
|
||||||
|
componentRuntime,
|
||||||
|
percentTime,
|
||||||
|
invocations,
|
||||||
|
avgTime,
|
||||||
|
minRuntime,
|
||||||
|
maxRuntime,
|
||||||
|
)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Timing) Add(d time.Duration) {
|
||||||
|
metrics.Durations.WithLabelValues(t.component).Observe(d.Seconds())
|
||||||
|
atomic.AddInt64(&t.milliseconds, d.Milliseconds())
|
||||||
|
for {
|
||||||
|
oldMin := atomic.LoadInt64(&t.min)
|
||||||
|
if d.Milliseconds() < oldMin {
|
||||||
|
if atomic.CompareAndSwapInt64(&t.min, oldMin, d.Milliseconds()) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
oldMax := atomic.LoadInt64(&t.max)
|
||||||
|
if d.Milliseconds() > oldMax {
|
||||||
|
if atomic.CompareAndSwapInt64(&t.max, oldMax, d.Milliseconds()) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
atomic.AddInt32(&t.invocations, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Timing) Get() time.Duration {
|
||||||
|
ms := atomic.LoadInt64(&t.milliseconds)
|
||||||
|
return time.Duration(ms) * time.Millisecond
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Timing) Invocations() int32 {
|
||||||
|
return atomic.LoadInt32(&t.invocations)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Timing) Min() time.Duration {
|
||||||
|
ms := atomic.LoadInt64(&t.min)
|
||||||
|
return time.Duration(ms) * time.Millisecond
|
||||||
|
}
|
||||||
|
func (t *Timing) Max() time.Duration {
|
||||||
|
ms := atomic.LoadInt64(&t.max)
|
||||||
|
return time.Duration(ms) * time.Millisecond
|
||||||
|
}
|
109
util/archive.go
Normal file
109
util/archive.go
Normal file
|
@ -0,0 +1,109 @@
|
||||||
|
package util
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"io"
|
||||||
|
"io/fs"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
func CreateTarball(tarballFilePath string, filePaths []string) error {
|
||||||
|
file, err := os.Create(tarballFilePath)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err("Could not create tarball file '%s', got error '%s'", tarballFilePath, err.Error())
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
tarWriter := tar.NewWriter(file)
|
||||||
|
defer tarWriter.Close()
|
||||||
|
|
||||||
|
for _, filePath := range filePaths {
|
||||||
|
err := addFileToTarWriter(filePath, tarWriter)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err("Could not add file '%s', to tarball, got error '%s'", filePath, err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func addFileToTarWriter(filePath string, tarWriter *tar.Writer) error {
|
||||||
|
file, err := os.Open(filePath)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err("Could not open file '%s', got error '%s'", filePath, err.Error())
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
stat, err := file.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err("Could not get stat for file '%s', got error '%s'", filePath, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
header := &tar.Header{
|
||||||
|
Name: stat.Name(),
|
||||||
|
Size: stat.Size(),
|
||||||
|
Mode: int64(stat.Mode()),
|
||||||
|
ModTime: stat.ModTime(),
|
||||||
|
}
|
||||||
|
|
||||||
|
err = tarWriter.WriteHeader(header)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err("Could not write header for file '%s', got error '%s'", filePath, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = io.Copy(tarWriter, file)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err("Could not copy the file '%s' data to the tarball, got error '%s'", filePath, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func Untar(tarball, target string) error {
|
||||||
|
reader, err := os.Open(tarball)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
defer reader.Close()
|
||||||
|
tarReader := tar.NewReader(reader)
|
||||||
|
|
||||||
|
for {
|
||||||
|
header, err := tarReader.Next()
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
} else if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
path := filepath.Join(target, header.Name)
|
||||||
|
info := header.FileInfo()
|
||||||
|
if info.IsDir() {
|
||||||
|
if err = os.MkdirAll(path, info.Mode()); err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
err = extractFile(path, info, tarReader)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractFile(path string, info fs.FileInfo, tarReader *tar.Reader) error {
|
||||||
|
file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode())
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
_, err = io.Copy(file, tarReader)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -3,7 +3,7 @@ package util
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/lbryio/lbry.go/extras/util"
|
"github.com/lbryio/lbry.go/v2/extras/util"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -14,7 +14,9 @@ func SendErrorToSlack(format string, a ...interface{}) {
|
||||||
message = fmt.Sprintf(format, a...)
|
message = fmt.Sprintf(format, a...)
|
||||||
}
|
}
|
||||||
log.Errorln(message)
|
log.Errorln(message)
|
||||||
err := util.SendToSlack(":sos: " + message)
|
log.SetLevel(log.InfoLevel) //I don't want to change the underlying lib so this will do...
|
||||||
|
err := util.SendToSlack(":sos: ```" + message + "```")
|
||||||
|
log.SetLevel(log.DebugLevel)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorln(err)
|
log.Errorln(err)
|
||||||
}
|
}
|
||||||
|
@ -27,7 +29,9 @@ func SendInfoToSlack(format string, a ...interface{}) {
|
||||||
message = fmt.Sprintf(format, a...)
|
message = fmt.Sprintf(format, a...)
|
||||||
}
|
}
|
||||||
log.Infoln(message)
|
log.Infoln(message)
|
||||||
|
log.SetLevel(log.InfoLevel) //I don't want to change the underlying lib so this will do...
|
||||||
err := util.SendToSlack(":information_source: " + message)
|
err := util.SendToSlack(":information_source: " + message)
|
||||||
|
log.SetLevel(log.DebugLevel)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorln(err)
|
log.Errorln(err)
|
||||||
}
|
}
|
||||||
|
|
137
util/util.go
137
util/util.go
|
@ -7,9 +7,12 @@ import (
|
||||||
"os/user"
|
"os/user"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/lbryio/lbry.go/extras/errors"
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||||
"github.com/lbryio/lbry.go/lbrycrd"
|
"github.com/lbryio/lbry.go/v2/lbrycrd"
|
||||||
|
"github.com/lbryio/ytsync/v5/configs"
|
||||||
|
"github.com/lbryio/ytsync/v5/timing"
|
||||||
|
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
|
@ -49,6 +52,19 @@ func GetLBRYNetDir() string {
|
||||||
return lbrynetDir
|
return lbrynetDir
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func GetLbryumDir() string {
|
||||||
|
lbryumDir := os.Getenv("LBRYUM_DIR")
|
||||||
|
if lbryumDir == "" {
|
||||||
|
usr, err := user.Current()
|
||||||
|
if err != nil {
|
||||||
|
log.Errorln(err.Error())
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return usr.HomeDir + "/.lbryum/"
|
||||||
|
}
|
||||||
|
return lbryumDir + "/"
|
||||||
|
}
|
||||||
|
|
||||||
const ALL = true
|
const ALL = true
|
||||||
const ONLINE = false
|
const ONLINE = false
|
||||||
|
|
||||||
|
@ -94,15 +110,20 @@ func IsRegTest() bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetLbrycrdClient(lbrycrdString string) (*lbrycrd.Client, error) {
|
func GetLbrycrdClient(lbrycrdString string) (*lbrycrd.Client, error) {
|
||||||
|
chainName := os.Getenv("CHAINNAME")
|
||||||
|
chainParams, ok := lbrycrd.ChainParamsMap[chainName]
|
||||||
|
if !ok {
|
||||||
|
chainParams = lbrycrd.MainNetParams
|
||||||
|
}
|
||||||
var lbrycrdd *lbrycrd.Client
|
var lbrycrdd *lbrycrd.Client
|
||||||
var err error
|
var err error
|
||||||
if lbrycrdString == "" {
|
if lbrycrdString == "" {
|
||||||
lbrycrdd, err = lbrycrd.NewWithDefaultURL()
|
lbrycrdd, err = lbrycrd.NewWithDefaultURL(&chainParams)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
lbrycrdd, err = lbrycrd.New(lbrycrdString)
|
lbrycrdd, err = lbrycrd.New(lbrycrdString, &chainParams)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -165,18 +186,18 @@ func CleanForStartup() error {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
lbrycrd, err := GetLbrycrdClient(os.Getenv("LBRYCRD_STRING"))
|
lbrycrd, err := GetLbrycrdClient(configs.Configuration.LbrycrdString)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Prefix("error getting lbrycrd client: ", err)
|
return errors.Prefix("error getting lbrycrd client", err)
|
||||||
}
|
}
|
||||||
height, err := lbrycrd.GetBlockCount()
|
height, err := lbrycrd.GetBlockCount()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
const minBlocksForUTXO = 110
|
const minBlocksForUTXO = 200
|
||||||
if height < minBlocksForUTXO {
|
if height < minBlocksForUTXO {
|
||||||
//Start reg test will some credits
|
//Start reg test with some credits
|
||||||
txs, err := lbrycrd.Generate(uint32(110) - uint32(height))
|
txs, err := lbrycrd.Generate(uint32(minBlocksForUTXO) - uint32(height))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
|
@ -220,10 +241,66 @@ func CleanupLbrynet() error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Err(err)
|
return errors.Err(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
lbryumDir := GetLbryumDir()
|
||||||
|
ledger := "lbc_mainnet"
|
||||||
|
if IsRegTest() {
|
||||||
|
ledger = "lbc_regtest"
|
||||||
|
}
|
||||||
|
lbryumDir = lbryumDir + ledger
|
||||||
|
|
||||||
|
files, err = filepath.Glob(lbryumDir + "/blockchain.db*")
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
for _, f := range files {
|
||||||
|
err = os.Remove(f)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var metadataDirInitialized = false
|
||||||
|
|
||||||
|
func GetVideoMetadataDir() string {
|
||||||
|
dir := "./videos_metadata"
|
||||||
|
if !metadataDirInitialized {
|
||||||
|
metadataDirInitialized = true
|
||||||
|
_ = os.MkdirAll(dir, 0755)
|
||||||
|
}
|
||||||
|
return dir
|
||||||
|
}
|
||||||
|
|
||||||
|
func CleanupMetadata() error {
|
||||||
|
dir := GetVideoMetadataDir()
|
||||||
|
err := os.RemoveAll(dir)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Err(err)
|
||||||
|
}
|
||||||
|
metadataDirInitialized = false
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SleepUntilQuotaReset blocks until shortly after midnight Pacific time
// ("quota reset", per the log message below).
func SleepUntilQuotaReset() {
	// NOTE(review): the LoadLocation error is discarded; this assumes tzdata
	// is available at runtime — if it is not, PST is nil and time.Date below
	// would panic. Confirm deployment environments ship zoneinfo.
	PST, _ := time.LoadLocation("America/Los_Angeles")
	t := time.Now().In(PST)
	// Hour 24 is normalized by time.Date to 00:02 of the *following* day,
	// so n is always in the future relative to t.
	n := time.Date(t.Year(), t.Month(), t.Day(), 24, 2, 0, 0, PST)
	d := n.Sub(t)
	// Defensive guard: with hour 24 above, d should never be negative, but
	// push the target one more day out if it ever is.
	if d < 0 {
		n = n.Add(24 * time.Hour)
		d = n.Sub(t)
	}
	log.Infof("gotta sleep %s until the quota resets", d.String())
	time.Sleep(d)
}
|
||||||
|
|
||||||
func StartDaemon() error {
|
func StartDaemon() error {
|
||||||
|
start := time.Now()
|
||||||
|
defer func(start time.Time) {
|
||||||
|
timing.TimedComponent("startDaemon").Add(time.Since(start))
|
||||||
|
}(start)
|
||||||
if IsUsingDocker() {
|
if IsUsingDocker() {
|
||||||
return startDaemonViaDocker()
|
return startDaemonViaDocker()
|
||||||
}
|
}
|
||||||
|
@ -231,6 +308,10 @@ func StartDaemon() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func StopDaemon() error {
|
func StopDaemon() error {
|
||||||
|
start := time.Now()
|
||||||
|
defer func(start time.Time) {
|
||||||
|
timing.TimedComponent("stopDaemon").Add(time.Since(start))
|
||||||
|
}(start)
|
||||||
if IsUsingDocker() {
|
if IsUsingDocker() {
|
||||||
return stopDaemonViaDocker()
|
return stopDaemonViaDocker()
|
||||||
}
|
}
|
||||||
|
@ -297,9 +378,45 @@ func GetDefaultWalletPath() string {
|
||||||
defaultWalletDir = os.Getenv("HOME") + "/.lbryum_regtest/wallets/default_wallet"
|
defaultWalletDir = os.Getenv("HOME") + "/.lbryum_regtest/wallets/default_wallet"
|
||||||
}
|
}
|
||||||
|
|
||||||
walletPath := os.Getenv("LBRYNET_WALLETS_DIR")
|
walletPath := os.Getenv("LBRYUM_DIR")
|
||||||
if walletPath != "" {
|
if walletPath != "" {
|
||||||
defaultWalletDir = walletPath + "/wallets/default_wallet"
|
defaultWalletDir = walletPath + "/wallets/default_wallet"
|
||||||
}
|
}
|
||||||
return defaultWalletDir
|
return defaultWalletDir
|
||||||
}
|
}
|
||||||
|
func GetBlockchainDBPath() string {
|
||||||
|
lbryumDir := os.Getenv("LBRYUM_DIR")
|
||||||
|
if lbryumDir == "" {
|
||||||
|
if IsRegTest() {
|
||||||
|
lbryumDir = os.Getenv("HOME") + "/.lbryum_regtest"
|
||||||
|
} else {
|
||||||
|
lbryumDir = os.Getenv("HOME") + "/.lbryum"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
defaultDB := lbryumDir + "/lbc_mainnet/blockchain.db"
|
||||||
|
if IsRegTest() {
|
||||||
|
defaultDB = lbryumDir + "/lbc_regtest/blockchain.db"
|
||||||
|
}
|
||||||
|
return defaultDB
|
||||||
|
}
|
||||||
|
func GetBlockchainDirectoryName() string {
|
||||||
|
ledger := "lbc_mainnet"
|
||||||
|
if IsRegTest() {
|
||||||
|
ledger = "lbc_regtest"
|
||||||
|
}
|
||||||
|
return ledger
|
||||||
|
}
|
||||||
|
|
||||||
|
func DirSize(path string) (int64, error) {
|
||||||
|
var size int64
|
||||||
|
err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !info.IsDir() {
|
||||||
|
size += info.Size()
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
return size, err
|
||||||
|
}
|
||||||
|
|
388
ytapi/ytapi.go
Normal file
388
ytapi/ytapi.go
Normal file
|
@ -0,0 +1,388 @@
|
||||||
|
package ytapi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/lbryio/ytsync/v5/shared"
|
||||||
|
logUtils "github.com/lbryio/ytsync/v5/util"
|
||||||
|
"github.com/vbauerster/mpb/v7"
|
||||||
|
|
||||||
|
"github.com/lbryio/ytsync/v5/downloader/ytdl"
|
||||||
|
|
||||||
|
"github.com/lbryio/ytsync/v5/downloader"
|
||||||
|
"github.com/lbryio/ytsync/v5/ip_manager"
|
||||||
|
"github.com/lbryio/ytsync/v5/sdk"
|
||||||
|
"github.com/lbryio/ytsync/v5/sources"
|
||||||
|
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/jsonrpc"
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/stop"
|
||||||
|
"github.com/lbryio/lbry.go/v2/extras/util"
|
||||||
|
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Video abstracts a single syncable video. Both real YouTube videos
// (sources.NewYoutubeVideo) and mocked placeholders (sources.NewMockedVideo)
// satisfy it — see GetVideosToSync below.
type Video interface {
	// Size returns the size in bytes, or nil when unknown.
	Size() *int64
	// ID returns the video identifier.
	ID() string
	// IDAndNum returns the ID combined with a position marker.
	IDAndNum() string
	// PlaylistPosition returns the video's index within its playlist.
	PlaylistPosition() int
	// PublishedAt returns the publication timestamp; byPublishedAt sorts on it.
	PublishedAt() time.Time
	// Sync performs the actual synchronization of this video; see the
	// sources package for the concrete implementations.
	Sync(*jsonrpc.Client, sources.SyncParams, *sdk.SyncedVideo, bool, *sync.RWMutex, *sync.WaitGroup, *mpb.Progress) (*sources.SyncSummary, error)
}
|
||||||
|
|
||||||
|
// byPublishedAt implements sort.Interface over a slice of Videos, ordering
// them by ascending publication time.
type byPublishedAt []Video

func (a byPublishedAt) Len() int           { return len(a) }
func (a byPublishedAt) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byPublishedAt) Less(i, j int) bool { return a[i].PublishedAt().Before(a[j].PublishedAt()) }
|
||||||
|
|
||||||
|
// VideoParams bundles per-channel context passed through to video
// construction and download calls.
type VideoParams struct {
	// VideoDir is the local directory handed to the sources constructors.
	VideoDir string
	// Stopper signals interruption; its channel is passed to the downloader
	// and checked in getVideos.
	Stopper *stop.Group
	// IPPool is forwarded to the downloader calls — presumably a pool of
	// source IPs for YouTube requests (see ip_manager; confirm semantics).
	IPPool *ip_manager.IPPool
}
|
||||||
|
|
||||||
|
// mostRecentlyFailedChannel remembers the last channel for which the
// playlist came back empty; a second consecutive empty result for the same
// channel is treated as an error. TODO: fix this hack!
var mostRecentlyFailedChannel string

// GetVideosToSync assembles the list of Videos to sync for channelID:
// playlist IDs from the downloader (minus never-retry failures), the last
// uploaded video recorded in the database, and mocked entries for known
// videos that need a metadata upgrade. The result is sorted by publication
// time, oldest first.
func GetVideosToSync(channelID string, syncedVideos map[string]sdk.SyncedVideo, quickSync bool, maxVideos int, videoParams VideoParams, lastUploadedVideo string) ([]Video, error) {
	var videos []Video
	// Quick syncs are capped at 50 videos.
	if quickSync && maxVideos > 50 {
		maxVideos = 50
	}
	allVideos, err := downloader.GetPlaylistVideoIDs(channelID, maxVideos, videoParams.Stopper.Ch(), videoParams.IPPool)
	if err != nil {
		return nil, errors.Err(err)
	}
	// Drop videos whose recorded failure reason means they should never be
	// retried.
	videoIDs := make([]string, 0, len(allVideos))
	for _, video := range allVideos {
		sv, ok := syncedVideos[video]
		if ok && util.SubstringInSlice(sv.FailureReason, shared.NeverRetryFailures) {
			continue
		}
		videoIDs = append(videoIDs, video)
	}
	log.Infof("Got info for %d videos from youtube downloader", len(videoIDs))

	// Map each video ID to its position in the playlist.
	playlistMap := make(map[string]int64)
	for i, videoID := range videoIDs {
		playlistMap[videoID] = int64(i)
	}
	// Ensure we at least try to sync the video that was marked as the last
	// uploaded video in the database, even if the playlist omitted it.
	if lastUploadedVideo != "" {
		_, ok := playlistMap[lastUploadedVideo]
		if !ok {
			playlistMap[lastUploadedVideo] = 0
			videoIDs = append(videoIDs, lastUploadedVideo)
		}
	}

	// An empty playlist is tolerated once per channel; a repeat for the
	// same channel is reported as an error (see mostRecentlyFailedChannel).
	if len(videoIDs) < 1 {
		if channelID == mostRecentlyFailedChannel {
			return nil, errors.Err("playlist items not found")
		}
		mostRecentlyFailedChannel = channelID
	}

	vids, err := getVideos(channelID, videoIDs, videoParams.Stopper.Ch(), videoParams.IPPool)
	if err != nil {
		return nil, err
	}

	// Wrap each downloaded item in a YoutubeVideo carrying its playlist
	// position.
	for _, item := range vids {
		positionInList := playlistMap[item.ID]
		videoToAdd, err := sources.NewYoutubeVideo(videoParams.VideoDir, item, positionInList, videoParams.Stopper, videoParams.IPPool)
		if err != nil {
			return nil, errors.Err(err)
		}
		videos = append(videos, videoToAdd)
	}

	// Add mocked videos for previously-synced entries that are absent from
	// the playlist, unless they are unpublished and already at (or above)
	// the current metadata version.
	for k, v := range syncedVideos {
		newMetadataVersion := int8(2)
		if !v.Published && v.MetadataVersion >= newMetadataVersion {
			continue
		}
		if _, ok := playlistMap[k]; !ok {
			videos = append(videos, sources.NewMockedVideo(videoParams.VideoDir, k, channelID, videoParams.Stopper, videoParams.IPPool))
		}
	}

	sort.Sort(byPublishedAt(videos))

	return videos, nil
}
|
||||||
|
|
||||||
|
// CountVideosInChannel is unused for now... keeping it here just in case
|
||||||
|
func CountVideosInChannel(channelID string) (int, error) {
|
||||||
|
url := "https://socialblade.com/youtube/channel/" + channelID
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("GET", url, nil)
|
||||||
|
|
||||||
|
req.Header.Add("User-Agent", downloader.ChromeUA)
|
||||||
|
req.Header.Add("Accept", "*/*")
|
||||||
|
req.Header.Add("Host", "socialblade.com")
|
||||||
|
|
||||||
|
res, err := http.DefaultClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return 0, errors.Err(err)
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
|
||||||
|
var line string
|
||||||
|
scanner := bufio.NewScanner(res.Body)
|
||||||
|
for scanner.Scan() {
|
||||||
|
if strings.Contains(scanner.Text(), "youtube-stats-header-uploads") {
|
||||||
|
line = scanner.Text()
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
if line == "" {
|
||||||
|
return 0, errors.Err("upload count line not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
matches := regexp.MustCompile(">([0-9]+)<").FindStringSubmatch(line)
|
||||||
|
if len(matches) != 2 {
|
||||||
|
return 0, errors.Err("upload count not found with regex")
|
||||||
|
}
|
||||||
|
|
||||||
|
num, err := strconv.Atoi(matches[1])
|
||||||
|
if err != nil {
|
||||||
|
return 0, errors.Err(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return num, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func ChannelInfo(channelID string) (*YoutubeStatsResponse, error) {
|
||||||
|
url := "https://www.youtube.com/channel/" + channelID + "/about"
|
||||||
|
|
||||||
|
req, _ := http.NewRequest("GET", url, nil)
|
||||||
|
|
||||||
|
req.Header.Add("User-Agent", downloader.ChromeUA)
|
||||||
|
req.Header.Add("Accept", "*/*")
|
||||||
|
|
||||||
|
res, err := http.DefaultClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Err(err)
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
body, err := ioutil.ReadAll(res.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Err(err)
|
||||||
|
}
|
||||||
|
pageBody := string(body)
|
||||||
|
dataStartIndex := strings.Index(pageBody, "window[\"ytInitialData\"] = ") + 26
|
||||||
|
if dataStartIndex == 25 {
|
||||||
|
dataStartIndex = strings.Index(pageBody, "var ytInitialData = ") + 20
|
||||||
|
}
|
||||||
|
dataEndIndex := strings.Index(pageBody, "]}}};") + 4
|
||||||
|
if dataEndIndex < dataStartIndex {
|
||||||
|
return nil, errors.Err("start index is lower than end index. cannot extract channel info!")
|
||||||
|
}
|
||||||
|
data := pageBody[dataStartIndex:dataEndIndex]
|
||||||
|
var decodedResponse YoutubeStatsResponse
|
||||||
|
err = json.Unmarshal([]byte(data), &decodedResponse)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Err(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &decodedResponse, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getVideos resolves full ytdl metadata for each ID in videoIDs. IDs whose
// state is already "published" are skipped; IDs whose lookup fails are
// marked "failed" via the APIs config and reported to Slack rather than
// aborting the whole batch. A signal on stopChan aborts early, returning
// the videos gathered so far together with an error.
func getVideos(channelID string, videoIDs []string, stopChan stop.Chan, ipPool *ip_manager.IPPool) ([]*ytdl.YtdlVideo, error) {
	config := sdk.GetAPIsConfigs()
	var videos []*ytdl.YtdlVideo
	for _, videoID := range videoIDs {
		// Skip obviously malformed IDs.
		if len(videoID) < 5 {
			continue
		}
		// Non-blocking check for a stop request between lookups.
		select {
		case <-stopChan:
			return videos, errors.Err("interrupted by user")
		default:
		}

		state, err := config.VideoState(videoID)
		if err != nil {
			return nil, errors.Err(err)
		}
		if state == "published" {
			continue
		}
		video, err := downloader.GetVideoInformation(videoID, stopChan, ipPool)
		if err != nil {
			// Record the failure remotely and notify, but keep processing the
			// remaining IDs; only a failure to record is fatal.
			errSDK := config.MarkVideoStatus(shared.VideoStatus{
				ChannelID:     channelID,
				VideoID:       videoID,
				Status:        "failed",
				FailureReason: err.Error(),
			})
			logUtils.SendErrorToSlack(fmt.Sprintf("Skipping video (%s): %s", videoID, errors.FullTrace(err)))
			if errSDK != nil {
				return nil, errors.Err(errSDK)
			}
		} else {
			videos = append(videos, video)
		}
	}
	return videos, nil
}
|
||||||
|
|
||||||
|
// YoutubeStatsResponse mirrors the subset of YouTube's inline ytInitialData
// JSON that ChannelInfo extracts from a channel's "about" page. The nested
// anonymous structs follow the page's renderer hierarchy; only the fields
// read by this project are declared. Shaped after YouTube's page markup,
// which can change without notice.
type YoutubeStatsResponse struct {
	// Contents holds the "about" tab's channelAboutFullMetadataRenderer
	// (description, view count, join date, canonical URL, avatar, ...).
	Contents struct {
		TwoColumnBrowseResultsRenderer struct {
			Tabs []struct {
				TabRenderer struct {
					Title    string `json:"title"`
					Selected bool   `json:"selected"`
					Content  struct {
						SectionListRenderer struct {
							Contents []struct {
								ItemSectionRenderer struct {
									Contents []struct {
										ChannelAboutFullMetadataRenderer struct {
											Description struct {
												SimpleText string `json:"simpleText"`
											} `json:"description"`
											ViewCountText struct {
												SimpleText string `json:"simpleText"`
											} `json:"viewCountText"`
											JoinedDateText struct {
												Runs []struct {
													Text string `json:"text"`
												} `json:"runs"`
											} `json:"joinedDateText"`
											CanonicalChannelURL        string `json:"canonicalChannelUrl"`
											BypassBusinessEmailCaptcha bool   `json:"bypassBusinessEmailCaptcha"`
											Title struct {
												SimpleText string `json:"simpleText"`
											} `json:"title"`
											Avatar struct {
												Thumbnails []struct {
													URL    string `json:"url"`
													Width  int    `json:"width"`
													Height int    `json:"height"`
												} `json:"thumbnails"`
											} `json:"avatar"`
											ShowDescription  bool `json:"showDescription"`
											DescriptionLabel struct {
												Runs []struct {
													Text string `json:"text"`
												} `json:"runs"`
											} `json:"descriptionLabel"`
											DetailsLabel struct {
												Runs []struct {
													Text string `json:"text"`
												} `json:"runs"`
											} `json:"detailsLabel"`
											ChannelID string `json:"channelId"`
										} `json:"channelAboutFullMetadataRenderer"`
									} `json:"contents"`
								} `json:"itemSectionRenderer"`
							} `json:"contents"`
						} `json:"sectionListRenderer"`
					} `json:"content"`
				} `json:"tabRenderer"`
			} `json:"tabs"`
		} `json:"twoColumnBrowseResultsRenderer"`
	} `json:"contents"`
	// Header carries the channel banner/avatar and subscriber count text.
	Header struct {
		C4TabbedHeaderRenderer struct {
			ChannelID string `json:"channelId"`
			Title     string `json:"title"`
			Avatar    struct {
				Thumbnails []struct {
					URL    string `json:"url"`
					Width  int    `json:"width"`
					Height int    `json:"height"`
				} `json:"thumbnails"`
			} `json:"avatar"`
			Banner struct {
				Thumbnails []struct {
					URL    string `json:"url"`
					Width  int    `json:"width"`
					Height int    `json:"height"`
				} `json:"thumbnails"`
			} `json:"banner"`
			VisitTracking struct {
				RemarketingPing string `json:"remarketingPing"`
			} `json:"visitTracking"`
			SubscriberCountText struct {
				SimpleText string `json:"simpleText"`
			} `json:"subscriberCountText"`
		} `json:"c4TabbedHeaderRenderer"`
	} `json:"header"`
	// Metadata is the channel-level metadata renderer (title, description,
	// RSS URL, external ID, keywords, vanity URL, ...).
	Metadata struct {
		ChannelMetadataRenderer struct {
			Title                string   `json:"title"`
			Description          string   `json:"description"`
			RssURL               string   `json:"rssUrl"`
			ChannelConversionURL string   `json:"channelConversionUrl"`
			ExternalID           string   `json:"externalId"`
			Keywords             string   `json:"keywords"`
			OwnerUrls            []string `json:"ownerUrls"`
			Avatar               struct {
				Thumbnails []struct {
					URL    string `json:"url"`
					Width  int    `json:"width"`
					Height int    `json:"height"`
				} `json:"thumbnails"`
			} `json:"avatar"`
			ChannelURL       string `json:"channelUrl"`
			IsFamilySafe     bool   `json:"isFamilySafe"`
			VanityChannelURL string `json:"vanityChannelUrl"`
		} `json:"channelMetadataRenderer"`
	} `json:"metadata"`
	Topbar struct {
		DesktopTopbarRenderer struct {
			CountryCode string `json:"countryCode"`
		} `json:"desktopTopbarRenderer"`
	} `json:"topbar"`
	// Microformat duplicates much of the metadata in microdata form and adds
	// app-link URLs and indexing flags.
	Microformat struct {
		MicroformatDataRenderer struct {
			URLCanonical string `json:"urlCanonical"`
			Title        string `json:"title"`
			Description  string `json:"description"`
			Thumbnail    struct {
				Thumbnails []struct {
					URL    string `json:"url"`
					Width  int    `json:"width"`
					Height int    `json:"height"`
				} `json:"thumbnails"`
			} `json:"thumbnail"`
			SiteName           string   `json:"siteName"`
			AppName            string   `json:"appName"`
			AndroidPackage     string   `json:"androidPackage"`
			IosAppStoreID      string   `json:"iosAppStoreId"`
			IosAppArguments    string   `json:"iosAppArguments"`
			OgType             string   `json:"ogType"`
			URLApplinksWeb     string   `json:"urlApplinksWeb"`
			URLApplinksIos     string   `json:"urlApplinksIos"`
			URLApplinksAndroid string   `json:"urlApplinksAndroid"`
			URLTwitterIos      string   `json:"urlTwitterIos"`
			URLTwitterAndroid  string   `json:"urlTwitterAndroid"`
			TwitterCardType    string   `json:"twitterCardType"`
			TwitterSiteHandle  string   `json:"twitterSiteHandle"`
			SchemaDotOrgType   string   `json:"schemaDotOrgType"`
			Noindex            bool     `json:"noindex"`
			Unlisted           bool     `json:"unlisted"`
			FamilySafe         bool     `json:"familySafe"`
			Tags               []string `json:"tags"`
		} `json:"microformatDataRenderer"`
	} `json:"microformat"`
}
|
13
ytapi/ytapi_test.go
Normal file
13
ytapi/ytapi_test.go
Normal file
|
@ -0,0 +1,13 @@
|
||||||
|
package ytapi
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestChannelInfo performs a live request against youtube.com for a fixed
// channel ID and therefore requires network access; it only verifies that
// scraping and decoding succeed, not any particular field values.
func TestChannelInfo(t *testing.T) {
	info, err := ChannelInfo("UCNQfQvFMPnInwsU_iGYArJQ")
	assert.NoError(t, err)
	assert.NotNil(t, info)
}
|
Loading…
Reference in a new issue