Compare commits


377 commits

Author SHA1 Message Date
Niko Storni
78d5c8c6fa never retry those hardcoded errors 2022-08-16 03:06:39 +02:00
Niko Storni
caca92a6bc idk... let's work around this for now 2022-08-16 02:51:35 +02:00
Niko Storni
9ef1b7800b fix nil ptr? 2022-08-16 02:41:14 +02:00
Niko Storni
ea3315d1d6 this is actually necessary 2022-08-16 02:14:33 +02:00
Niko Storni
68132c65a9 fix channel update process 2022-08-15 22:42:06 +02:00
Niko Storni
57e017ec8f fix some logic
reduce verbosity of debug logs
2022-08-15 21:19:20 +02:00
Niko Storni
42db3782ec fix issue with yt not returning a date 2022-08-11 18:01:55 +02:00
Niko Storni
9d93799d86 account for nil struct 2022-08-11 05:28:06 +02:00
Niko
d93f463386
fix odd error with morty's date 2022-08-11 00:33:40 +02:00
Niko Storni
77988c1682 woops 2022-08-10 22:01:50 +02:00
Niko Storni
c79e07c9fa change recent livestreams logic slightly 2022-08-10 21:55:29 +02:00
Niko Storni
e454cdb4c9 fix post live detection
prevent unlisted videos from ever publishing (even if they were public before and we know about them)
fix timestamp on videos
update user agent
2022-08-10 21:26:36 +02:00
Niko Storni
98a10d1269 fix for channel creation bug 2022-08-10 18:23:37 +02:00
Niko Storni
4f6748ae83 fix bug in channel updates 2022-08-10 17:27:27 +02:00
Niko Storni
c1b2117df5 fix vuln 2022-08-09 22:27:00 +02:00
Niko Storni
c4207338c8 fix const bug
update dependencies
2022-08-09 22:17:03 +02:00
Niko Storni
5a01983203 add language to channels
improve logging (timestamps)
retry wallet uploads for 30 minutes
don't fail if the db isn't tracking all publishes
2022-08-09 22:11:42 +02:00
Niko Storni
ee8eb83d07 fix dependency vuln 2022-05-10 23:36:00 +02:00
Niko Storni
8d0f762067 change description length 2022-05-10 23:09:23 +02:00
Niko Storni
8fa1482d18 adjust limits 2022-05-09 20:23:44 +02:00
Niko Storni
00ae404642 exclude audio ac-3 codec not working in chrome 2022-05-09 20:05:05 +02:00
Niko Storni
230bfe4a41 fix go version in travis 2022-05-04 19:22:19 +02:00
Niko Storni
df33fb9263 fix api failures
update dependencies
fix e2e
2022-05-04 19:17:36 +02:00
Niko Storni
d72be1d920 add new status 2022-03-03 18:59:32 +01:00
Niko Storni
7d12a90139 avoid ec-3 audio codec 2022-02-09 17:06:51 +01:00
Niko
e1689a2a6c
Merge pull request #116 from e4drcf/synced-video-statuses
add shared video sync statuses array for validation purposes
2022-02-07 20:03:29 +01:00
Ivan
a8a6347d52 add shared video sync statuses array for validation purposes 2022-02-07 20:17:41 +02:00
Niko Storni
bdee1b4092 fix math to avoid negative balances 2022-01-26 07:43:09 +01:00
Niko Storni
0d0d39380c fix spend amounts to save credits 2022-01-26 07:11:26 +01:00
Niko Storni
7ff1a009da remove unused params 2022-01-14 18:49:02 +01:00
Niko Storni
e3a332c7e1 upgrade dependencies 2022-01-14 18:10:58 +01:00
Niko Storni
33ee6e4b94 Merge remote-tracking branch 'origin/metadata_fix' 2022-01-14 17:20:22 +01:00
Niko Storni
f6cde976a6 fix metadata? 2022-01-06 15:15:04 +01:00
Niko Storni
17944fa46a refactor get video time
remove broken time lookup
refactor quite some code
2021-12-30 13:17:11 -05:00
Niko Storni
3c18ae8de2 add checks for buggy livestreams 2021-12-29 17:47:46 -05:00
Niko Storni
84790720ff improve error handling
retry wallet uploads on failure
2021-12-02 16:59:14 +01:00
Niko
23690731af
fix bug when uploading wallet 2021-11-30 02:53:59 +01:00
Niko Storni
75628d8530 delete tars after use 2021-11-25 04:26:04 +01:00
Niko Storni
6e819b20f6 update readme 2021-11-24 18:58:38 +01:00
Niko Storni
da0b6e5b79 add example config 2021-11-24 18:39:24 +01:00
Niko Storni
28791f317b update gitignore
improve error logging to slack
fix regression in dev builds
2021-11-24 18:37:04 +01:00
Niko Storni
070287716b switch from env vars to a config file
get rid of stupid stuff
simplify S3 configuration
split wallets from blockchain.db and use separate S3 store
fix bugs
2021-11-24 05:54:08 +01:00
Alex Grin
41054e77a6
Update README.md 2021-09-28 10:20:17 -04:00
Niko Storni
6944e17f43 fix livestreams failing for wrong reason 2021-08-24 12:03:03 -04:00
Niko Storni
a224fe44c2 fix stream by magic 2021-08-24 11:54:38 -04:00
Niko Storni
fa9dc35123 fix patched bug 2021-07-26 18:03:37 +02:00
Niko Storni
3a0882230a bypass throttling 2021-07-13 22:49:27 -04:00
Niko Storni
fbd683e094 don't panic 2021-07-08 01:47:57 +02:00
Niko Storni
2f15c920d4 speed up playlist listing 2021-06-25 19:16:01 +02:00
Niko Storni
01f6448e72 revert aria2c usage 2021-06-25 19:09:00 +02:00
Niko Storni
8fb1e2ead0 fix empty thumbnails 2021-06-25 19:04:40 +02:00
Niko Storni
3b84db382c attempt using aria2c for some servers 2021-06-24 23:06:27 +02:00
Niko Storni
4fe6840a4e refactor code
fix bugs
add stub for speed checker
2021-06-18 04:47:08 +02:00
Niko Storni
69e6fb51d1 improve error handling 2021-06-18 03:09:19 +02:00
Niko Storni
f17110ab7f fix nilptr 2021-06-18 01:23:25 +02:00
Niko Storni
768743a200 fix deadlock 2021-06-17 22:02:42 +02:00
Niko Storni
7c652b22a1 better output 2021-06-17 19:43:30 +02:00
Niko Storni
a0fb4e579e fix progressbar
fix videos with leading dash
2021-06-17 19:13:44 +02:00
Niko Storni
519e1e4648 switch to yt-dlp
add progressbars
avoid unnecessary calls to youtube
update user agents
cookies fixes
bug fixes
introduction of new bugs
2021-06-17 17:51:21 +02:00
Niko Storni
087f20c133 fix broken video codec
exclude audio codecs that don't work on mobile
2021-05-25 21:08:12 +02:00
Niko Storni
21e6603a26 rollback is_spent changes
upgrade reflector/lbry.go
2021-04-13 00:55:50 +02:00
Niko Storni
ca41f5052e upgrade go on travis 2021-04-12 23:41:47 +02:00
Niko Storni
d739a98beb upgrade reflector to switch to wasabi 2021-04-12 23:31:37 +02:00
Niko Storni
7c7ceed333 further improve language detection
strip URLs from description to get better results
2021-03-25 19:07:26 +01:00
Niko Storni
198473b62b improve language detection 2021-03-25 18:47:34 +01:00
Niko Storni
d53d0a1d52 fix issues with unicode claim names
fix issue with streams to abandon
2021-03-23 01:18:26 +01:00
Niko Storni
55577201a4 remove livelock 2021-03-17 16:40:37 +01:00
Niko Storni
cacd21f840 fix bugs with claim names and unicode
update tests and improve logging
2021-03-15 22:50:23 +01:00
Niko Storni
9799b0a732 allow claim names with non latin chars
upgrade to latest lbrynet
improve e2e test
2021-03-04 03:04:52 +01:00
Niko Storni
e985a60761 improve upload time detection 2021-03-01 23:34:19 +01:00
Niko Storni
4b4cee9fcc handle weird state 2021-02-25 05:05:28 +01:00
Niko Storni
acca80dc70 fix unmarshal issue 2021-02-22 19:46:22 +01:00
Niko Storni
053c618458 fix another bug with youtube-dl 2021-02-22 17:29:52 +01:00
Niko Storni
2cf3526c74 switch back to youtube-dl for downloads only 2021-02-18 23:28:55 +01:00
Niko Storni
d7e194cb5c update chrome UA
change throttle timeout to 48 hours
2021-02-11 23:44:48 +01:00
Niko Storni
9a8b1922fe fix channel list 2021-02-11 21:43:58 +01:00
Niko Storni
12d627bd35 fix stupid error 2021-01-12 16:49:54 +01:00
Niko Storni
9397a40c9f disable unsent tips check as we don't really use it now
wait a little bit longer on new blocks
2020-12-16 16:59:07 +01:00
Niko Storni
6a4093f3f8
fix youtube channel info parsing 2020-11-27 13:12:01 -05:00
Niko Storni
bac2969295 prevent panic failure 2020-11-19 15:58:31 +01:00
Niko Storni
5dfd8dee1b change default videos to sync
add length check for short videos
potentially fix blockchain.db issue
2020-11-19 03:11:23 +01:00
Niko Storni
06c43c3f71 use youtube-dlc instead 2020-11-10 18:18:07 +01:00
Niko Storni
881d86799b improve logging 2020-11-04 18:13:30 +01:00
Niko Storni
f7e4366a67 fix logging 2020-11-04 17:22:27 +01:00
Niko Storni
5b5f7e1c68 use video hard failures 2020-11-03 22:03:38 +01:00
Niko Storni
fecf67118c remove unused flag
always go through syncing first
don't sync videos shorter than 7 seconds
refactor code in video error handling
add interface to handle hard video failures (incomplete)
2020-11-03 21:41:39 +01:00
Niko Storni
beade71aa6 fix bugs 2020-11-03 02:14:01 +01:00
Niko Storni
ca8ff505d4 add status to list
resume on disk usage overflow
2020-10-27 20:57:32 +01:00
Niko Storni
f5f12e1560 add support for multiple queues
add support for blockchain.db pruning via new status
2020-10-27 19:50:10 +01:00
Niko Storni
c4693c221f update lbrynet support
avoid failures when interrupted by user
2020-10-22 23:14:21 +02:00
Niko Storni
692c796770 handle non existing iso639-1 better 2020-10-20 17:43:25 +02:00
Niko Storni
de798ade36 fix canceled by stopper error 2020-10-07 03:40:19 +02:00
Niko Storni
a93c2c4538 add support for own S3 thumbnails storage 2020-10-07 03:10:54 +02:00
Niko Storni
53e0b7c47b fix bug that messes with counts 2020-09-25 21:44:14 +02:00
Niko Storni
070dda8739 add language detection for videos 2020-09-21 19:12:23 +02:00
Niko Storni
f773569920 Merge branch 'fuck-yt' 2020-09-01 20:50:31 +02:00
Niko Storni
ad6fa4d725 update dependencies 2020-09-01 20:27:39 +02:00
Niko Storni
d4ca71a89d fix channel description 2020-08-31 21:27:39 +02:00
Niko Storni
c53cf4c1b3 prevent channels from failing when manually killed 2020-08-26 19:05:16 +02:00
Niko Storni
ad5a30da9e work around transfer failures 2020-08-25 18:44:44 +02:00
Niko Storni
ecda80b02d bypass socialblade block
stop using socialblade for now
2020-08-21 00:15:14 +02:00
Niko Storni
24cf937e14 retry on support list failures 2020-08-18 01:09:53 +02:00
Niko Storni
0b002c8228 lots of bug fixes and improvements 2020-08-18 00:03:38 +02:00
Niko Storni
a56166ee51 refactor youtube-dl execution process 2020-08-12 19:44:57 +02:00
Niko Storni
ddca850c17 nevermind... re-enable ipv6 (i should make it a flag) 2020-08-12 04:17:30 +02:00
Niko Storni
31ce612e2e temporarily disable ipv6 2020-08-12 03:38:52 +02:00
Niko Storni
eb8900c66a don't waste loops
fix a deadlock?
2020-08-12 03:37:18 +02:00
Niko Storni
67da4142d5 fix old transferred channels 2020-08-10 18:33:05 +02:00
Niko Storni
fb0e567caf be more specific on video failures 2020-08-08 03:29:57 +02:00
Niko Storni
7e83b17b69 add fallback for socialblade API failures 2020-08-08 02:31:26 +02:00
Niko Storni
7c02c5b92d refactor structures
fix various bugs
2020-08-08 01:12:55 +02:00
Niko Storni
2a33f44317 fix e2e tests 2020-08-07 15:24:13 +02:00
Niko Storni
b7037900f8 fix thumbnail download 2020-08-06 23:53:03 +02:00
Niko Storni
5d230a6b54 potentially fix looping
add limits by db
2020-08-06 20:32:49 +02:00
Niko Storni
f0280b51b4 fix time parsing for real 2020-08-06 02:12:05 +02:00
Niko Storni
775e4881cb invert logic so that we don't flood slack 2020-08-06 00:30:48 +02:00
Niko Storni
ec9f46f552 fix release date bug 2020-08-06 00:25:54 +02:00
Alex Grintsvayg
6c6e93cefc
go mod tidy 2020-08-05 15:48:13 -04:00
Niko Storni
eab9bcf1ff
don't wait for confirmations unnecessarily
fix sqlite crap
2020-08-05 15:47:53 -04:00
Niko Storni
751bc84ce5
grab channel info without APIs
fix new channel syncs
fix e2e tests
2020-08-05 15:47:53 -04:00
Mark Beamer Jr
41fd9f6844
fix string parameter for error message 2020-08-05 15:47:53 -04:00
Mark Beamer Jr
7f1906d58b
change FPS to float from int 2020-08-05 15:47:53 -04:00
Mark Beamer Jr
5be3551abe
Mark video failed if we cannot get video info instead of failing the sync
Turn on error log after fixing lockup
Try different user agents if calls fail
Prevent double processing a channel in a row, just fail the sync instead
2020-08-05 15:47:53 -04:00
Mark Beamer Jr
fc18151d77
Increase max db limit
Increase wallet startup time allotment for big channels
Set FPS to float instead of int
2020-08-05 15:47:53 -04:00
Mark Beamer Jr
13543b20e9
Don't use api unless the video is from the past 5 days, otherwise just use the upload date from youtube donwloader
change interface to int for now known field.
2020-08-05 15:47:53 -04:00
Mark Beamer Jr
4eba7200d8
user agent breaks it 2020-08-05 15:47:53 -04:00
Mark Beamer Jr
cd11e82676
Dont always fall through on every error.
Add user agent to calls
2020-08-05 15:47:53 -04:00
Mark Beamer Jr
c7c220ecd3
Dont always fall through on every error. 2020-08-05 15:47:53 -04:00
Mark Beamer Jr
4d56339756
fixed appending args 2020-08-05 15:47:53 -04:00
Mark Beamer Jr
140353097e
added logging 2020-08-05 15:47:53 -04:00
Mark Beamer Jr
a1caea4a28
added logging 2020-08-05 15:47:53 -04:00
Mark Beamer Jr
b1e10e7b09
Add retry on IP failures 2020-08-05 15:47:53 -04:00
Mark Beamer Jr
ad27425471
Add ip switching for being blocked 2020-08-05 15:47:53 -04:00
Mark Beamer Jr
81e9378b9d
Add new fall through errors for release time 2020-08-05 15:47:53 -04:00
Mark Beamer Jr
64040ea67a
Add calls to internal-apis and add get released date if available. 2020-08-05 15:47:47 -04:00
Mark Beamer Jr
d99e200178
Add check for already published videos using new video-state api. 2020-07-29 00:12:23 -04:00
Alex Grintsvayg
357aebbcce
less loud 2020-07-28 22:40:56 -04:00
Alex Grintsvayg
0eef62b5fd
i made a huge mess. sorry future me, when you're digging through this 2020-07-28 21:34:08 -04:00
Alex Grintsvayg
a3dd3dc626
WIP: trying to get the accurate api to work 2020-07-28 12:47:28 -04:00
Alex Grintsvayg
f942bf8025
get upload time from https://www.invid-project.eu/tools-and-services/invid-verification-plugin/ 2020-07-28 11:20:22 -04:00
Alex Grintsvayg
a05864404d
use local timezone for youtube-dl, even though its not certain 2020-07-27 20:59:47 -04:00
Alex Grintsvayg
70ad891dfa
use durations 2020-07-27 19:35:07 -04:00
Mark Beamer Jr
8e61cde0a0
turn of std error 2020-07-27 17:31:46 -04:00
Alex Grintsvayg
8600077caa
no panics. thanks Beam-master 2020-07-27 17:27:50 -04:00
Mark Beamer Jr
e7fdd21bac
commit some more stuff 2020-07-27 17:19:15 -04:00
Mark Beamer Jr
0bb6b6d833
commit my shit 2020-07-27 17:14:18 -04:00
Alex Grintsvayg
b59ef28267
punting on ChannelInfo for now 2020-07-27 16:02:52 -04:00
Mark Beamer Jr
e926a2c1f6
Fix quickSync && maxVideos 2020-07-27 15:59:14 -04:00
Alex Grintsvayg
1369ed0b48
replace yt channel video count with a scrape of socialblade.com 2020-07-27 15:51:06 -04:00
Mark Beamer Jr
f39fc11697
Add downloader to create api
adjust GetVideosToSync
2020-07-27 15:42:45 -04:00
Alex Grintsvayg
bffc0823be
more 2020-07-27 14:52:21 -04:00
Alex Grintsvayg
edbb22fcf0
rename 2020-07-27 14:49:37 -04:00
Alex Grintsvayg
843303301a
move all youtube api calls into a single place 2020-07-27 14:48:05 -04:00
Niko Storni
be7fd7ddd8
Merge branch 'latest-lbry.go' 2020-07-21 16:59:26 -04:00
Niko Storni
bfcfe5f36b
fix e2e 2020-07-21 16:59:09 -04:00
Alex Grintsvayg
4c4da93ef3
bump lbry.go dependency to be compatible with internal-apis dependency bump 2020-07-21 13:55:34 -04:00
Niko Storni
c2b5878daa
don't delete lbry first videos from the db 2020-07-21 13:54:28 -04:00
Niko Storni
749d5095c9 fix semver 2020-06-11 18:45:56 +02:00
Niko
ba77b61ae0
Merge pull request #56 from lbryio/fix-travis
fix travis try 2
2020-06-09 21:48:40 -04:00
Niko Storni
07915ce697 fix travis try 2 2020-06-10 03:44:34 +02:00
Niko Storni
4b2221ce5e fix travis (try 1) 2020-06-10 03:38:42 +02:00
Niko Storni
1d5b69bfe6 add automatic sleep until quota reset
re-enable ipv6
2020-06-10 03:32:45 +02:00
Niko Storni
4d915587e0 temporarily disable ipv6 2020-05-30 02:18:42 +02:00
Niko Storni
83f2c28c20
fix nil ptr 2020-05-22 12:47:18 -04:00
Niko Storni
47e467057d change timeouts to avoid long wait times 2020-05-20 04:28:29 +02:00
Niko Storni
1ae32d638b fix prom allowed chars 2020-05-19 23:24:16 +02:00
Niko Storni
7907ee3579 add prometheus metrics 2020-05-19 23:13:01 +02:00
Niko Storni
414ed1c130 or to and 2020-05-14 17:28:15 +02:00
Niko Storni
3bb17b09d3 fix never ending condition 2020-05-14 06:47:19 +02:00
Niko Storni
046f46267e update lbry.go dependency 2020-05-14 05:49:49 +02:00
Niko Storni
adc3ffe194 update lbrynet support 2020-05-14 04:04:52 +02:00
Niko Storni
76301d40ba only wait a maximum of 2 blocks before transfering
fix debug string
2020-05-08 16:33:29 +02:00
Niko Storni
963c450730 allow all IPv6 to be used 2020-05-05 20:13:40 +02:00
Niko Storni
6e7bb994d0 don't keep reusing the same IP for updates 2020-05-05 05:58:34 +02:00
Niko Storni
95890cbabf prevent live streams from syncing
reduce timeout to 20 minutes
2020-04-29 16:15:22 +02:00
Niko Storni
69c2a91ec7 fix bug in video length 2020-04-23 16:37:35 +02:00
Niko Storni
fbe09a692c fix 403 error 2020-04-21 20:56:14 +02:00
Niko Storni
0ccc6e7979 add temporary logging 2020-04-21 16:26:34 +02:00
Niko Storni
3eb74cc8b5 log video length failures 2020-04-19 02:38:31 +02:00
Niko Storni
9bfb1656fe fix length checks on single syncs 2020-04-10 17:46:07 +02:00
Niko Storni
5d40d24804 fix max length issue for bigger youtubers
fix issue with no-playlist-found channels
reduce the max db size limit
2020-04-08 23:14:10 +02:00
Niko Storni
79d3aadda8 add utxo_release as hotfix for stuck balances 2020-04-01 04:49:24 +02:00
Niko Storni
eba59a0806 change utxo_list signature 2020-04-01 04:44:05 +02:00
Niko Storni
3fd4909cd1 replace support abandon with txo_spend
upgrade lbry.go library
upgrade lbrynet support
possibly introduce new bugs ;)
2020-03-31 04:30:02 +02:00
Niko Storni
b59372c1d8 don't prevent channels with 0 youtube videos from transferring 2020-03-31 02:45:53 +02:00
Niko Storni
c9e713e9df do not attempt upgrading channels no longer in our control 2020-03-27 18:16:35 +01:00
Niko Storni
bc72d2f1c4 do not fail if database is missing 2020-03-25 18:28:42 +01:00
Niko Storni
df539e5d01 fix ledger mess 2020-03-25 18:12:50 +01:00
Niko Storni
1b89104101 clean up blockchain.db if it grows too big 2020-03-25 17:53:47 +01:00
Niko Storni
3dc30c3b98 speed up duration checks 2020-03-12 19:22:23 +01:00
Niko Storni
259df8d257 restrict length amount on small youtubers 2020-03-12 04:08:50 +01:00
Niko Storni
5460224cd6 upgrade SDK
fix UTXO creation time waste
2020-02-24 15:52:32 +01:00
Niko Storni
29479a73a7 update lbry.go/lbrycrd dependencies 2020-02-11 20:24:05 +01:00
Niko Storni
625f339709 Merge branch 'latest_lbrycrd_support' 2020-02-11 20:22:59 +01:00
Mark Beamer Jr
d6fcfd888a Upgrade to latest lbrycrd support for internal-apis 2020-02-11 20:22:50 +01:00
Thomas Zarebczan
5f7d9d4940
Merge pull request #51 from ykris45/patch-1
Update LICENSE
2020-02-03 17:03:52 -05:00
YULIUS KURNIAWAN KRISTIANTO
826fcb78ea
Update LICENSE 2020-02-03 04:08:28 +07:00
Niko Storni
45e87c3007 don't ever re-create a channel if one was previously created 2020-01-14 04:34:33 +01:00
Niko Storni
cdee0d0de7 fix typo 2020-01-14 04:14:14 +01:00
Niko Storni
e30f21dc85 fail sync when channel is abandoned 2020-01-14 04:10:20 +01:00
Niko Storni
df88c7e952 update docker compose 2020-01-14 03:40:30 +01:00
Niko Storni
62a1a0eed5
update goreleaser 2020-01-11 22:11:44 -05:00
Niko Storni
93f9185c71
fix bug when publishing 2020-01-11 22:01:40 -05:00
Niko Storni
ef62242f42 add cookies to youtube-dl
minor fixes
2020-01-03 15:59:05 +01:00
Niko Storni
4a356e86da add quicksync for better quota usage 2019-12-27 18:12:41 +01:00
Niko Storni
ac95ca57af halt when quota is exceeded 2019-12-27 04:15:48 +01:00
Niko Storni
c28d5a716b send warning to slack when video processing panics 2019-12-27 01:39:27 +01:00
Niko Storni
59471f9c26 reduce sync delay
fix stopper bug
2019-12-27 01:27:29 +01:00
Niko Storni
8028c6621c add temporary workaround to release all IPs
speed up one failure
2019-12-26 17:52:44 +01:00
Niko Storni
8abf6ad255 add debug statement 2019-12-24 23:27:16 +01:00
Niko Storni
1f9a645c57 support create fixes
various bug fixes
2019-12-24 05:00:16 +01:00
Niko Storni
a5657aaf46 refactor download
get rid of goto
2019-12-20 18:49:33 +01:00
Niko Storni
b6861dae9b don't fail on abandoned videos 2019-12-19 03:13:22 +01:00
Niko Storni
cff147bb95 work around missing inputs error 2019-12-18 20:24:50 +01:00
Niko Storni
54d04dcf2c add IP debugging
improve process halting
rename improper language code for hebrew
2019-12-18 18:22:15 +01:00
Niko Storni
65c4f99c1c make sure abandons spend from default account only 2019-12-16 04:53:15 +01:00
Niko Storni
68c0fd9ed7 add new throttling detection 2019-12-16 00:29:56 +01:00
Niko Storni
2bde06e4b9 fixes to throttling 2019-12-14 14:58:04 +01:00
Niko Storni
5df6db6e96 fix bug with non-related publishes 2019-12-14 06:43:01 +01:00
Niko Storni
62dfdc1adb fix possible resource leak 2019-12-14 05:49:25 +01:00
Niko Storni
fe55304184 fix anti-throttling
fix duplicates with multi-channels
2019-12-13 18:20:41 +01:00
Mark Beamer Jr
bb76500f04
Update e2e test for publish table migration in internal-apis 2019-12-12 22:28:19 -05:00
Niko Storni
4e2efec66e disable logging for reflection 2019-12-11 13:13:30 +01:00
Mark
8d650e032c
Merge pull request #49 from lbryio/merge_e2e
Merge chainquery and lbry databases
2019-12-10 20:01:26 -05:00
Mark Beamer Jr
f5d97957e7
Fixed the e2e test. It wasn't checking the right claims and videos. Looks like a copy paste error. 2019-12-10 18:43:44 -05:00
Niko Storni
36f539ef5d change ip throttling management 2019-12-10 23:02:56 +01:00
Mark Beamer Jr
77461f71af
Merge chainquery and lbry databases to account for claim table being required by internal-apis due to replication changes. 2019-12-10 15:45:29 -05:00
Niko Storni
caca13de61 increase anti-throttle timeout 2019-12-09 18:09:31 +01:00
Niko Storni
7bdc117ba0
upgrade ytsync to latest SDK
upgrade e2e tests
2019-12-08 11:49:27 -05:00
Niko Storni
fc575dac80
improve logging
fix ipv6 switching
2019-12-08 10:31:15 -05:00
Niko Storni
d520ff2c68 change cache control for thumbnails 2019-11-11 15:11:34 -05:00
Niko Storni
25e49f8ff2 fix live lock 2019-10-25 18:35:09 +02:00
Niko Storni
77d0efcf3c refactor tags_mapping name
remove failure state for transfers
2019-10-21 15:44:24 +02:00
Niko Storni
291a105269 improve getDefaultAccount
use default account on more calls
improve API logging
fix memory leak
2019-10-16 19:38:45 +02:00
Thomas Zarebczan
682117a030
Merge pull request #45 from StrikerRUS/license
added license file
2019-10-15 11:52:59 -04:00
Niko Storni
6d2e4aa6e6 list all claims at once (faster) 2019-10-15 00:29:26 +02:00
Niko Storni
77749f05ba APIs hiccups will no longer make ytsync crash 2019-10-14 17:51:26 +02:00
Niko Storni
10a855a8ba fix nil ptr dereference
fix bug in transfers
2019-10-12 09:49:49 +02:00
Niko Storni
a9e76149e8 parallelize support abandons
update lbrynet support in e2e test
stop using reflector on regtest!
change e2e channel target for more videos
possibly fix a deadlock
2019-10-11 19:42:03 +02:00
Niko Storni
dd4aebdba4 make video transferring async
fix bug in videosLimit
change test subject to have more videos
2019-10-11 01:00:04 +02:00
Niko Storni
b409d36de0 remove unnecessary output 2019-10-10 18:17:13 +02:00
Niko Storni
30af4a0136 update lbry.go to v2 library
update modules
add slightly more LBCs for the channel claim to ensure fees are covered
2019-10-10 16:51:40 +02:00
StrikerRUS
a3b660fab5 aadded license file 2019-10-09 00:11:55 +03:00
Niko Storni
65daeec216 resolve issue with channels attempting re-transfer 2019-10-08 14:42:57 +02:00
Niko Storni
ad1704fb39 remove obsolete check 2019-10-08 02:13:17 +02:00
Niko Storni
44e6cb5ddc fix bug where newly published and transferred videos caused an error
rename variable for better clarity
2019-10-08 01:59:18 +02:00
Niko Storni
5171acc007 refactor integrity check
add checks for improper transfer state
add checks for unsent supports
only use default account for operations that only work on it
add more logging
2019-10-08 01:38:39 +02:00
Niko Storni
946314da94 update lbry.go to fix claim_search 2019-10-03 22:51:21 +02:00
Niko Storni
5961327adf reduce bid even further to avoid lockups 2019-10-03 21:06:47 +02:00
Niko Storni
d2bbb36312 improve detection for own claims vs their claims 2019-10-01 16:55:43 +02:00
Niko Storni
c7272e988a reduce amount to avoid dust 2019-09-27 20:17:49 +02:00
Niko Storni
5b9846bcca fix pointer issue 2019-09-27 19:29:42 +02:00
Niko Storni
8d849b8f06 do not retry unavailable videos 2019-09-27 16:12:21 +02:00
Niko Storni
c41c368f34 fix transfer bugs 2019-09-27 16:10:57 +02:00
Niko Storni
7ce4c4a2ce fix lbrynet shutdown check in e2e 2019-09-25 05:07:19 +02:00
Niko Storni
0d473cc4f6 add support for transferring videos after transferring everything 2019-09-25 04:38:49 +02:00
Niko Storni
2f701a887a fix double counting
fix transferred check
2019-09-25 04:15:16 +02:00
Niko Storni
8d56ef852f fix e2e
fix database transferred flag
2019-09-25 03:30:07 +02:00
Niko Storni
849ff11bfd import public keys if available 2019-09-24 20:42:17 +02:00
Niko Storni
45982f30c5 fix e2e test 2019-09-24 19:09:39 +02:00
Niko Storni
07413b499a update SDK support
fix bug where videos would publish to the wrong address
2019-09-24 18:51:06 +02:00
Niko Storni
92d8ef75f8 update e2e test 2019-09-19 20:03:55 +02:00
Niko Storni
1b08bb0e61 fix bug with non latin titles for claim names
hard exit when a duplicate name is attempted
2019-09-10 11:43:20 +02:00
Niko Storni
bdd55c9965 fix balance refill issue
add additional debugging
2019-09-04 19:24:11 +02:00
Niko Storni
14668c339e refactor sync flags
add disable-transfer flag
2019-08-30 21:31:58 +02:00
Niko Storni
7d38aa7b29 add republishing on non-available videos
enhanced logging
2019-08-30 19:35:04 +02:00
Niko Storni
3c3ea2138e adjust support management
adjust e2e script
2019-08-28 21:08:25 +02:00
Niko Storni
7b23235f83 Merge branch 'transfers' 2019-08-28 15:26:38 +02:00
Niko Storni
79ae630bc4 update reflector lib 2019-08-28 15:25:53 +02:00
Niko Storni
0d8db26e96 use the right errors lib
update dependencies
2019-08-28 15:25:53 +02:00
Mark Beamer Jr
bacb91e82a Add supporty to create supports
Update lbry.go dep to get lbrycrd commands
2019-08-28 15:25:53 +02:00
Niko Storni
577411cf50 fix e2e
add support for supports (heh)
2019-08-28 15:25:53 +02:00
Mark Beamer Jr
e98061f2eb Add chainquery to youtube sync repo to be used for e2e testing of support transfer
Remove timeout for wait-for-it calls
2019-08-28 15:25:53 +02:00
Mark Beamer Jr
4548c41082 Add chainquery to youtube sync repo 2019-08-28 15:25:53 +02:00
Niko Storni
f7c80c2e5d refactor transfer process
start working on supports
2019-08-28 15:25:53 +02:00
Mark Beamer Jr
2d6e53be32 Added creation of supports for the e2e test 2019-08-28 15:25:53 +02:00
Mark Beamer Jr
419434db28 Add success message when the e2e test passes. 2019-08-28 15:25:53 +02:00
Mark Beamer Jr
cd6ddaaa85 Update lbrynet to match wallet server version. 2019-08-28 15:25:53 +02:00
Niko Storni
4d16a1d6e8 fix e2e tests
update wallet server
cleanup go.sum
Fix broken balance logging
Prevent failures once channel is transferred
Wait for all UTXOs to confirm before transferring
Fix various bugs
2019-08-28 15:25:53 +02:00
Mark Beamer Jr
1fb1077a94 Update curl call to show response
Update channel status call to pass the correct name of the parameter
2019-08-28 15:25:53 +02:00
Mark Beamer Jr
2c9c999c71 Add marking video status when transfer of claim is complete.
Add error handling when transfer of claim fails.
2019-08-28 15:25:53 +02:00
Mark Beamer Jr
c754562f96 Update to leverage lbrycrd v0.17.2.1, lbrynet v0.39.1, walletserver v0.39.1 2019-08-28 15:25:53 +02:00
Niko Storni
7a7de03c0f adjust account balance
minor fix
2019-08-28 15:25:53 +02:00
Niko Storni
1b9ed266e0 move channel status call to proper region
fix shadowed errors
refactor code to be more readable and consistent
get video transfer state
2019-08-28 15:25:53 +02:00
Mark Beamer Jr
eb2f6273e4 Add transfer of channel after all video's have been transferred
Add channel status call to notify external db that the channel has been transferred.
2019-08-28 15:25:53 +02:00
Mark Beamer Jr
45cea808ed Add marking video status when transfer of claim is complete.
Add error handling when transfer of claim fails.
2019-08-28 15:25:53 +02:00
Mark Beamer Jr
9a5b6b4e56 Update to leverage lbrycrd v0.17.2.1, lbrynet v0.39.1, walletserver v0.39.1 2019-08-28 15:25:53 +02:00
Mark Beamer Jr
d63aba568d Add transfer logic as post sync process step.
Add authtoken for youtuber in test data setup to call transfer api
Update e2e test to include scenario where we transfer channel.
2019-08-28 15:25:53 +02:00
Niko Storni
4cc0b71279 process failures sequentially 2019-08-20 11:33:06 +02:00
Niko Storni
b8d66cb8c0 use latest lbry.go/lbrycrd release 2019-08-20 01:37:13 +02:00
Niko Storni
eff76d22eb update lbry.go version 2019-08-16 17:45:19 +02:00
Niko Storni
8ebdf33213 update lbry.go 2019-08-16 13:49:31 +02:00
Niko Storni
81616c1d96 refactor channel selector
remove unused code
2019-08-15 19:31:43 +02:00
Niko Storni
b477d18a16 fix claim_search 2019-08-14 17:44:39 +02:00
Niko Storni
5cb7f72ef7 remove useless logging 2019-08-14 05:18:57 +02:00
Mark Beamer Jr
1b1d7f5ec0
Add build argument for docker build.sh file
Update compose to use v0.38.6 of lbrynet
2019-08-13 23:03:37 -04:00
Niko Storni
adeabc97f8 Merge branch 'setchannelcerts' 2019-08-14 04:30:21 +02:00
Mark Beamer Jr
d73c6945dd Fix travis.yml non interactive 2019-08-14 04:22:59 +02:00
Mark Beamer Jr
7a76a4b79d Upgrade to 1.0.17 of lbry.go 2019-08-14 04:22:59 +02:00
Mark Beamer Jr
cd57e1ab08 Always send channel export 2019-08-14 04:22:59 +02:00
Mark Beamer Jr
b90e367e82 Addressed comments 2019-08-14 04:22:59 +02:00
Mark Beamer Jr
e477759395 change to master tag of internal-apis docker image 2019-08-14 04:22:59 +02:00
Mark Beamer Jr
6438048f40 Add CleanOnStartup for local testing and e2e testing
Update methods to be refactored into util package to be used in many locations.
Add end to end test of ytsync for CI
2019-08-14 04:22:59 +02:00
Mark Beamer Jr
73937a541b Add var/tmp volume for ytsync tmp files communicated between lbrynet and ytsync
Add reference to wallet dir env var for when the wallet file is uploaded so it is cleared properly when testing locally
2019-08-14 04:22:59 +02:00
Mark Beamer Jr
517c2c56a3 Add disabling of blob uploading for local testing until reflector docker image can be created( not urgent now ).
Add ability to generate blocks when in regtest mode and using docker.
Add refactor of environment variable accessing.
2019-08-14 04:22:59 +02:00
Mark Beamer Jr
c84e1e6fb9 Fixed bugs around sending channel cert and enabling local testing with docker. 2019-08-14 04:22:59 +02:00
Mark Beamer Jr
f26712b022 Added e2e test docker-compose and supporting work so that ytsync can run independently of the mainnet for testing/CI 2019-08-14 04:22:59 +02:00
Mark Beamer Jr
5e56ac0516 Add set channel cert on new channels. 2019-08-14 04:22:59 +02:00
Niko Storni
0308da2e4c add timeout to daemon startup 2019-08-13 23:05:09 +02:00
Niko Storni
df63b91f61 update reflector libs 2019-08-06 20:57:36 +02:00
Niko Storni
03161a5a02 prefer 1080p videos 2019-08-05 20:08:46 +02:00
Niko Storni
6dc2c39ee6 limit description length
rather than limiting lines count
2019-08-02 10:01:33 -04:00
Niko Storni
87211e802d reduce tags length 2019-08-02 09:05:24 -04:00
Niko Storni
3ba0c97634 fix flags 2019-07-25 18:33:05 -04:00
Niko Storni
9d65fce6bc do not apply limits if disabled 2019-07-25 18:22:56 -04:00
Niko Storni
d5ed4ce753 facilitate streaming by moving the moov atom 2019-07-25 18:15:14 -04:00
Niko Storni
0aedf80386 limit throttling to only queued channels 2019-07-22 16:24:13 -04:00
Niko Storni
c193dae95a temporary throttling of channels to sync for tom to review them 2019-07-22 16:16:23 -04:00
Niko Storni
0c47510785 revert quoting paths 2019-07-22 02:51:13 +02:00
Niko Storni
8f556a86c8 fix missing fragments for good
never retry them again
2019-07-22 02:45:38 +02:00
Niko Storni
64ab2490fe fix bugs
improve debugging
2019-07-22 02:27:14 +02:00
Niko Storni
1760935edf don't retry missing fragments
lower syncing quality for now
2019-07-22 02:09:18 +02:00
Niko Storni
60f2585f33 improve throttling
refactor slack wrapper
cleanup dependencies
2019-07-15 16:16:02 -04:00
Niko Storni
df08d42d9b improve ip re-selection 2019-07-13 12:15:19 +02:00
Niko Storni
1b55033bb8 don't retry big videos 2019-07-13 11:40:50 +02:00
Niko Storni
1331e5769f actually use the functions I create... 2019-07-13 11:33:40 +02:00
Niko Storni
43f745ae20 improve ip shuffling (prevent ips in use from being dealt) 2019-07-12 23:54:48 +02:00
Niko Storni
8c2a8262e6 fix bugs 2019-07-12 23:20:01 +02:00
Niko Storni
6f486717da fix stop pattern
fix noretry string
fix youtube-dl param
2019-07-12 22:58:34 +02:00
Niko Storni
a3fcd67611 better handle interruptions by user
refactor IP throttling in its own package
2019-07-12 21:32:49 +02:00
Niko Storni
ae1ffb60c5 add ip throttling and better pooling
fix nil pointer dereference
2019-07-12 20:42:44 +02:00
Niko Storni
fa24d83ae9 add max length filter 2019-07-12 02:39:24 +02:00
Niko Storni
2f11d99a97 stop on rare bug/failure 2019-07-12 02:21:27 +02:00
Niko Storni
bab01a6354 prefer IPv4 over IPv6
youtube sucks and they ban /64 subnet blocks on IPv6
2019-07-12 01:54:34 +02:00
Niko Storni
89fe1e99d6 improve ipv6 validation
fix audio merge issue
2019-07-12 01:28:00 +02:00
Niko Storni
ec81af0857 fix download filters 2019-07-12 01:04:45 +02:00
Niko Storni
51cb7f5e34 improve youtube antithrottle
fix logging
2019-07-11 19:14:15 +02:00
Niko Storni
8ad2acc936 remove original downloader
add locking to IP selection to avoid concurrent access
2019-07-11 17:32:46 +02:00
Niko Storni
b8caf3f82b add IPv6 source pooling to bypass youtube throttling 2019-07-11 16:22:58 +02:00
Niko Storni
b34d328cba halt ytsync when youtube throttles us 2019-07-10 22:47:01 +02:00
Niko Storni
a0dcb2216d close db handles 2019-07-10 22:23:57 +02:00
Niko Storni
9d6a7e03cd update prism dependency 2019-07-10 21:07:48 +02:00
Niko Storni
e44e272689 adjust config name 2019-07-10 18:04:05 +02:00
Niko Storni
87f7d4c9f9 recreate directory when emptied 2019-07-10 17:42:28 +02:00
Niko Storni
173e62d286 add check to avoid deleting unreflected blobs 2019-07-10 17:39:58 +02:00
Niko Storni
013760d48d delete blobs after reflection
remove 15 seconds wait after sync
2019-07-10 17:27:46 +02:00
Niko Storni
5dbad2d2e4 add automatic blob reflection
retry videos that fail due to tx issues
2019-07-10 15:46:54 +02:00
Niko Storni
545ab07013 fail channels that should fail 2019-07-05 17:35:20 +02:00
Niko Storni
c402a1cdef add locking to map reads 2019-06-26 14:40:40 -04:00
Niko Storni
a4c6b2da30 actually count UTXOs 2019-06-25 22:28:43 -04:00
Niko Storni
7f48220a80 remove dist folder... 2019-06-26 03:48:44 +02:00
Niko Storni
9e49110295 fix hash 2019-06-26 03:37:52 +02:00
Niko Storni
4506bdb638 fix bug in utxo counting 2019-06-25 21:12:15 -04:00
Niko Storni
a8551bc9ee fix UTXO waiting logic 2019-06-25 20:45:27 -04:00
Niko Storni
b5c9806dd0 improve space management 2019-06-24 20:43:50 -04:00
Niko Storni
8c349cf7ac fix bug in queues 2019-06-24 04:14:27 -04:00
Niko Storni
f3f40c41ef fix refill balance 2019-06-22 06:08:11 -04:00
Niko Storni
05b65c4cb5 add noretry failure 2019-06-21 16:41:46 +02:00
Niko Storni
69ad13848b fix utxo count
fix one bug
add content type to uploaded thumbs
2019-06-20 21:45:17 +02:00
Niko Storni
e496ad670a limit tag length 2019-06-20 17:43:46 +02:00
Niko
9d8eb32f9f
Merge pull request #28 from lbryio/metadata_upgrade
Add support for new SDK (0.37.*) and support for upgrading channels and claims to new metadata
2019-06-13 20:14:13 +02:00
Niko Storni
6668840b11 fix description not containing youtube link
add workaround to SDK bug not spending the right UTXOs
fix bug in assignments
2019-06-13 19:33:58 +02:00
Niko Storni
e893f68bc1 fix queue pulling
fix address reuse
2019-06-12 22:42:42 +02:00
Niko Storni
8da631eac3 add a limit to UTXOs generation
fix broadcastFee
fix spelling
2019-06-12 05:33:13 +02:00
Niko Storni
1729af0ae5 fix UTXOs management while publishing
fix account_set
improve logging
2019-06-12 05:25:01 +02:00
Niko Storni
dce71d2451 add adjustments to video tracking
reduce minimum refill amount (min account balance supersedes)
revert account_fund change as --everything isn't supported with outputs
fix bugs
2019-06-12 04:44:56 +02:00
Niko Storni
f4e75cf221 rewrite account funding algorithm
lock publishing when UTXOs management is in progress
spend everything when funding UTXOs
update lbry.go
2019-06-12 03:17:59 +02:00
Niko Storni
ae3dad944d add blocking support for publishing and upgrading
add channel wide mappings
2019-06-11 19:18:39 +02:00
Niko Storni
f7f0231c80 add VideoUpgradeFailed staus 2019-06-10 21:59:42 +02:00
Niko Storni
0c42b96332 remove unused script
update readme
2019-06-10 21:43:10 +02:00
Niko Storni
52cd483f47 add single channelwide tag
fix bug
2019-06-10 21:37:13 +02:00
Niko Storni
79dd6d1458 add better handling for database mismatches
improve UTXOs management
fix bugs
2019-06-10 02:23:07 +02:00
Niko Storni
10ad46f464 various bug fixes 2019-06-06 23:25:31 +02:00
Niko Storni
ccebab35da fix bugs 2019-06-06 17:26:10 +02:00
Niko Storni
8dce052fe6 change logic used to upgrade videos 2019-06-06 16:24:20 +02:00
Niko Storni
76e653fb9b add Fee support
lowercase all tags
refactor code
implement and test regexps for tags
2019-06-06 02:16:07 +02:00
Niko Storni
cfe8ff0879
add flag to remove unpublished videos
refactor code
add workaround for SDK bug
add code to remove unpublished videos
fix alt downloader to only produce mp4
add missing height and width
add tags sorting
add a few channel wide tags
2019-06-04 22:21:44 +02:00
72 changed files with 7949 additions and 1719 deletions

.gitignore vendored

@@ -1 +1,10 @@
bin/
bin/
e2e/persist
e2e/supporty/supporty
.env
blobsfiles
ytsync_docker
e2e/config.json
e2e/cookies.txt

.goreleaser.yml

@@ -9,12 +9,13 @@ builds:
- amd64
ldflags:
- -X "{{ .Env.IMPORTPATH }}/meta.semVersion={{ .Tag }}" -X "{{ .Env.IMPORTPATH }}/meta.version={{ .Env.VERSIONSHORT }}" -X "{{ .Env.IMPORTPATH }}/meta.versionLong={{ .Env.VERSIONLONG }}" -X "{{ .Env.IMPORTPATH }}/meta.commitMsg={{ .Env.COMMITMSG }}"
archive:
name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}'
replacements:
linux: Linux
amd64: x86_64
format: zip
archives:
- id: zip
name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}'
replacements:
linux: Linux
amd64: x86_64
format: zip
checksum:
name_template: 'checksums.txt'
snapshot:
@@ -24,5 +25,4 @@ changelog:
filters:
exclude:
- '^docs:'
- '^test:'
- '^test:'

.travis.yml

@@ -1,15 +1,40 @@
os: linux
dist: xenial
dist: bionic
language: go
go_import_path: github.com/lbryio/ytsync
go:
- 1.11.5
- master
- 1.17.x
install: true
cache:
directories:
- $HOME/.cache/go-build
- $HOME/gopath/pkg/mod
services:
- docker
addons:
apt:
update: true
packages:
- ffmpeg
- tree
- python3-pip
before_script:
- sudo pip3 install -U yt-dlp
- sudo add-apt-repository -y ppa:savoury1/ffmpeg4
env:
global:
- GO111MODULE=on
#GITHUB_TOKEN
- secure: "Ps3KocRP5xnM3/uA99CeYhDTVxRIuW7fGyrtqBeRWZW0cXzeA4XCTKxqcFbrPUPw67XkrBVgE58JDdWoQEJ7tm67PjMm/ltp5Evhx/QAJDh+YSofXyGDVpG1mrTZFI66R3NVVJLkSGALMkuWWXvfYZeU//AworJbyRoaIK/CVt5OP23i5N4tdd5UXc5dfLuYqnKRynyMmCkz9c3yEIQMXoPhG2hx7l7L2BeMJvcKmVhkSN7nQayjnrbUXGm/IRqrb88lvkyBevN5E3IB2V5IKEieIPZjbD/N0IfcnAt89Z96tgDhtIbx3ZvXm92lsvHA8buqQpG9d2AmSi6GKs64lQcnGeM5o0wER2JHWl1OSa1Nr/UAo5Xb/PM65Yt3yZE8AuMKHBmbfDSBzdkTXx58AeDzFUd3kMXD/fFjeQQWyXFlOss3ygH9SObl827Txmz9OJqZaxabs5Q3AP6m3EjKjz7zfLfrgpcxJM2WBiU1bN0ZxUgZkImy/CHk5gCZ7vhcnaLiDO4HZnzY/aRJwKYQPE5i0O2nHpIfovqkc0DFBA7U/7Cjin7e1E0UZvF3meLOxMqkfc6X7QTxqQpt2Tej6jlpdxw4CTLwGUhGkAw9IAPkUB3L0EbZ1/ksGhNvGDvUeSTq8hYdMAPmA+k9jS6653V4SQ+qBMy5++tbr5AeZQI="
script:
#- ./e2e/e2e.sh # Hold until we can resolve the /var/tmp issue - talk to beamer/niko
- make
deploy:
provider: script
skip_cleanup: true
@@ -17,4 +42,4 @@ deploy:
file: bin/ytsync
on:
repo: lbryio/ytsync
tags: true
tags: true

0001-lbry-patch.patch Normal file

@@ -0,0 +1,111 @@
From 30380338ba9af01696c94b61f0597131638eaec1 Mon Sep 17 00:00:00 2001
From: Niko Storni <niko@lbry.io>
Date: Mon, 16 Dec 2019 00:13:36 +0100
Subject: [PATCH] lbry-patch
---
youtube_dl/extractor/youtube.py | 45 +++++++++++++++++++++++++--------
1 file changed, 35 insertions(+), 10 deletions(-)
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index b913d07a6..cd66a5b01 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -10,6 +10,7 @@ import random
import re
import time
import traceback
+import subprocess
from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
@@ -536,6 +537,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
_GEO_BYPASS = False
+ _WGET_429_RATE_LIMIT = 8191
+ _WGET_BINARY = "wget"
+
IE_NAME = 'youtube'
_TESTS = [
{
@@ -1254,6 +1258,17 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
+ def _rate_limit_download(self, url, video_id, note=None):
+ if note is None:
+ self.report_download_webpage(video_id)
+ elif note is not False:
+ if video_id is None:
+ self.to_screen('%s' % (note,))
+ else:
+ self.to_screen('%s: %s' % (video_id, note))
+ source_address = self._downloader.params.get('source_address')
+ return subprocess.run([self._WGET_BINARY, '-q', '--limit-rate', str(self._WGET_429_RATE_LIMIT), '--bind-address', source_address, '-O', '-', url], check=True, stdout=subprocess.PIPE).stdout.decode(encoding='UTF-8')
+
def _extract_signature_function(self, video_id, player_url, example_sig):
id_m = re.match(
r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|(?:/[a-z]{2,3}_[A-Z]{2})?/base)?\.(?P<ext>[a-z]+)$',
@@ -1678,7 +1693,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
- video_webpage = self._download_webpage(url, video_id)
+ video_webpage = self._rate_limit_download(url, video_id)
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
@@ -1736,10 +1751,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
- video_info_webpage = self._download_webpage(
+ video_info_webpage = self._rate_limit_download(
video_info_url, video_id,
- note='Refetching age-gated info webpage',
- errnote='unable to download video info webpage')
+ note='Refetching age-gated info webpage')
video_info = compat_parse_qs(video_info_webpage)
pl_response = video_info.get('player_response', [None])[0]
player_response = extract_player_response(pl_response, video_id)
@@ -1777,7 +1791,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
# The general idea is to take a union of itags of both DASH manifests (for example
# video with such 'manifest behavior' see https://github.com/ytdl-org/youtube-dl/issues/6093)
self.report_video_info_webpage_download(video_id)
- for el in ('embedded', 'detailpage', 'vevo', ''):
+ for el in ('', 'embedded', 'detailpage', 'vevo'):
query = {
'video_id': video_id,
'ps': 'default',
@@ -1789,11 +1803,22 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
query['el'] = el
if sts:
query['sts'] = sts
- video_info_webpage = self._download_webpage(
- '%s://www.youtube.com/get_video_info' % proto,
- video_id, note=False,
- errnote='unable to download video info webpage',
- fatal=False, query=query)
+
+ if el == '':
+ base_url = 'https://youtube.com/get_video_info?video_id={}'.format(video_id)
+ else:
+ base_url = 'https://youtube.com/get_video_info'
+
+ for q in query:
+ if q is None or q is "":
+ continue
+ if query[q] is None or query[q] is "":
+ continue
+
+ base_url = base_url + "?{}={}".format(q, query[q])
+
+ video_info_webpage = self._rate_limit_download(base_url, video_id)
+
if not video_info_webpage:
continue
get_video_info = compat_parse_qs(video_info_webpage)
--
2.17.1
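The patch above replaces youtube-dl's internal webpage fetches with a bandwidth-capped `wget` subprocess bound to a chosen source address (the `_rate_limit_download` helper). A minimal Go sketch of the same idea, assuming `wget` is on `PATH`; the flags and the 8191 bytes/s cap come straight from the patch, while the function name and the values in `main` are hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"os/exec"
)

// rateLimitDownload mirrors the patch's _rate_limit_download: fetch url with
// wget, capping throughput at limit bytes/s and binding to sourceAddr.
func rateLimitDownload(url, sourceAddr string, limit int) (string, error) {
	out, err := exec.Command("wget", "-q",
		"--limit-rate", fmt.Sprintf("%d", limit), // the patch uses 8191
		"--bind-address", sourceAddr, // bind to a specific local IP, as in the patch
		"-O", "-", // write the document to stdout
		url).Output()
	return string(out), err
}

func main() {
	body, err := rateLimitDownload("https://example.com/", "0.0.0.0", 8191)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Printf("fetched %d bytes\n", len(body))
}
```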

LICENSE Normal file

@@ -0,0 +1,15 @@
The MIT License (MIT)
Copyright (c) 2017-2020 LBRY Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the
following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Makefile

@@ -13,7 +13,7 @@ LDFLAGS = -ldflags "-X ${IMPORT_PATH}/meta.Version=${VERSION} -X ${IMPORT_PATH}/
build:
mkdir -p ${BIN_DIR} && CGO_ENABLED=0 go build ${LDFLAGS} -asmflags -trimpath=${DIR} -o ${BIN_DIR}/${BINARY} main.go
mkdir -p ${BIN_DIR} && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build ${LDFLAGS} -asmflags -trimpath=${DIR} -o ${BIN_DIR}/${BINARY} main.go
clean:
if [ -f ${BIN_DIR}/${BINARY} ]; then rm ${BIN_DIR}/${BINARY}; fi

README.md

@@ -8,23 +8,17 @@ With the support of said database, the tool is also able to keep all the channel
# Requirements
- lbrynet SDK https://github.com/lbryio/lbry/releases (We strive to keep the latest release of ytsync compatible with the latest major release of the SDK)
- lbrynet SDK https://github.com/lbryio/lbry-sdk/releases (We strive to keep the latest release of ytsync compatible with the latest major release of the SDK)
- a lbrycrd node running (localhost or on a remote machine) with credits in it
- internal-apis (you cannot run this one yourself)
- python3-pip
- yt-dlp (`pip3 install -U yt-dlp`)
- ffmpeg (latest)
# Setup
- make sure daemon is stopped and can be controlled through `systemctl` (find example below)
- extract the ytsync binary anywhere
- add the environment variables necessary to the tool
- export SLACK_TOKEN="a-token-to-spam-your-slack"
- export SLACK_CHANNEL="youtube-status"
- export YOUTUBE_API_KEY="youtube-api-key"
- export LBRY_WEB_API="https://lbry-api-url-here"
- export LBRY_API_TOKEN="internal-apis-token-for-ytsync-user"
- export LBRYCRD_STRING="tcp://user:password@host:5429"
- export AWS_S3_ID="THE-ID-LIES-HERE"
- export AWS_S3_SECRET="THE-SECRET-LIES-HERE"
- export AWS_S3_REGION="us-east-1"
- export AWS_S3_BUCKET="ytsync-wallets"
- create and fill `config.json` using [this example](config.json.example)
## systemd script example
`/etc/systemd/system/lbrynet.service`
@@ -55,21 +49,26 @@ Usage:
Flags:
--after int Specify from when to pull jobs [Unix time](Default: 0)
--before int Specify until when to pull jobs [Unix time](Default: current Unix time) (default 1582834707)
--before int Specify until when to pull jobs [Unix time](Default: current Unix time) (default 1669311891)
--channelID string If specified, only this channel will be synced.
--concurrent-jobs int how many jobs to process concurrently (default 1)
-h, --help help for ytsync
--limit int limit the amount of channels to sync
--max-length float Maximum video length to process (in hours) (default 2)
--max-length int Maximum video length to process (in hours) (default 2)
--max-size int Maximum video size to process (in MB) (default 2048)
--max-tries int Number of times to try a publish that fails (default 3)
--no-transfers Skips the transferring process of videos, channels and supports
--quick Look up only the last 50 videos from youtube
--remove-db-unpublished Remove videos from the database that are marked as published but aren't really published
--run-once Whether the process should be stopped after one cycle or not
--skip-space-check Do not perform free space check on startup
--status string Specify which queue to pull from. Overrides --update
--stop-on-error If a publish fails, stop all publishing and exit
--status2 string Specify which secondary queue to pull from.
--takeover-existing-channel If channel exists and we don't own it, take over the channel
--update Update previously synced channels instead of syncing new ones
--videos-limit int how many videos to process per channel (default 1000)
--upgrade-metadata Upgrade videos if they're on the old metadata version
--videos-limit int how many videos to process per channel (leave 0 for automatic detection)
```
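As a hypothetical example (values are illustrative; flags as listed above), a single-channel update run could look like `./bin/ytsync --update --channelID <channel-id> --videos-limit 100`.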
## Running from Source
@@ -86,17 +85,17 @@ Contributions to this project are welcome, encouraged, and compensated. For more
## Security
We take security seriously. Please contact [security@lbry.io](mailto:security@lbry.io) regarding any security issues. Our PGP key is [here](https://keybase.io/lbry/key.asc) if you need it.
We take security seriously. Please contact [security@lbry.io](mailto:security@lbry.io) regarding any security issues. Our PGP key is [here](https://lbry.com/faq/pgp-key) if you need it.
## Contact
The primary contact for this project is [Niko Storni](https://github.com/nikooo777) (niko@lbry.io).
The primary contact for this project is [Niko Storni](https://github.com/nikooo777) (niko@lbry.com).
## Additional Info and Links
- [https://lbry.io](https://lbry.io) - The live LBRY website
- [Discord Chat](https://chat.lbry.io) - A chat room for the LBRYians
- [Email us](mailto:hello@lbry.io) - LBRY Support email
- [https://lbry.com](https://lbry.com) - The live LBRY website
- [Discord Chat](https://chat.lbry.com) - A chat room for the LBRYians
- [Email us](mailto:hello@lbry.com) - LBRY Support email
- [Twitter](https://twitter.com/@lbryio) - LBRY Twitter page
- [Facebook](https://www.facebook.com/lbryio/) - LBRY Facebook page
- [Reddit](https://reddit.com/r/lbry) - LBRY Reddit page


@@ -0,0 +1,93 @@
package blobs_reflector
import (
"encoding/json"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/reflector.go/cmd"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/reflector.go/store"
"github.com/sirupsen/logrus"
"github.com/lbryio/ytsync/v5/util"
)
var dbHandle *db.SQL
func ReflectAndClean() error {
err := reflectBlobs()
if err != nil {
return err
}
return util.CleanupLbrynet()
}
func loadConfig(path string) (cmd.Config, error) {
var c cmd.Config
raw, err := ioutil.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
return c, errors.Err("config file not found")
}
return c, err
}
err = json.Unmarshal(raw, &c)
return c, err
}
func reflectBlobs() error {
if util.IsBlobReflectionOff() {
return nil
}
logrus.Infoln("reflecting blobs...")
//make sure lbrynet is off
running, err := util.IsLbrynetRunning()
if err != nil {
return err
}
if running {
return errors.Prefix("cannot reflect blobs as the daemon is running", err)
}
logrus.SetLevel(logrus.InfoLevel)
defer logrus.SetLevel(logrus.DebugLevel)
ex, err := os.Executable()
if err != nil {
return errors.Err(err)
}
exPath := filepath.Dir(ex)
config, err := loadConfig(exPath + "/prism_config.json")
if err != nil {
return errors.Err(err)
}
if dbHandle == nil {
dbHandle = new(db.SQL)
err = dbHandle.Connect(config.DBConn)
if err != nil {
return errors.Err(err)
}
}
st := store.NewDBBackedStore(store.NewS3Store(config.AwsID, config.AwsSecret, config.BucketRegion, config.BucketName), dbHandle, false)
uploadWorkers := 10
uploader := reflector.NewUploader(dbHandle, st, uploadWorkers, false, false)
usr, err := user.Current()
if err != nil {
return errors.Err(err)
}
blobsDir := usr.HomeDir + "/.lbrynet/blobfiles/"
err = uploader.Upload(blobsDir)
if err != nil {
return errors.Err(err)
}
if uploader.GetSummary().Err > 0 {
return errors.Err("not al blobs were reflected. Errors: %d", uploader.GetSummary().Err)
}
return nil
}
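A minimal caller sketch for the new `blobs_reflector` package above, assuming it lives at `github.com/lbryio/ytsync/v5/blobs_reflector` (the import path is inferred from the module path used elsewhere in this diff):

```go
package main

import (
	"log"

	blobs_reflector "github.com/lbryio/ytsync/v5/blobs_reflector" // assumed path
)

func main() {
	// Upload local blobs to the reflector, then clean up lbrynet state.
	// reflectBlobs refuses to run while the lbrynet daemon is up, so stop
	// the daemon first.
	if err := blobs_reflector.ReflectAndClean(); err != nil {
		log.Fatalln(err)
	}
}
```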

config.json.example Normal file

@@ -0,0 +1,35 @@
{
"slack_token": "",
"slack_channel": "ytsync-dev",
"internal_apis_endpoint": "http://localhost:15400",
"internal_apis_auth_token": "ytsyntoken",
"lbrycrd_string": "tcp://lbry:lbry@localhost:15200",
"wallet_s3_config": {
"id": "",
"secret": "",
"region": "us-east-1",
"bucket": "ytsync-wallets",
"endpoint": ""
},
"blockchaindb_s3_config": {
"id": "",
"secret": "",
"region": "us-east-1",
"bucket": "blockchaindbs",
"endpoint": ""
},
"thumbnails_s3_config": {
"id": "",
"secret": "",
"region": "us-east-1",
"bucket": "thumbnails.lbry.com",
"endpoint": ""
},
"aws_thumbnails_s3_config": {
"id": "",
"secret": "",
"region": "us-east-1",
"bucket": "thumbnails.lbry.com",
"endpoint": ""
}
}

configs/configs.go Normal file

@@ -0,0 +1,75 @@
package configs
import (
"os"
"regexp"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
log "github.com/sirupsen/logrus"
"github.com/tkanos/gonfig"
)
type S3Configs struct {
ID string `json:"id"`
Secret string `json:"secret"`
Region string `json:"region"`
Bucket string `json:"bucket"`
Endpoint string `json:"endpoint"`
}
type Configs struct {
SlackToken string `json:"slack_token"`
SlackChannel string `json:"slack_channel"`
InternalApisEndpoint string `json:"internal_apis_endpoint"`
InternalApisAuthToken string `json:"internal_apis_auth_token"`
LbrycrdString string `json:"lbrycrd_string"`
WalletS3Config S3Configs `json:"wallet_s3_config"`
BlockchaindbS3Config S3Configs `json:"blockchaindb_s3_config"`
AWSThumbnailsS3Config S3Configs `json:"aws_thumbnails_s3_config"`
ThumbnailsS3Config S3Configs `json:"thumbnails_s3_config"`
}
var Configuration *Configs
func Init(configPath string) error {
if Configuration != nil {
return nil
}
c := Configs{}
err := gonfig.GetConf(configPath, &c)
if err != nil {
return errors.Err(err)
}
Configuration = &c
return nil
}
func (s *S3Configs) GetS3AWSConfig() *aws.Config {
return &aws.Config{
Credentials: credentials.NewStaticCredentials(s.ID, s.Secret, ""),
Region: &s.Region,
Endpoint: &s.Endpoint,
S3ForcePathStyle: aws.Bool(true),
}
}
func (c *Configs) GetHostname() string {
var hostname string
var err error
hostname, err = os.Hostname()
if err != nil {
log.Error("could not detect system hostname")
hostname = "ytsync_unknown"
}
reg, err := regexp.Compile("[^a-zA-Z0-9_]+")
if err == nil {
hostname = reg.ReplaceAllString(hostname, "_")
}
if len(hostname) > 30 {
hostname = hostname[0:30]
}
return hostname
}
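A short usage sketch for the `configs` package above; the config path is illustrative, and `Init` must run before `Configuration` is read:

```go
package main

import (
	"log"

	"github.com/lbryio/ytsync/v5/configs"
)

func main() {
	// Load config.json once; Init is a no-op if Configuration is already set.
	if err := configs.Init("./config.json"); err != nil {
		log.Fatalln(err)
	}
	// Each S3Configs value yields a ready-made *aws.Config for its bucket.
	walletS3 := configs.Configuration.WalletS3Config.GetS3AWSConfig()
	_ = walletS3
	// GetHostname returns a sanitized host tag (max 30 chars) for logging.
	log.Println("running as:", configs.Configuration.GetHostname())
}
```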

downloader/downloader.go Normal file

@@ -0,0 +1,315 @@
package downloader
import (
"encoding/json"
"fmt"
"io/ioutil"
"math"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"strings"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/lbryio/ytsync/v5/downloader/ytdl"
"github.com/lbryio/ytsync/v5/ip_manager"
"github.com/lbryio/ytsync/v5/sdk"
"github.com/lbryio/ytsync/v5/shared"
util2 "github.com/lbryio/ytsync/v5/util"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/lbry.go/v2/extras/util"
"github.com/sirupsen/logrus"
)
func GetPlaylistVideoIDs(channelName string, maxVideos int, stopChan stop.Chan, pool *ip_manager.IPPool) ([]string, error) {
args := []string{"--skip-download", "https://www.youtube.com/channel/" + channelName + "/videos", "--get-id", "--flat-playlist", "--cookies", "cookies.txt", "--playlist-end", fmt.Sprintf("%d", maxVideos)}
ids, err := run(channelName, args, stopChan, pool)
if err != nil {
return nil, errors.Err(err)
}
videoIDs := make([]string, 0, maxVideos)
for i, v := range ids {
if v == "" {
continue
}
if i >= maxVideos {
break
}
videoIDs = append(videoIDs, v)
}
return videoIDs, nil
}
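// Hypothetical usage, assuming a stop.Chan and IP pool from the surrounding
// sync code; note that despite its name, the first argument is used as a
// channel ID in the /channel/<id>/videos URL:
//
//	ids, err := GetPlaylistVideoIDs("UCxxxxxxxx", 50, stopChan, pool)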
const releaseTimeFormat = "2006-01-02, 15:04:05 (MST)"
func GetVideoInformation(videoID string, stopChan stop.Chan, pool *ip_manager.IPPool) (*ytdl.YtdlVideo, error) {
args := []string{
"--skip-download",
"--write-info-json",
fmt.Sprintf("https://www.youtube.com/watch?v=%s", videoID),
"--cookies",
"cookies.txt",
"-o",
path.Join(util2.GetVideoMetadataDir(), videoID),
}
_, err := run(videoID, args, stopChan, pool)
if err != nil {
return nil, errors.Err(err)
}
f, err := os.Open(path.Join(util2.GetVideoMetadataDir(), videoID+".info.json"))
if err != nil {
return nil, errors.Err(err)
}
// defer the closing of our jsonFile so that we can parse it later on
defer f.Close()
// read our opened jsonFile as a byte array.
byteValue, _ := ioutil.ReadAll(f)
var video *ytdl.YtdlVideo
err = json.Unmarshal(byteValue, &video)
if err != nil {
return nil, errors.Err(err)
}
return video, nil
}
var errNotScraped = errors.Base("not yet scraped by caa.iti.gr")
var errUploadTimeEmpty = errors.Base("upload time is empty")
var errStatusParse = errors.Base("could not parse status, got number, need string")
var errConnectionIssue = errors.Base("there was a connection issue with the api")
func slack(format string, a ...interface{}) {
fmt.Printf(format+"\n", a...)
util.SendToSlack(format, a...)
}
func triggerScrape(videoID string, ip *net.TCPAddr) error {
//slack("Triggering scrape for %s", videoID)
u, err := url.Parse("https://caa.iti.gr/verify_videoV3")
q := u.Query()
q.Set("twtimeline", "0")
q.Set("url", "https://www.youtube.com/watch?v="+videoID)
u.RawQuery = q.Encode()
//slack("GET %s", u.String())
client := getClient(ip)
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return errors.Err(err)
}
req.Header.Set("User-Agent", ChromeUA)
res, err := client.Do(req)
if err != nil {
return errors.Err(err)
}
defer res.Body.Close()
var response struct {
Message string `json:"message"`
Status string `json:"status"`
VideoURL string `json:"video_url"`
}
err = json.NewDecoder(res.Body).Decode(&response)
if err != nil {
if strings.Contains(err.Error(), "cannot unmarshal number") {
return errors.Err(errStatusParse)
}
if strings.Contains(err.Error(), "no route to host") {
return errors.Err(errConnectionIssue)
}
return errors.Err(err)
}
switch response.Status {
case "removed_video":
return errors.Err("video previously removed from service")
case "no_video":
return errors.Err("they say 'video cannot be found'. wtf?")
default:
spew.Dump(response)
}
return nil
//https://caa.iti.gr/caa/api/v4/videos/reports/h-tuxHS5lSM
}
func getUploadTime(config *sdk.APIConfig, videoID string, ip *net.TCPAddr, uploadDate string) (string, error) {
//slack("Getting upload time for %s", videoID)
release, err := config.GetReleasedDate(videoID)
if err != nil {
logrus.Error(err)
}
ytdlUploadDate, err := time.Parse("20060102", uploadDate)
if err != nil {
logrus.Error(err)
}
if release != nil {
//const sqlTimeFormat = "2006-01-02 15:04:05"
sqlTime, err := time.ParseInLocation(time.RFC3339, release.ReleaseTime, time.UTC)
if err == nil {
hoursDiff := math.Abs(sqlTime.Sub(ytdlUploadDate).Hours())
if hoursDiff > 48 {
logrus.Infof("upload day from APIs differs from the ytdl one by more than 2 days.")
} else {
return sqlTime.Format(releaseTimeFormat), nil
}
} else {
logrus.Error(err)
}
}
return ytdlUploadDate.Format(releaseTimeFormat), nil
}
func getClient(ip *net.TCPAddr) *http.Client {
if ip == nil {
return http.DefaultClient
}
return &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
LocalAddr: ip,
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
},
}
}
const (
GoogleBotUA = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
ChromeUA = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36"
maxAttempts = 3
extractionError = "YouTube said: Unable to extract video data"
throttledError = "HTTP Error 429"
AlternateThrottledError = "returned non-zero exit status 8"
youtubeDlError = "exit status 1"
videoPremiereError = "Premieres in"
liveEventError = "This live event will begin in"
)
func run(use string, args []string, stopChan stop.Chan, pool *ip_manager.IPPool) ([]string, error) {
var useragent []string
var lastError error
for attempts := 0; attempts < maxAttempts; attempts++ {
sourceAddress, err := getIPFromPool(use, stopChan, pool)
if err != nil {
return nil, err
}
//copy args so retries don't share a backing array with the caller's slice
argsForCommand := append([]string{}, args...)
argsForCommand = append(argsForCommand, "--source-address", sourceAddress)
argsForCommand = append(argsForCommand, useragent...)
binary := "yt-dlp"
cmd := exec.Command(binary, argsForCommand...)
res, err := runCmd(cmd, stopChan)
pool.ReleaseIP(sourceAddress)
if err == nil {
return res, nil
}
lastError = err
if strings.Contains(err.Error(), youtubeDlError) {
if util.SubstringInSlice(err.Error(), shared.ErrorsNoRetry) {
break
}
if strings.Contains(err.Error(), extractionError) {
logrus.Warnf("known extraction error: %s", errors.FullTrace(err))
useragent = nextUA(useragent)
}
if strings.Contains(err.Error(), throttledError) || strings.Contains(err.Error(), AlternateThrottledError) {
pool.SetThrottled(sourceAddress)
//we don't want throttle errors to count toward the max retries
attempts--
}
}
}
return nil, lastError
}
func nextUA(current []string) []string {
if len(current) == 0 {
return []string{"--user-agent", GoogleBotUA}
}
return []string{"--user-agent", ChromeUA}
}
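// Sketch of the escalation run() performs with nextUA: attempt 1 uses yt-dlp's default
// user agent, the first extraction error switches to GoogleBotUA, and any further
// extraction error falls back to ChromeUA for the remaining attempts.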
func runCmd(cmd *exec.Cmd, stopChan stop.Chan) ([]string, error) {
logrus.Infof("running yt-dlp cmd: %s", strings.Join(cmd.Args, " "))
var err error
stderr, err := cmd.StderrPipe()
if err != nil {
return nil, errors.Err(err)
}
stdout, err := cmd.StdoutPipe()
if err != nil {
return nil, errors.Err(err)
}
err = cmd.Start()
if err != nil {
return nil, errors.Err(err)
}
outLog, err := ioutil.ReadAll(stdout)
if err != nil {
return nil, errors.Err(err)
}
errorLog, err := ioutil.ReadAll(stderr)
if err != nil {
return nil, errors.Err(err)
}
done := make(chan error, 1)
go func() {
done <- cmd.Wait()
}()
select {
case <-stopChan:
err := cmd.Process.Kill()
if err != nil {
return nil, errors.Prefix("failed to kill command after stopper cancellation", err)
}
return nil, errors.Err("interrupted by user")
case err := <-done:
if err != nil {
//return nil, errors.Prefix("yt-dlp "+strings.Join(cmd.Args, " ")+" ["+string(errorLog)+"]", err)
return nil, errors.Prefix(string(errorLog), err)
}
return strings.Split(strings.Replace(string(outLog), "\r\n", "\n", -1), "\n"), nil
}
}
func getIPFromPool(use string, stopChan stop.Chan, pool *ip_manager.IPPool) (sourceAddress string, err error) {
for {
sourceAddress, err = pool.GetIP(use)
if err != nil {
if errors.Is(err, ip_manager.ErrAllThrottled) {
select {
case <-stopChan:
return "", errors.Err("interrupted by user")
default:
time.Sleep(ip_manager.IPCooldownPeriod)
continue
}
} else {
return "", err
}
}
break
}
return
}


@ -0,0 +1,40 @@
package downloader
import (
"testing"
"github.com/lbryio/ytsync/v5/ip_manager"
"github.com/lbryio/ytsync/v5/sdk"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
)
func TestGetPlaylistVideoIDs(t *testing.T) {
s := stop.New()
pool, err := ip_manager.GetIPPool(s)
assert.NoError(t, err)
videoIDs, err := GetPlaylistVideoIDs("UCJ0-OtVpF0wOKEqT2Z1HEtA", 50, s.Ch(), pool)
if err != nil {
logrus.Error(err)
}
for _, id := range videoIDs {
println(id)
}
}
func TestGetVideoInformation(t *testing.T) {
s := stop.New()
ip, err := ip_manager.GetIPPool(s)
assert.NoError(t, err)
video, err := GetVideoInformation("kDGOHNpRjzc", s.Ch(), ip)
assert.NoError(t, err)
assert.NotNil(t, video)
logrus.Info(video.ID)
}
func Test_getUploadTime(t *testing.T) {
configs := sdk.APIConfig{}
got, err := getUploadTime(&configs, "kDGOHNpRjzc", nil, "20060102")
assert.NoError(t, err)
t.Log(got)
}

137
downloader/ytdl/Video.go Normal file

@ -0,0 +1,137 @@
package ytdl
import (
"time"
"github.com/lbryio/ytsync/v5/sdk"
"github.com/sirupsen/logrus"
)
type YtdlVideo struct {
ID string `json:"id"`
Title string `json:"title"`
Thumbnails []Thumbnail `json:"thumbnails"`
Description string `json:"description"`
ChannelID string `json:"channel_id"`
Duration int `json:"duration"`
Categories []string `json:"categories"`
Tags []string `json:"tags"`
IsLive bool `json:"is_live"`
LiveStatus string `json:"live_status"`
ReleaseTimestamp *int64 `json:"release_timestamp"`
uploadDateForReal *time.Time
Availability string `json:"availability"`
ReleaseDate string `json:"release_date"`
UploadDate string `json:"upload_date"`
//WasLive bool `json:"was_live"`
//Formats interface{} `json:"formats"`
//Thumbnail string `json:"thumbnail"`
//Uploader string `json:"uploader"`
//UploaderID string `json:"uploader_id"`
//UploaderURL string `json:"uploader_url"`
//ChannelURL string `json:"channel_url"`
//ViewCount int `json:"view_count"`
//AverageRating interface{} `json:"average_rating"`
//AgeLimit int `json:"age_limit"`
//WebpageURL string `json:"webpage_url"`
//PlayableInEmbed bool `json:"playable_in_embed"`
//AutomaticCaptions interface{} `json:"automatic_captions"`
//Subtitles interface{} `json:"subtitles"`
//Chapters interface{} `json:"chapters"`
//LikeCount int `json:"like_count"`
//Channel string `json:"channel"`
//ChannelFollowerCount int `json:"channel_follower_count"`
//OriginalURL string `json:"original_url"`
//WebpageURLBasename string `json:"webpage_url_basename"`
//WebpageURLDomain string `json:"webpage_url_domain"`
//Extractor string `json:"extractor"`
//ExtractorKey string `json:"extractor_key"`
//Playlist interface{} `json:"playlist"`
//PlaylistIndex interface{} `json:"playlist_index"`
//DisplayID string `json:"display_id"`
//Fulltitle string `json:"fulltitle"`
//DurationString string `json:"duration_string"`
//RequestedSubtitles interface{} `json:"requested_subtitles"`
//HasDrm bool `json:"__has_drm"`
//RequestedFormats interface{} `json:"requested_formats"`
//Format string `json:"format"`
//FormatID string `json:"format_id"`
//Ext string `json:"ext"`
//Protocol string `json:"protocol"`
//Language interface{} `json:"language"`
//FormatNote string `json:"format_note"`
//FilesizeApprox int `json:"filesize_approx"`
//Tbr float64 `json:"tbr"`
//Width int `json:"width"`
//Height int `json:"height"`
//Resolution string `json:"resolution"`
//Fps int `json:"fps"`
//DynamicRange string `json:"dynamic_range"`
//Vcodec string `json:"vcodec"`
//Vbr float64 `json:"vbr"`
//StretchedRatio interface{} `json:"stretched_ratio"`
//Acodec string `json:"acodec"`
//Abr float64 `json:"abr"`
//Asr int `json:"asr"`
//Epoch int `json:"epoch"`
//Filename string `json:"filename"`
//Urls string `json:"urls"`
//Type string `json:"_type"`
}
type Thumbnail struct {
URL string `json:"url"`
Preference int `json:"preference"`
ID string `json:"id"`
Height int `json:"height,omitempty"`
Width int `json:"width,omitempty"`
Resolution string `json:"resolution,omitempty"`
}
func (v *YtdlVideo) GetUploadTime() time.Time {
//priority list:
// release timestamp from yt
// release timestamp from morty
// release date from yt
// upload date from yt
if v.uploadDateForReal != nil {
return *v.uploadDateForReal
}
var ytdlReleaseTimestamp time.Time
if v.ReleaseTimestamp != nil && *v.ReleaseTimestamp > 0 {
ytdlReleaseTimestamp = time.Unix(*v.ReleaseTimestamp, 0).UTC()
}
//get morty timestamp
var mortyReleaseTimestamp time.Time
mortyRelease, err := sdk.GetAPIsConfigs().GetReleasedDate(v.ID)
if err != nil {
logrus.Error(err)
} else if mortyRelease != nil {
mortyReleaseTimestamp, err = time.ParseInLocation(time.RFC3339, mortyRelease.ReleaseTime, time.UTC)
if err != nil {
logrus.Error(err)
}
}
var ytdlReleaseDate time.Time
if v.ReleaseDate != "" {
ytdlReleaseDate, err = time.Parse("20060102", v.ReleaseDate)
if err != nil {
logrus.Error(err)
}
}
var ytdlUploadDate time.Time
if v.UploadDate != "" {
ytdlUploadDate, err = time.Parse("20060102", v.UploadDate)
if err != nil {
logrus.Error(err)
}
}
if !ytdlReleaseTimestamp.IsZero() {
v.uploadDateForReal = &ytdlReleaseTimestamp
} else if !mortyReleaseTimestamp.IsZero() {
v.uploadDateForReal = &mortyReleaseTimestamp
} else if !ytdlReleaseDate.IsZero() {
v.uploadDateForReal = &ytdlReleaseDate
} else {
v.uploadDateForReal = &ytdlUploadDate
}
return *v.uploadDateForReal
}
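// Usage sketch (hypothetical values): a video with ReleaseTimestamp set resolves from
// that field alone; one with only UploadDate "20220815" falls through the chain above,
// and the parsed date is cached in uploadDateForReal for subsequent calls.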


@ -0,0 +1,28 @@
version: '3.4'
services:
###########
## MYSQL ##
###########
mysql:
image: mysql:5.7.23
restart: "no"
ports:
- 3306:3306
volumes:
- "../persist/chainquery/db:/var/lib/mysql"
## This one may need to be tweaked based on where you run this docker-compose from.
- "../stuff/my.cnf:/etc/mysql/conf.d/chainquery-optimizations.cnf"
################
## Chainquery ##
################
chainquery:
image: lbry/chainquery:v1.8.1
restart: "no"
ports:
- 6300:6300
depends_on:
- mysql
## TODO: Uncomment this in a docker-compose.override.yml to allow for external configurations.
volumes:
- "../persist/chainquery/config/chainqueryconfig.toml:/etc/chainquery/chainqueryconfig.toml"


@ -0,0 +1,33 @@
## Get the latest source and extract it for the app container.
## Design choice: two RUN layers are used to keep builds faster.
FROM ubuntu:18.04 as prep
LABEL MAINTAINER="leopere [at] nixc [dot] us"
RUN apt-get update && \
apt-get -y install unzip curl telnet wait-for-it && \
apt-get autoclean -y && \
rm -rf /var/lib/apt/lists/*
WORKDIR /
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
COPY ./start.sh start
COPY ./healthcheck.sh healthcheck
ARG VERSION="master"
RUN curl -s -o /chainquery http://build.lbry.io/chainquery/branch-"${VERSION}"/chainquery && \
chmod +x /chainquery
FROM ubuntu:18.04 as app
RUN apt-get update && \
apt-get -y install telnet wait-for-it && \
apt-get autoclean -y && \
rm -rf /var/lib/apt/lists/*
ARG VERSION="master"
ADD https://raw.githubusercontent.com/lbryio/chainquery/"${VERSION}"/config/default/chainqueryconfig.toml /etc/lbry/chainqueryconfig.toml.orig
RUN adduser chainquery --gecos GECOS --shell /bin/bash --disabled-password --home /home/chainquery && \
chown -R chainquery:chainquery /etc/lbry
COPY --from=prep ./healthcheck /chainquery /start /usr/bin/
HEALTHCHECK --interval=1m --timeout=30s \
CMD healthcheck
EXPOSE 6300
USER chainquery
STOPSIGNAL SIGINT
CMD ["start"]

8
e2e/chainquery/docker/build.sh Executable file

@ -0,0 +1,8 @@
#!/bin/bash
if [ $# -eq 0 ]
then
echo "No docker tag argument supplied. Use './build.sh <tag>'"
exit 1
fi
docker build --no-cache --build-arg VERSION=$1 --tag lbry/chainquery:$1 .
docker push lbry/chainquery:$1


@ -0,0 +1,2 @@
#!/usr/bin/env bash
curl --fail http://localhost:6300/api/status || exit 1


@ -0,0 +1,9 @@
# Default Homebrew MySQL server config
[mysqld]
# Only allow connections from localhost
innodb_log_file_size=5G
key_buffer_size=1G
innodb_flush_log_at_trx_commit = 0
innodb_autoinc_lock_mode=2
innodb_buffer_pool_size=1G
innodb_log_buffer_size=1G

51
e2e/chainquery/docker/start.sh Executable file

@ -0,0 +1,51 @@
#!/usr/bin/env bash
## Config setup
## Setup Values
DEBUGMODE="debugmode=$DEBUGMODE"
LBRYCRDURL="lbrycrdurl=\"rpc://$RPC_USER:$RPC_PASSWORD@10.5.1.2:9245\""
MYSQLDSN="mysqldsn=\"$MYSQL_USER:$MYSQL_PASSWORD@tcp($MYSQL_SERVER:3306)/$MYSQL_DATABASE\""
APIMYSQLDSN="apimysqldsn=\"$MYSQL_USER:$MYSQL_PASSWORD@tcp($MYSQL_SERVER:3306)/$MYSQL_DATABASE\""
## Setup Defaults
DEBUGMODE_DEFAULT='#DEFAULT-debugmode=false'
LBRYCRDURL_DEFAULT='#DEFAULT-lbrycrdurl="rpc://lbry:lbry@localhost:9245"'
MYSQLDSN_DEFAULT='#DEFAULT-mysqldsn="lbry:lbry@tcp(localhost:3306)/chainquery"'
APIMYSQLDSN_DEFAULT='#DEFAULT-apimysqldsn="lbry:lbry@tcp(localhost:3306)/chainquery"'
## Add setup value variable name to this list to get processed on container start
CONFIG_SETTINGS=(
DEBUGMODE
LBRYCRDURL
MYSQLDSN
APIMYSQLDSN
)
function set_configs() {
## Set configs on container start if not already set.
for i in "${!CONFIG_SETTINGS[@]}"; do
## Indirect references http://tldp.org/LDP/abs/html/ivr.html
eval FROM_STRING=\$"${CONFIG_SETTINGS[$i]}_DEFAULT"
eval TO_STRING=\$${CONFIG_SETTINGS[$i]}
## TODO: Add a bit more magic to make sure that you're only configuring things if not set by config mounts.
sed -i "s~$FROM_STRING~$TO_STRING~g" /etc/lbry/chainqueryconfig.toml
done
echo "Reading config for debugging."
cat /etc/lbry/chainqueryconfig.toml
}
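## Example of the indirect expansion above (illustration only): with DEBUGMODE=true in
## the environment, FROM_STRING becomes '#DEFAULT-debugmode=false', TO_STRING becomes
## 'debugmode=true', and sed swaps the former for the latter in chainqueryconfig.toml.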
if [[ ! -f /etc/lbry/chainqueryconfig.toml ]]; then
echo "[INFO]: Did not find chainqueryconfig.toml"
echo " Installing default and configuring with provided environment variables if any."
## Install fresh copy of config file.
echo "cp -v /etc/lbry/chainqueryconfig.toml.orig /etc/lbry/chainqueryconfig.toml"
cp -v /etc/lbry/chainqueryconfig.toml.orig /etc/lbry/chainqueryconfig.toml
ls -lAh /etc/lbry/
set_configs
else
echo "[INFO]: Found a copy of chainqueryconfig.toml in /etc/lbry"
fi
## For now this is kept simple. Eventually all command args could be added as env vars in the Dockerfile, or passed safely via docker-compose.yml
chainquery serve --configpath "/etc/lbry/"

99
e2e/chainqueryconfig.toml Normal file

@ -0,0 +1,99 @@
#Debug mode outputs specific information to the console
#DEFAULT: false
#debugmode=
#DebugQueryMode outputs SQL Boiler queries to the console.
#DEFAULT: false
#debugquerymode=
#LBRYcrd URL is required for chainquery to query the blockchain
#DEFAULT: "rpc://lbry:lbry@localhost:9245"
lbrycrdurl="rpc://lbry:lbry@lbrycrd:29245"
#MySQL DSN is required for chainquery to store information.
#DEFAULT: "lbry:lbry@tcp(localhost:3306)/chainquery"
#SUGGESTED: "lbry:lbry@unix(/var/run/mysqld/mysqld.sock)/chainquery"
mysqldsn="lbry:lbry@tcp(mysql:3306)/chainquery"
#API MySQL DSN is required for chainquery to expose a SQL query service
#DEFAULT: "lbry:lbry@tcp(localhost:3306)/chainquery"
#SUGGESTED: "lbry:lbry@unix(/var/run/mysqld/mysqld.sock)/chainquery"
apimysqldsn="lbry:lbry@tcp(mysql:3306)/chainquery"
#API Host and Port is required for the API Server to bind and listen on.
#DEFAULT: "0.0.0.0:6300"
#apihostport=
#Profile mode enables and disables the reporting of a profile for chainquery
#DEFAULT: false
#profilemode=
#Daemon mode tells chainquery how hard it should work to catch up processing the blockchain
#daemonmode=0 #BeastMode: continuously processes block after block until caught up.
#daemonmode=1 #SlowAndSteadyMode: processes blocks at a frequency of 1 block every 100ms
#daemonmode=2 #DelayMode: processes a block with a configured delay frequency (set via 'processingdelay')
#daemonmode=3 #DaemonMode: processes a block on every iteration of the daemon.
#DEFAULT: 0
#daemonmode=
#Default client timeout is for communication with the api of chainquery
#DEFAULT: 20 #Measured in seconds
#defaultclienttimeout=
#Processing delay is used to determine how frequently chainquery should process a block
# It is only used if Daemon mode is set to delay mode
#DEFAULT: 100 #Measured in milliseconds
#processingdelay=
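#Example (illustrative values, not defaults): run in DelayMode, processing one block
#every 500ms:
#daemonmode=2
#processingdelay=500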
#Daemon delay is the frequency at which chainquery checks for work to do.
#DEFAULT: 1 #Measured in seconds
#daemondelay=
#Profiling options - will output the time taken for certain operations related to the category below
#DEFAULT: false (for all 3 params)
#daemonprofile=
#lbrycrdprofile=
#mysqlprofile=
#Slack Hook URL allows slack integration. All logging info level and above is posted to a slack channel.
#DEFAULT: ""
#slackhookurl=
#Slack Channel is the channel where you want the messages to appear. Works together with the hook url.
#DEFAULT: ""
#slackchannel=
#Slack Log Level tells chainquery what level of logging will be sent to the slack channel. It will log all levels below
# it as well. Panic=0,Fatal=1,Error=2,Warning=3,Info=4,Debug=5
#DEFAULT: 0
#slackloglevel=
#The command that should be executed to trigger a self update of the software. For linux, for example, `<yourscript>.sh`
#DEFAULT: ""
#autoupdatecommand=
#Twilio integration for chainquery to send especially important information to key users of the Chainquery install.
#DEFAULT:
##twiliosid=""
##twilioauthtoken=""
##smsrecipients=["",""]
##smsfromphonenumber=""
#twiliosid=
#twilioauthtoken=
#smsrecipients=
#smsfromphonenumber=
#API Keys - Disallowed by default unless keys are entered.
#DEFAULT: []
#apikeys=
#Max Failures - Specifies the number of failures that can happen in processing a transaction. This is for parallel
#transaction processing, which puts a transaction at the back of the processing queue if it fails. It can fail, for
#example, if the source output it spends is not yet processed.
#DEFAULT: 1000
#maxfailures=
#Block Chain Name - Specifies the chain params for parsing blocks, transactions, claims, and addresses. valid choices are
#lbrycrd_main, lbrycrd_testnet, and lbrycrd_regtest.
#DEFAULT: "lbrycrd_main"
blockchainname="lbrycrd_regtest"

10
e2e/daemon_settings.yml Normal file

@ -0,0 +1,10 @@
blockchain_name: lbrycrd_regtest
lbryum_servers:
- walletserver:50001
reflect_streams: false
save_blobs: true
save_files: false
share_usage_data: false
tcp_port: 3333
udp_port: 4444
use_upnp: false

24
e2e/data_setup.sh Executable file

@ -0,0 +1,24 @@
#!/usr/bin/env bash
set -e
#Add a ytsync user
ADDYTSYNCUSER='INSERT INTO user (given_name) VALUE("ytsync user")'
mysql -u lbry -plbry -D lbry -h "127.0.0.1" -P 15500 -e "$ADDYTSYNCUSER"
#Insert an auth token for the user to be used by ytsync
ADDYTSYNCAUTHTOKEN='INSERT INTO auth_token (user_id, value) VALUE(1,"ytsyntoken")'
mysql -u lbry -plbry -D lbry -h "127.0.0.1" -P 15500 -e "$ADDYTSYNCAUTHTOKEN"
#Give privileges to ytsync user
ASSIGNGROOP='INSERT INTO user_groop (user_id, groop_id) VALUE( 1,3)'
mysql -u lbry -plbry -D lbry -h "127.0.0.1" -P 15500 -e "$ASSIGNGROOP"
#Add youtuber to sync
ADDYTSYNCER='INSERT INTO user (given_name) VALUE("youtuber")'
mysql -u lbry -plbry -D lbry -h "127.0.0.1" -P 15500 -e "$ADDYTSYNCER"
#Insert an auth token for the youtuber to be used by ytsync
ADDYTSYNCAUTHTOKEN='INSERT INTO auth_token (user_id, value) VALUE(2,"youtubertoken")'
mysql -u lbry -plbry -D lbry -h "127.0.0.1" -P 15500 -e "$ADDYTSYNCAUTHTOKEN"
#Add their youtube channel to be synced
ADDYTCHANNEL="INSERT INTO youtube_data (user_id, status_token,desired_lbry_channel,channel_id,channel_name,status,created_at,source,total_videos,total_subscribers,should_sync,redeemable,total_views,reviewed,last_uploaded_video,length_limit,size_limit,reward_amount,reward_expiration)
VALUE(2,'3qzGyuVjQaf7t4pKKu2Er1NRW2LJkeWw','$1','$2','СтопХам','queued','2019-08-01 00:00:00','sync',1000,1000,1,1,10000,1,'$3',60,2048,0,'2019-08-01 00:00:00')"
mysql -u lbry -plbry -D lbry -h "127.0.0.1" --default-character-set=utf8 -P 15500 -e "$ADDYTCHANNEL"

141
e2e/docker-compose.yml Normal file

@ -0,0 +1,141 @@
version: "3.5"
services:
#############
## Lbrycrd ##
#############
lbrycrd:
image: lbry/lbrycrd:v0.17.3.2-deprecatedrpc
restart: "no"
ports:
- "15201:29246"
- "15200:29245"
expose:
- "29246"
- "29245"
## host volumes for persistent data such as wallet private keys.
volumes:
- "./persist:/data"
environment:
- RUN_MODE=regtest
###################
## Wallet Server ##
###################
walletserver:
image: lbry/wallet-server:v0.101.1
restart: always
environment:
- DB_DIRECTORY=/database
- MAX_SEND=1000000000000000000000
- DAEMON_URL=http://lbry:lbry@lbrycrd:29245
- MAX_SUBS=1000000000000
- BANDWIDTH_LIMIT=80000000000
- SESSION_TIMEOUT=10000000000000000000000000
- TCP_PORT=50001
- ELASTIC_HOST=es01
ports:
- "15300:50001"
expose:
- "50001"
depends_on:
- lbrycrd
- es01
ulimits:
nofile:
soft: 90000
hard: 90000
#command: lbry.wallet.server.coin.LBC
command: lbry.wallet.server.coin.LBCRegTest
###################
## Elasticsearch ##
###################
es01:
image: docker.elastic.co/elasticsearch/elasticsearch:7.11.0
container_name: es01
environment:
- node.name=es01
- discovery.type=single-node
- indices.query.bool.max_clause_count=8196
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms4g -Xmx4g"
ulimits:
memlock:
soft: -1
hard: -1
ports:
- "9200:9200"
expose:
- "9200"
#############
## Lbrynet ##
#############
lbrynet:
image: lbry/lbrynet:v0.99.0
restart: always
ports:
- "15100:5279"
- "15101:5280"
expose:
- "5279"
- "5280"
depends_on:
- walletserver
environment:
- LBRY_STREAMING_SERVER=0.0.0.0:5280
- LBRY_FEE_PER_NAME_CHAR=0
volumes:
- "./persist/.lbrynet:/home/lbrynet"
- ".:/etc/lbry" #Put your daemon_settings.yml here
# /private/var/tmp for OSX and /var/tmp for Linux
- "${LOCAL_TMP_DIR}"
###########
## MySQL ##
###########
mysql:
image: mysql/mysql-server:5.7.33
restart: "no"
ports:
- "15500:3306"
expose:
- "3306"
environment:
- MYSQL_ALLOW_EMPTY_PASSWORD=true
- MYSQL_DATABASE=lbry
- MYSQL_USER=lbry
- MYSQL_PASSWORD=lbry
- MYSQL_LOG_CONSOLE=true
volumes:
- "./init.sql:/docker-entrypoint-initdb.d/init.sql"
- "./chainquery/docker/my.cnf:/etc/mysql/conf.d/chainquery-optimizations.cnf"
###################
## Internal APIs ##
###################
internalapis:
image: odyseeteam/internal-apis:master
restart: "no"
ports:
- "15400:8080"
expose:
- "8080"
depends_on:
- mysql
- lbrycrd
environment:
- MYSQL_DSN=lbry:lbry@tcp(mysql:3306)/lbry
- LBRYCRD_CONNECT=rpc://lbry:lbry@lbrycrd:29245
- REPLICA_DSN=lbry:lbry@tcp(mysql:3306)/lbry
entrypoint: wait-for-it -t 0 chainquery:6300 -- wait-for-it -t 0 lbrycrd:29245 -- ./latest serve
################
## Chainquery ##
################
chainquery:
image: odyseeteam/chainquery:master
restart: "no"
ports:
- 6300:6300
depends_on:
- lbrycrd
- mysql
## TODO: Uncomment this in a docker-compose.override.yml to allow for external configurations.
volumes:
- ./chainqueryconfig.toml:/etc/lbry/chainqueryconfig.toml
entrypoint: wait-for-it -t 0 lbrycrd:29245 -- wait-for-it -t 0 mysql:3306 -- start

96
e2e/e2e.sh Executable file

@ -0,0 +1,96 @@
#!/usr/bin/env bash
set -e
#Always compile ytsync
make
#Always compile supporty
cd e2e/supporty && make && cd ../..
#OVERRIDE this in your .env file if running from mac. Check docker-compose.yml for details
export LOCAL_TMP_DIR="/var/tmp:/var/tmp"
#Private Variables Set in local installations: SLACK_TOKEN,YOUTUBE_API_KEY,AWS_S3_ID,AWS_S3_SECRET,AWS_S3_REGION,AWS_S3_BUCKET
touch -a .env && set -o allexport; source ./.env; set +o allexport
echo "LOCAL_TMP_DIR=$LOCAL_TMP_DIR"
# Compose settings - docker only
export LBRYNET_ADDRESS="http://localhost:15100"
export LBRYCRD_STRING="tcp://lbry:lbry@localhost:15200" #required for supporty
export LBRYNET_USE_DOCKER=true
export REFLECT_BLOBS=false
export CLEAN_ON_STARTUP=true
export REGTEST=true
# Local settings
export BLOBS_DIRECTORY="$(pwd)/e2e/blobsfiles"
export LBRYNET_DIR="$(pwd)/e2e/persist/.lbrynet/.local/share/lbry/lbrynet/"
export LBRYUM_DIR="$(pwd)/e2e/persist/.lbrynet/.local/share/lbry/lbryum"
export TMP_DIR="/var/tmp"
export CHAINNAME="lbrycrd_regtest"
export UID
cd ./e2e
docker-compose stop
docker-compose rm -f
echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin
docker-compose pull
if [[ -d persist ]]; then rm -rf persist; fi
mkdir -m 0777 -p ./persist
mkdir -m 777 -p ./persist/.walletserver
mkdir -m 777 -p ./persist/.lbrynet
#sudo chown -Rv 999:999 ./persist/.walletserver
#sudo chown -Rv 1000:1000 ./persist/.lbrynet
docker-compose up -d
printf 'waiting for internal apis'
until curl --output /dev/null --silent --head --fail http://localhost:15400; do
printf '.'
sleep 1
done
echo "successfully started..."
channelToSync="UCMn-zv1SE-2y6vyewscfFqw"
channelName=@whatever"$(date +%s)"
latestVideoID="yPJgjiMbmX0"
#Data Setup for test
./data_setup.sh "$channelName" "$channelToSync" "$latestVideoID"
# Execute the sync test!
./../bin/ytsync --channelID "$channelToSync" --videos-limit 2 --concurrent-jobs 4 --quick #Force channel intended...just in case. This channel lines up with the api container
status=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT status FROM youtube_data WHERE id=1')
videoStatus=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT status FROM synced_video WHERE id=1')
videoClaimID1=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT publish.claim_id FROM synced_video INNER JOIN publish ON publish.id = synced_video.publish_id WHERE synced_video.id=1')
videoClaimID2=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT publish.claim_id FROM synced_video INNER JOIN publish ON publish.id = synced_video.publish_id WHERE synced_video.id=2')
videoClaimAddress1=$(mysql -u lbry -plbry -ss -D chainquery -h "127.0.0.1" -P 15500 -e 'SELECT claim_address FROM claim WHERE id=2')
videoClaimAddress2=$(mysql -u lbry -plbry -ss -D chainquery -h "127.0.0.1" -P 15500 -e 'SELECT claim_address FROM claim WHERE id=3')
# Create Supports for published claim
./supporty/supporty "$channelName" "${videoClaimID1}" "${videoClaimAddress1}" lbrycrd_regtest 1.0
./supporty/supporty "$channelName" "${videoClaimID2}" "${videoClaimAddress2}" lbrycrd_regtest 2.0
./supporty/supporty "$channelName" "${videoClaimID2}" "${videoClaimAddress2}" lbrycrd_regtest 3.0
./supporty/supporty "$channelName" "${videoClaimID1}" "${videoClaimAddress1}" lbrycrd_regtest 3.0
curl --data-binary '{"jsonrpc":"1.0","id":"curltext","method":"generate","params":[1]}' -H 'content-type:text/plain;' --user lbry:lbry http://localhost:15200
# Reset status for transfer test
mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e "UPDATE youtube_data SET status = 'queued' WHERE id = 1"
# Trigger transfer api
curl -i -H 'Accept: application/json' -H 'Content-Type: application/json' 'http://localhost:15400/yt/transfer?auth_token=youtubertoken&address=n4eYeXAYmHo4YRUDEfsEhucy8y5LKRMcHg&public_key=tpubDA9GDAntyJu4hD3wU7175p7CuV6DWbYXfyb2HedBA3yuBp9HZ4n3QE4Ex6RHCSiEuVp2nKAL1Lzf2ZLo9ApaFgNaJjG6Xo1wB3iEeVbrDZp'
# Execute the transfer test!
./../bin/ytsync --channelID $channelToSync --videos-limit 2 --concurrent-jobs 4 --quick #Force channel intended...just in case. This channel lines up with the api container
# Check that the channel and the video are marked as transferred and that all supports are spent
channelTransferStatus=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT distinct transfer_state FROM youtube_data')
videoTransferStatus=$(mysql -u lbry -plbry -ss -D lbry -h "127.0.0.1" -P 15500 -e 'SELECT distinct transferred FROM synced_video')
nrUnspentSupports=$(mysql -u lbry -plbry -ss -D chainquery -h "127.0.0.1" -P 15500 -e 'SELECT COUNT(*) FROM chainquery.support INNER JOIN output ON output.transaction_hash = support.transaction_hash_id AND output.vout = support.vout WHERE output.is_spent = 0')
if [[ $status != "synced" || $videoStatus != "published" || $channelTransferStatus != "2" || $videoTransferStatus != "1" || $nrUnspentSupports != "1" ]]; then
echo "~~!!!~~~FAILED~~~!!!~~"
echo "Channel Status: $status"
echo "Video Status: $videoStatus"
echo "Channel Transfer Status: $channelTransferStatus"
echo "Video Transfer Status: $videoTransferStatus"
echo "Nr Unspent Supports: $nrUnspentSupports"
#docker-compose logs --tail="all" lbrycrd
#docker-compose logs --tail="all" walletserver
#docker-compose logs --tail="all" lbrynet
#docker-compose logs --tail="all" internalapis
exit 1;
else
echo "SUCCESSSSSSSSSSSSS!"
fi;
docker-compose down

3
e2e/init.sql Normal file

@ -0,0 +1,3 @@
CREATE DATABASE IF NOT EXISTS chainquery;
GRANT ALL PRIVILEGES ON chainquery.* TO 'lbry'@'%';
FLUSH PRIVILEGES;


@ -0,0 +1,26 @@
version: "3"
networks:
lbry-network:
external: true
services:
#############
## Lbrycrd ##
#############
lbrycrd:
image: lbry/lbrycrd:v0.12.4.1
restart: always
networks:
lbry-network:
ipv4_address: 10.6.1.1
ports:
- "15201:29246"
- "15200:29245"
expose:
- "29246"
- "29245"
## host volumes for persistent data such as wallet private keys.
volumes:
- "../persist/data:/data"
environment:
- RUN_MODE=regtest


@ -0,0 +1,37 @@
FROM ubuntu:18.04 as prep
LABEL MAINTAINER="leopere [at] nixc [dot] us"
## TODO: Implement version pinning. `apt-get install curl=<version>`
RUN apt-get update && \
apt-get -y install unzip curl build-essential && \
apt-get autoclean -y && \
rm -rf /var/lib/apt/lists/*
WORKDIR /
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
COPY ./start.sh start
COPY ./healthcheck.sh healthcheck
COPY ./advance_blocks.sh advance
COPY ./fix-permissions.c fix-permissions.c
## Add lbrycrd - Change the version below to create an image for a different tag/version
ARG VERSION="v0.12.4.1"
RUN URL=$(curl -s https://api.github.com/repos/lbryio/lbrycrd/releases/$(if [ "${VERSION}" = 'latest' ]; then echo "latest"; else echo "tags/${VERSION}"; fi) | grep browser_download_url | grep lbrycrd-linux.zip | cut -d'"' -f4) && echo $URL && curl -L -o /lbrycrd-linux.zip $URL
RUN unzip ./lbrycrd-linux.zip && \
gcc fix-permissions.c -o fix-permissions && \
chmod +x ./lbrycrdd ./lbrycrd-cli ./lbrycrd-tx ./start ./healthcheck ./fix-permissions ./advance
FROM ubuntu:18.04 as app
COPY --from=prep /lbrycrdd /lbrycrd-cli /lbrycrd-tx /start /healthcheck /fix-permissions /advance /usr/bin/
RUN addgroup --gid 1000 lbrycrd && \
adduser lbrycrd --uid 1000 --gid 1000 --gecos GECOS --shell /bin/bash --disabled-password --home /data && \
mkdir /etc/lbry && \
chown lbrycrd /etc/lbry && \
chmod a+s /usr/bin/fix-permissions
VOLUME ["/data"]
WORKDIR /data
## TODO: Implement healthcheck.
# HEALTHCHECK ["healthcheck"]
EXPOSE 9246 9245 11337 29245
USER lbrycrd
CMD ["start"]


@ -0,0 +1,5 @@
#!/usr/bin/env bash
while true; do
lbrycrd-cli -conf=/etc/lbry/lbrycrd.conf generate 100 >> /tmp/output.log
sleep 2
done

8
e2e/lbrycrd/docker/build.sh Executable file

@ -0,0 +1,8 @@
#!/bin/bash
if [ $# -eq 0 ]
then
echo "No docker tag argument supplied. Use './build.sh <tag>'"
exit 1
fi
docker build --build-arg VERSION=$1 --tag lbry/lbrycrd:$1 .
docker push lbry/lbrycrd:$1


@ -0,0 +1,9 @@
#include <unistd.h>
int main() {
// This program needs to run with setuid == root
// This needs to be in a compiled language because you cannot setuid bash scripts
setuid(0);
execle("/bin/bash", "bash", "-c",
"/bin/chown -R lbrycrd:lbrycrd /data && /bin/chmod -R 755 /data/",
(char*) NULL, (char*) NULL);
}


@ -0,0 +1,4 @@
## TODO: Implement a healthcheck for lbrycrd.
curl --data-binary '{"jsonrpc":"1.0","id":"curltext","method":"getinfo","params":[]}' -H 'content-type:text/plain;' http://$RPC_USER:$RPC_PASSWORD@127.0.0.1:9246
## OR
lbrycrd-cli getinfo

127
e2e/lbrycrd/docker/start.sh Normal file

@ -0,0 +1,127 @@
#!/usr/bin/env bash
CONFIG_PATH=/etc/lbry/lbrycrd.conf
function override_config_option() {
# Remove existing config line from a config file
# and replace with environment fed value.
# Does nothing if the variable does not exist.
# var Name of ENV variable
# option Name of config option
# config Path of config file
local var=$1 option=$2 config=$3
if [[ -v $var ]]; then
# Remove the existing config option:
sed -i "/^$option\W*=/d" "$config"
# Add the value from the environment:
echo "$option=${!var}" >> "$config"
fi
}
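# Example (illustration only): with RPC_PORT=19245 exported, calling
#   override_config_option RPC_PORT rpcport /tmp/lbrycrd_merged.conf
# deletes any existing 'rpcport=...' line and appends 'rpcport=19245'.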
function set_config() {
if [ -d "$CONFIG_PATH" ]; then
echo "$CONFIG_PATH is a directory when it should be a file."
exit 1
elif [ -f "$CONFIG_PATH" ]; then
echo "Merging the mounted config file with environment variables."
local MERGED_CONFIG=/tmp/lbrycrd_merged.conf
cat $CONFIG_PATH > $MERGED_CONFIG
echo "" >> $MERGED_CONFIG
override_config_option PORT port $MERGED_CONFIG
override_config_option RPC_USER rpcuser $MERGED_CONFIG
override_config_option RPC_PASSWORD rpcpassword $MERGED_CONFIG
override_config_option RPC_ALLOW_IP rpcallowip $MERGED_CONFIG
override_config_option RPC_PORT rpcport $MERGED_CONFIG
override_config_option RPC_BIND rpcbind $MERGED_CONFIG
# Make the new merged config file the new CONFIG_PATH
# This ensures that the original file the user mounted remains unmodified
CONFIG_PATH=$MERGED_CONFIG
else
echo "Creating a fresh config file from environment variables."
## Set config params
{
echo "port=${PORT=9246}"
echo "rpcuser=${RPC_USER=lbry}"
echo "rpcpassword=${RPC_PASSWORD=lbry}"
echo "rpcallowip=${RPC_ALLOW_IP=127.0.0.1/24}"
echo "rpcport=${RPC_PORT=9245}"
echo "rpcbind=${RPC_BIND=0.0.0.0}"
echo "deprecatedrpc=accounts"
echo "deprecatedrpc=validateaddress"
echo "deprecatedrpc=signrawtransaction"
} >> $CONFIG_PATH
fi
echo "Config: "
cat $CONFIG_PATH
}
## Ensure perms are correct prior to running main binary
/usr/bin/fix-permissions
## You can optionally specify a run mode if you want to use lbry defined presets for compatibility.
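## Example (illustration only): docker run -e RUN_MODE=regtest lbry/lbrycrd:<tag>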
case $RUN_MODE in
default )
set_config
lbrycrdd -server -conf=$CONFIG_PATH -printtoconsole
;;
## On a first run you need to do a full index including all transactions.
## txindex creates an index of every single transaction in the block history; if
## not specified, an index is only created for transactions related to the wallet or with unspent outputs.
## This is generally specific to chainquery.
reindex )
## Apply this RUN_MODE when you need to rebuild a dataset. NOTE: you do not need to use `RUN_MODE=reindex` for more than one complete run.
set_config
lbrycrdd -server -txindex -reindex -conf=$CONFIG_PATH -printtoconsole
;;
chainquery )
## If your only goal is to run Chainquery against this instance of lbrycrd and you're starting a
## fresh local dataset use this run mode.
set_config
lbrycrdd -server -txindex -conf=$CONFIG_PATH -printtoconsole
;;
regtest )
## Set config params
## TODO: Make this more automagic in the future.
mkdir -p "$(dirname $CONFIG_PATH)"
echo "rpcuser=lbry" > $CONFIG_PATH
echo "rpcpassword=lbry" >> $CONFIG_PATH
echo "rpcport=29245" >> $CONFIG_PATH
echo "rpcbind=0.0.0.0" >> $CONFIG_PATH
echo "rpcallowip=0.0.0.0/0" >> $CONFIG_PATH
echo "regtest=1" >> $CONFIG_PATH
echo "txindex=1" >> $CONFIG_PATH
echo "server=1" >> $CONFIG_PATH
echo "printtoconsole=1" >> $CONFIG_PATH
echo "deprecatedrpc=accounts" >> $CONFIG_PATH
echo "deprecatedrpc=validateaddress" >> $CONFIG_PATH
echo "deprecatedrpc=signrawtransaction" >> $CONFIG_PATH
echo "vbparams=segwit:0:999999999999" >> $CONFIG_PATH
echo "addresstype=legacy" >> $CONFIG_PATH
#nohup advance &>/dev/null &
lbrycrdd -conf=$CONFIG_PATH $1
;;
testnet )
## Set config params
## TODO: Make this more automagic in the future.
mkdir -p "$(dirname $CONFIG_PATH)"
echo "rpcuser=lbry" > $CONFIG_PATH
echo "rpcpassword=lbry" >> $CONFIG_PATH
echo "rpcport=29245" >> $CONFIG_PATH
echo "rpcbind=0.0.0.0" >> $CONFIG_PATH
echo "rpcallowip=0.0.0.0/0" >> $CONFIG_PATH
echo "testnet=1" >> $CONFIG_PATH
echo "txindex=1" >> $CONFIG_PATH
echo "server=1" >> $CONFIG_PATH
echo "printtoconsole=1" >> $CONFIG_PATH
echo "deprecatedrpc=accounts" >> $CONFIG_PATH
echo "deprecatedrpc=validateaddress" >> $CONFIG_PATH
echo "deprecatedrpc=signrawtransaction" >> $CONFIG_PATH
#nohup advance &>/dev/null &
lbrycrdd -conf=$CONFIG_PATH $1
;;
* )
echo "Error, you must define a RUN_MODE environment variable."
echo "Available options are testnet, regtest, chainquery, default, and reindex"
;;
esac


@ -0,0 +1,23 @@
version: "3"
networks:
lbry-network:
external: true
services:
#############
## Lbrynet ##
#############
lbrynet:
image: lbry/lbrynet:v0.99.0
restart: "no"
networks:
lbry-network:
ipv4_address: 10.6.1.3
ports:
- "15100:5279"
- "15101:5280"
environment:
- LBRY_STREAMING_SERVER=0.0.0.0:5280
volumes:
- "../persist/data/.lbrynet:/home/lbrynet"
- "./settings:/etc/lbry" #Put your daemon_settings.yml here


@ -0,0 +1,27 @@
## This base image is for running the latest lbrynet-daemon release.
FROM ubuntu:18.04 as prep
LABEL MAINTAINER="leopere [at] nixc [dot] us"
RUN apt-get update && apt-get -y install unzip curl telnet wait-for-it
## Add lbrynet
ARG VERSION="latest"
RUN URL=$(curl -s https://api.github.com/repos/lbryio/lbry-sdk/releases/$(if [ "${VERSION}" = 'latest' ]; then echo "latest"; else echo "tags/${VERSION}"; fi) | grep browser_download_url | grep lbrynet-linux.zip | cut -d'"' -f4) && echo $URL && curl -L -o /lbrynet.linux.zip $URL
COPY start.sh /usr/bin/start
COPY checkmount.sh /usr/bin/checkmount
RUN unzip /lbrynet.linux.zip -d /lbrynet/ && \
mv /lbrynet/lbrynet /usr/bin && \
chmod a+x /usr/bin/checkmount /usr/bin/start /usr/bin/lbrynet
FROM ubuntu:18.04 as app
COPY --from=prep /usr/bin/start /usr/bin/checkmount /usr/bin/lbrynet /usr/bin/
RUN adduser lbrynet --gecos GECOS --shell /bin/bash --disabled-password --home /home/lbrynet
## Daemon port [Intended for internal use]
## LBRYNET talks to peers on port 3333 [Intended for external use]; this port is used to discover other lbrynet daemons with blobs.
## Expose 5566 Reflector port to listen on
## Expose 5279 Port the daemon API will listen on
## the lbryumx aka Wallet port [Intended for internal use]
#EXPOSE 4444 3333 5566 5279 50001
USER lbrynet
ENTRYPOINT ["/usr/bin/checkmount"]
CMD ["start"]

8
e2e/lbrynet/docker/build.sh Executable file

@ -0,0 +1,8 @@
#!/bin/bash
if [ $# -eq 0 ]
then
echo "No docker tag argument supplied. Use './build.sh <tag>'"
exit 1
fi
docker build --build-arg VERSION=$1 --tag lbry/lbrynet:$1 .
docker push lbry/lbrynet:$1


@ -0,0 +1,12 @@
#!/bin/bash
## TODO: Make a bit more aware of the run mode of this appliance in case there is ever a test mode enabled in the start.sh
mountpoint=/home/lbrynet
if ! grep -qs ".* $mountpoint " /proc/mounts; then
echo "$mountpoint not mounted, refusing to run."
## TODO: We should have documentation that this error references directly with a URL as to why it won't run without a volume.
exit 1
else
bash -c "$*"
fi


@ -0,0 +1,4 @@
#!/bin/bash
lbrynet start \
--api="${API_BIND_IP:-0.0.0.0}":"${API_PORT:-5279}" \
--config="${CONFIG_PATH:-/etc/lbry/daemon_settings.yml}"


@ -0,0 +1,13 @@
#blockchain_name: lbrycrd_main
#blockchain_name: lbrycrd_testnet
blockchain_name: lbrycrd_regtest
lbryum_servers:
# - spv1.lbry.com:50001 #Production Wallet Server
- walletserver:50001
save_blobs: true
save_files: false
reflect_streams: false #for the love of god, don't upload regtest streams to reflector!
share_usage_data: false
tcp_port: 3333
udp_port: 4444
use_upnp: true

12
e2e/supporty/Makefile Normal file

@ -0,0 +1,12 @@
BINARY=supporty
DIR = $(shell cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)
BIN_DIR = ${DIR}
.PHONY: build clean test lint
.DEFAULT_GOAL: build
build:
mkdir -p ${BIN_DIR} && CGO_ENABLED=0 go build -asmflags -trimpath=${DIR} -o ${BIN_DIR}/${BINARY} supporty.go
chmod +x ${BIN_DIR}/${BINARY}

43
e2e/supporty/supporty.go Normal file

@ -0,0 +1,43 @@
package main
import (
"os"
"strconv"
"strings"
"github.com/lbryio/ytsync/v5/util"
"github.com/sirupsen/logrus"
)
func main() {
if len(os.Args) != 6 {
logrus.Info(strings.Join(os.Args, ","))
logrus.Fatal("Not enough arguments: name, claimID, address, blockchainName, claimAmount")
}
println("Supporty!")
lbrycrd, err := util.GetLbrycrdClient(os.Getenv("LBRYCRD_STRING"))
if err != nil {
logrus.Fatal(err)
}
if lbrycrd == nil {
logrus.Fatal("Lbrycrd Client is nil")
}
amount, err := strconv.ParseFloat(os.Args[5], 64)
if err != nil {
logrus.Fatal(err)
}
name := os.Args[1]
claimid := os.Args[2]
claimAddress := os.Args[3]
blockChainName := os.Args[4]
logrus.Infof("Supporting %s[%s] with %.2f LBC on chain %s at address %s", name, claimid, amount, blockChainName, claimAddress)
hash, err := lbrycrd.SupportClaim(name, claimid, claimAddress, blockChainName, amount)
if err != nil {
logrus.Error(err)
}
if hash == nil {
logrus.Fatal("Tx not created!")
}
logrus.Info("Tx: ", hash.String())
}


@ -0,0 +1,35 @@
version: "3"
networks:
lbry-network:
external: true
services:
###################
## Wallet Server ##
###################
walletserver:
image: lbry/wallet-server:v0.73.1
restart: always
networks:
lbry-network:
ipv4_address: 10.6.1.2
volumes:
- "../persist/data/.walletserver/database:/database"
environment:
- DB_DIRECTORY=/database
- MAX_SEND=1000000000000000000000
- DAEMON_URL=http://lbry:lbry@lbrycrd:29245/
- MAX_SUBS=1000000000000
- BANDWIDTH_LIMIT=80000000000
- SESSION_TIMEOUT=10000000000000000000000000
- TCP_PORT=50001
#network_mode: host
#network_mode: bridge
ports:
- "50001:50001"
expose:
- "50001"
ulimits:
nofile: 90000
# command: lbry.wallet.server.coin.LBC
command: lbry.wallet.server.coin.LBCRegTest

189
go.mod

@ -1,44 +1,151 @@
module github.com/lbryio/ytsync
go 1.17
module github.com/lbryio/ytsync/v5
replace github.com/btcsuite/btcd => github.com/lbryio/lbrycrd.go v0.0.0-20200203050410-e1076f12bf19
//replace github.com/lbryio/lbry.go/v2 => /home/niko/go/src/github.com/lbryio/lbry.go/
//replace github.com/lbryio/reflector.go => /home/niko/go/src/github.com/lbryio/reflector.go/
require (
cloud.google.com/go v0.37.4 // indirect
github.com/ChannelMeter/iso8601duration v0.0.0-20150204201828-8da3af7a2a61
github.com/PuerkitoBio/goquery v1.5.0 // indirect
github.com/aws/aws-sdk-go v1.17.3
github.com/channelmeter/iso8601duration v0.0.0-20150204201828-8da3af7a2a61 // indirect
github.com/go-ini/ini v1.42.0 // indirect
github.com/golang/protobuf v1.3.1 // indirect
github.com/gopherjs/gopherjs v0.0.0-20190411002643-bd77b112433e // indirect
github.com/gorilla/websocket v1.4.0 // indirect
github.com/hashicorp/golang-lru v0.5.1 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
github.com/kr/pretty v0.1.0 // indirect
github.com/lbryio/errors.go v0.0.0-20180223142025-ad03d3cc6a5c
github.com/lbryio/lbry.go v1.0.7
github.com/lusis/slack-test v0.0.0-20190408224659-6cf59653add2 // indirect
github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936
github.com/mitchellh/mapstructure v1.1.2 // indirect
github.com/nikooo777/ytdl v0.0.0-20190215151411-9c7832eaf457
github.com/onsi/ginkgo v1.8.0 // indirect
github.com/onsi/gomega v1.5.0 // indirect
github.com/prometheus/common v0.3.0
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24
github.com/sirupsen/logrus v1.4.1
github.com/smartystreets/assertions v0.0.0-20190401211740-f487f9de1cd3 // indirect
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a // indirect
github.com/spf13/cobra v0.0.0-20190109003409-7547e83b2d85
github.com/spf13/pflag v1.0.3 // indirect
github.com/ybbus/jsonrpc v2.1.2+incompatible // indirect
go.opencensus.io v0.20.2 // indirect
golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 // indirect
golang.org/x/net v0.0.0-20190415214537-1da14a5a36f2 // indirect
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a // indirect
google.golang.org/api v0.3.2
google.golang.org/appengine v1.5.0 // indirect
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7 // indirect
google.golang.org/grpc v1.20.0 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gopkg.in/ini.v1 v1.42.0 // indirect
gopkg.in/yaml.v2 v2.2.2 // indirect
github.com/abadojack/whatlanggo v1.0.1
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
github.com/aws/aws-sdk-go v1.44.6
github.com/davecgh/go-spew v1.1.1
github.com/docker/docker v20.10.17+incompatible
github.com/lbryio/lbry.go/v2 v2.7.2-0.20220815204100-2adb8af5b68c
github.com/lbryio/reflector.go v1.1.3-0.20220730181028-f5d30b1a6e79
github.com/mitchellh/go-ps v1.0.0
github.com/prometheus/client_golang v1.12.1
github.com/shopspring/decimal v1.3.1
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.4.0
github.com/stretchr/testify v1.7.1
github.com/tkanos/gonfig v0.0.0-20210106201359-53e13348de2f
github.com/vbauerster/mpb/v7 v7.4.1
gopkg.in/vansante/go-ffprobe.v2 v2.0.3
gotest.tools v2.2.0+incompatible
)
require (
github.com/Microsoft/go-winio v0.5.1 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bluele/gcache v0.0.2 // indirect
github.com/brk0v/directio v0.0.0-20190225130936-69406e757cf7 // indirect
github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3 // indirect
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d // indirect
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cheekybits/genny v1.0.0 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/ekyoung/gin-nice-recovery v0.0.0-20160510022553-1654dca486db // indirect
github.com/fatih/structs v1.1.0 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/gin-gonic/gin v1.7.7 // indirect
github.com/go-errors/errors v1.1.1 // indirect
github.com/go-ini/ini v1.48.0 // indirect
github.com/go-playground/locales v0.13.0 // indirect
github.com/go-playground/universal-translator v0.17.0 // indirect
github.com/go-playground/validator/v10 v10.4.1 // indirect
github.com/go-sql-driver/mysql v1.6.0 // indirect
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
github.com/gofrs/uuid v3.2.0+incompatible // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gorilla/rpc v1.2.0 // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-immutable-radix v1.1.0 // indirect
github.com/hashicorp/go-msgpack v0.5.5 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hashicorp/memberlist v0.3.0 // indirect
github.com/hashicorp/serf v0.9.7 // indirect
github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/johntdyer/slack-go v0.0.0-20180213144715-95fac1160b22 // indirect
github.com/johntdyer/slackrus v0.0.0-20211215141436-33e4a270affb // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/karrick/godirwalk v1.17.0 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lbryio/chainquery v1.9.0 // indirect
github.com/lbryio/lbry.go v1.1.2 // indirect
github.com/lbryio/types v0.0.0-20220224142228-73610f6654a6 // indirect
github.com/leodido/go-urn v1.2.0 // indirect
github.com/lucas-clemente/quic-go v0.28.1 // indirect
github.com/lyoshenka/bencode v0.0.0-20180323155644-b7abd7672df5 // indirect
github.com/magiconair/properties v1.8.1 // indirect
github.com/marten-seemann/qpack v0.2.1 // indirect
github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect
github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect
github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/miekg/dns v1.1.41 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/onsi/ginkgo v1.16.4 // indirect
github.com/onsi/gomega v1.17.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
github.com/pelletier/go-toml v1.9.3 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/slack-go/slack v0.10.3 // indirect
github.com/spf13/afero v1.4.1 // indirect
github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/jwalterweatherman v1.0.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.7.1 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
github.com/ugorji/go/codec v1.1.7 // indirect
github.com/volatiletech/inflect v0.0.0-20170731032912-e7201282ae8d // indirect
github.com/volatiletech/null v8.0.0+incompatible // indirect
github.com/volatiletech/sqlboiler v3.4.0+incompatible // indirect
github.com/ybbus/jsonrpc v2.1.2+incompatible // indirect
go.uber.org/atomic v1.9.0 // indirect
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
golang.org/x/mod v0.4.2 // indirect
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
golang.org/x/tools v0.1.5 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/ini.v1 v1.60.2 // indirect
gopkg.in/nullbio/null.v6 v6.0.0-20161116030900-40264a2e6b79 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
gotest.tools/v3 v3.2.0 // indirect
)

1073
go.sum

File diff suppressed because it is too large

227
ip_manager/throttle.go Normal file

@ -0,0 +1,227 @@
package ip_manager
import (
"net"
"sort"
"sync"
"time"
"github.com/asaskevich/govalidator"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/ytsync/v5/util"
log "github.com/sirupsen/logrus"
)
const IPCooldownPeriod = 20 * time.Second
const unbanTimeout = 48 * time.Hour
var stopper = stop.New()
type IPPool struct {
ips []throttledIP
lock *sync.RWMutex
stopGrp *stop.Group
}
type throttledIP struct {
IP string
UsedForVideo string
LastUse time.Time
Throttled bool
InUse bool
}
var ipPoolInstance *IPPool
func GetIPPool(stopGrp *stop.Group) (*IPPool, error) {
if ipPoolInstance != nil {
return ipPoolInstance, nil
}
addrs, err := net.InterfaceAddrs()
if err != nil {
return nil, errors.Err(err)
}
var pool []throttledIP
for _, address := range addrs {
if ipnet, ok := address.(*net.IPNet); ok && ipnet.IP.IsGlobalUnicast() {
if ipnet.IP.To16() != nil && govalidator.IsIPv6(ipnet.IP.String()) {
pool = append(pool, throttledIP{
IP: ipnet.IP.String(),
LastUse: time.Now().Add(-5 * time.Minute),
})
} else if ipnet.IP.To4() != nil && govalidator.IsIPv4(ipnet.IP.String()) {
pool = append(pool, throttledIP{
IP: ipnet.IP.String(),
LastUse: time.Now().Add(-5 * time.Minute),
})
}
}
}
ipPoolInstance = &IPPool{
ips: pool,
lock: &sync.RWMutex{},
stopGrp: stopGrp,
}
//ticker := time.NewTicker(10 * time.Second)
//go func() {
// for {
// select {
// case <-stopGrp.Ch():
// return
// case <-ticker.C:
// ipPoolInstance.lock.RLock()
// for _, ip := range ipPoolInstance.ips {
// log.Debugf("IP: %s\tInUse: %t\tVideoID: %s\tThrottled: %t\tLastUse: %.1f", ip.IP, ip.InUse, ip.UsedForVideo, ip.Throttled, time.Since(ip.LastUse).Seconds())
// }
// ipPoolInstance.lock.RUnlock()
// }
// }
//}()
return ipPoolInstance, nil
}
// AllThrottled checks whether the IPs provided are all throttled.
// returns false if at least one IP is not throttled
// Not thread safe, should use locking when called
func AllThrottled(ips []throttledIP) bool {
for _, i := range ips {
if !i.Throttled {
return false
}
}
return true
}
// AllInUse checks whether the IPs provided are all currently in use.
// returns false if at least one IP is not in use AND is not throttled
// Not thread safe, should use locking when called
func AllInUse(ips []throttledIP) bool {
for _, i := range ips {
if !i.InUse && !i.Throttled {
return false
}
}
return true
}
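// Minimal caller sketch (assumption: mirrors how nextIP uses these helpers):
//   i.lock.RLock()
//   exhausted := AllThrottled(i.ips)
//   i.lock.RUnlock()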
func (i *IPPool) ReleaseIP(ip string) {
i.lock.Lock()
defer i.lock.Unlock()
for j := range i.ips {
localIP := &i.ips[j]
if localIP.IP == ip {
localIP.InUse = false
localIP.LastUse = time.Now()
return
}
}
util.SendErrorToSlack("something went wrong while releasing the IP %s as we reached the end of the function", ip)
}
func (i *IPPool) ReleaseAll() {
i.lock.Lock()
defer i.lock.Unlock()
for j := range i.ips {
if i.ips[j].Throttled {
continue
}
localIP := &i.ips[j]
localIP.InUse = false
}
}
func (i *IPPool) SetThrottled(ip string) {
i.lock.Lock()
defer i.lock.Unlock()
var tIP *throttledIP
for j := range i.ips {
localIP := &i.ips[j]
if localIP.IP == ip {
if localIP.Throttled {
return
}
localIP.Throttled = true
tIP = localIP
break
}
}
util.SendErrorToSlack("%s set to throttled", ip)
stopper.Add(1)
go func(tIP *throttledIP) {
defer stopper.Done()
unbanTimer := time.NewTimer(unbanTimeout)
select {
case <-unbanTimer.C:
i.lock.Lock()
tIP.Throttled = false
i.lock.Unlock()
util.SendInfoToSlack("%s set back to not throttled", ip)
case <-i.stopGrp.Ch():
unbanTimer.Stop()
}
}(tIP)
}
var ErrAllInUse = errors.Base("all IPs are in use, try again")
var ErrAllThrottled = errors.Base("all IPs are throttled")
var ErrResourceLock = errors.Base("error getting next ip, did you forget to lock on the resource?")
var ErrInterruptedByUser = errors.Base("interrupted by user")
func (i *IPPool) nextIP(forVideo string) (*throttledIP, error) {
i.lock.Lock()
defer i.lock.Unlock()
sort.Slice(i.ips, func(j, k int) bool {
return i.ips[j].LastUse.Before(i.ips[k].LastUse)
})
if !AllThrottled(i.ips) {
if AllInUse(i.ips) {
return nil, errors.Err(ErrAllInUse)
}
var nextIP *throttledIP
for j := range i.ips {
ip := &i.ips[j]
if ip.InUse || ip.Throttled {
continue
}
nextIP = ip
break
}
if nextIP == nil {
return nil, errors.Err(ErrResourceLock)
}
nextIP.InUse = true
nextIP.UsedForVideo = forVideo
return nextIP, nil
}
return nil, errors.Err(ErrAllThrottled)
}
func (i *IPPool) GetIP(forVideo string) (string, error) {
for {
ip, err := i.nextIP(forVideo)
if err != nil {
if errors.Is(err, ErrAllInUse) {
select {
case <-i.stopGrp.Ch():
return "", errors.Err(ErrInterruptedByUser)
default:
time.Sleep(5 * time.Second)
continue
}
} else if errors.Is(err, ErrAllThrottled) {
return "throttled", err
}
return "", err
}
if time.Since(ip.LastUse) < IPCooldownPeriod {
log.Debugf("The IP %s is too hot, waiting for %.1f seconds before continuing", ip.IP, (IPCooldownPeriod - time.Since(ip.LastUse)).Seconds())
time.Sleep(IPCooldownPeriod - time.Since(ip.LastUse))
}
return ip.IP, nil
}
}
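GetIP and ReleaseIP form a checkout/checkin pair: GetIP polls in 5-second steps while every IP is busy, sleeps out the per-IP cooldown before handing one back, and only gives up when the whole pool is throttled or the stop group fires. A minimal usage sketch, assuming the lbry.go stop package and a placeholder video ID:

	stopGrp := stop.New()
	pool, err := ip_manager.GetIPPool(stopGrp)
	if err != nil {
		return err
	}
	sourceIP, err := pool.GetIP("some-video-id")
	if err != nil {
		return err
	}
	// release on every path, or the pool eventually reports ErrAllInUse
	defer pool.ReleaseIP(sourceIP)
	// ... run the download bound to sourceIP ...
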


@ -0,0 +1,40 @@
package ip_manager
import (
"testing"
"github.com/lbryio/lbry.go/v2/extras/stop"
)
func TestAll(t *testing.T) {
pool, err := GetIPPool(stop.New())
if err != nil {
t.Fatal(err)
}
ip, err := pool.GetIP("test")
if err != nil {
t.Fatal(err)
}
t.Log(ip)
pool.ReleaseIP(ip)
ip2, err := pool.GetIP("test")
if err != nil {
t.Fatal(err)
}
if ip == ip2 && len(pool.ips) > 1 {
t.Fatalf("the same IP was returned twice! %s, %s", ip, ip2)
}
t.Log(ip2)
pool.ReleaseIP(ip2)
for range pool.ips {
_, err = pool.GetIP("test")
if err != nil {
t.Fatal(err)
}
}
next, err := pool.nextIP("test")
if err != nil {
t.Logf("%s", err.Error())
} else {
t.Fatal(next)
}
}

main.go

@ -3,16 +3,21 @@ package main
import (
"fmt"
"math/rand"
"net/http"
"os"
"os/user"
"time"
"github.com/lbryio/lbry.go/extras/util"
"github.com/lbryio/ytsync/sdk"
"github.com/spf13/cobra"
"github.com/lbryio/ytsync/v5/configs"
"github.com/lbryio/ytsync/v5/manager"
"github.com/lbryio/ytsync/v5/shared"
ytUtils "github.com/lbryio/ytsync/v5/util"
"github.com/lbryio/ytsync/manager"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/util"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var Version string
@ -20,28 +25,21 @@ var Version string
const defaultMaxTries = 3
var (
stopOnError bool
maxTries int
takeOverExistingChannel bool
refill int
limit int
skipSpaceCheck bool
syncUpdate bool
singleRun bool
syncStatus string
channelID string
syncFrom int64
syncUntil int64
concurrentJobs int
videosLimit int
maxVideoSize int
maxVideoLength float64
cliFlags shared.SyncFlags
maxVideoLength int
)
func main() {
rand.Seed(time.Now().UnixNano())
log.SetLevel(log.DebugLevel)
customFormatter := new(log.TextFormatter)
customFormatter.TimestampFormat = "2006-01-02 15:04:05"
customFormatter.FullTimestamp = true
log.SetFormatter(customFormatter)
http.Handle("/metrics", promhttp.Handler())
go func() {
log.Error(http.ListenAndServe(":2112", nil))
}()
cmd := &cobra.Command{
Use: "ytsync",
Short: "Publish youtube channels into LBRY network automatically.",
@ -49,21 +47,25 @@ func main() {
Args: cobra.RangeArgs(0, 0),
}
cmd.Flags().BoolVar(&stopOnError, "stop-on-error", false, "If a publish fails, stop all publishing and exit")
cmd.Flags().IntVar(&maxTries, "max-tries", defaultMaxTries, "Number of times to try a publish that fails")
cmd.Flags().BoolVar(&takeOverExistingChannel, "takeover-existing-channel", false, "If channel exists and we don't own it, take over the channel")
cmd.Flags().IntVar(&limit, "limit", 0, "limit the amount of channels to sync")
cmd.Flags().BoolVar(&skipSpaceCheck, "skip-space-check", false, "Do not perform free space check on startup")
cmd.Flags().BoolVar(&syncUpdate, "update", false, "Update previously synced channels instead of syncing new ones")
cmd.Flags().BoolVar(&singleRun, "run-once", false, "Whether the process should be stopped after one cycle or not")
cmd.Flags().StringVar(&syncStatus, "status", "", "Specify which queue to pull from. Overrides --update")
cmd.Flags().StringVar(&channelID, "channelID", "", "If specified, only this channel will be synced.")
cmd.Flags().Int64Var(&syncFrom, "after", time.Unix(0, 0).Unix(), "Specify from when to pull jobs [Unix time](Default: 0)")
cmd.Flags().Int64Var(&syncUntil, "before", time.Now().AddDate(1, 0, 0).Unix(), "Specify until when to pull jobs [Unix time](Default: current Unix time)")
cmd.Flags().IntVar(&concurrentJobs, "concurrent-jobs", 1, "how many jobs to process concurrently")
cmd.Flags().IntVar(&videosLimit, "videos-limit", 1000, "how many videos to process per channel")
cmd.Flags().IntVar(&maxVideoSize, "max-size", 2048, "Maximum video size to process (in MB)")
cmd.Flags().Float64Var(&maxVideoLength, "max-length", 2.0, "Maximum video length to process (in hours)")
cmd.Flags().IntVar(&cliFlags.MaxTries, "max-tries", defaultMaxTries, "Number of times to try a publish that fails")
cmd.Flags().BoolVar(&cliFlags.TakeOverExistingChannel, "takeover-existing-channel", false, "If channel exists and we don't own it, take over the channel")
cmd.Flags().IntVar(&cliFlags.Limit, "limit", 0, "limit the amount of channels to sync")
cmd.Flags().BoolVar(&cliFlags.SkipSpaceCheck, "skip-space-check", false, "Do not perform free space check on startup")
cmd.Flags().BoolVar(&cliFlags.SyncUpdate, "update", false, "Update previously synced channels instead of syncing new ones")
cmd.Flags().BoolVar(&cliFlags.SingleRun, "run-once", false, "Whether the process should be stopped after one cycle or not")
cmd.Flags().BoolVar(&cliFlags.RemoveDBUnpublished, "remove-db-unpublished", false, "Remove videos from the database that are marked as published but aren't really published")
cmd.Flags().BoolVar(&cliFlags.UpgradeMetadata, "upgrade-metadata", false, "Upgrade videos if they're on the old metadata version")
cmd.Flags().BoolVar(&cliFlags.DisableTransfers, "no-transfers", false, "Skips the transferring process of videos, channels and supports")
cmd.Flags().BoolVar(&cliFlags.QuickSync, "quick", false, "Look up only the last 50 videos from youtube")
cmd.Flags().StringVar(&cliFlags.Status, "status", "", "Specify which queue to pull from. Overrides --update")
cmd.Flags().StringVar(&cliFlags.SecondaryStatus, "status2", "", "Specify which secondary queue to pull from.")
cmd.Flags().StringVar(&cliFlags.ChannelID, "channelID", "", "If specified, only this channel will be synced.")
cmd.Flags().Int64Var(&cliFlags.SyncFrom, "after", time.Unix(0, 0).Unix(), "Specify from when to pull jobs [Unix time](Default: 0)")
cmd.Flags().Int64Var(&cliFlags.SyncUntil, "before", time.Now().AddDate(1, 0, 0).Unix(), "Specify until when to pull jobs [Unix time](Default: current Unix time)")
cmd.Flags().IntVar(&cliFlags.ConcurrentJobs, "concurrent-jobs", 1, "how many jobs to process concurrently")
cmd.Flags().IntVar(&cliFlags.VideosLimit, "videos-limit", 0, "how many videos to process per channel (leave 0 for automatic detection)")
cmd.Flags().IntVar(&cliFlags.MaxVideoSize, "max-size", 2048, "Maximum video size to process (in MB)")
cmd.Flags().IntVar(&maxVideoLength, "max-length", 2, "Maximum video length to process (in hours)")
if err := cmd.Execute(); err != nil {
fmt.Println(err)
@ -72,126 +74,62 @@ func main() {
}
func ytSync(cmd *cobra.Command, args []string) {
var hostname string
slackToken := os.Getenv("SLACK_TOKEN")
if slackToken == "" {
log.Error("A slack token was not present in env vars! Slack messages disabled!")
err := configs.Init("./config.json")
if err != nil {
log.Fatalf("could not parse configuration file: %s", errors.FullTrace(err))
}
if configs.Configuration.SlackToken == "" {
log.Error("A slack token was not present in the config! Slack messages disabled!")
} else {
var err error
hostname, err = os.Hostname()
if err != nil {
log.Error("could not detect system hostname")
hostname = "ytsync-unknown"
}
util.InitSlack(os.Getenv("SLACK_TOKEN"), os.Getenv("SLACK_CHANNEL"), hostname)
util.InitSlack(configs.Configuration.SlackToken, configs.Configuration.SlackChannel, configs.Configuration.GetHostname())
}
if syncStatus != "" && !util.InSlice(syncStatus, manager.SyncStatuses) {
log.Errorf("status must be one of the following: %v\n", manager.SyncStatuses)
if cliFlags.Status != "" && !util.InSlice(cliFlags.Status, shared.SyncStatuses) {
log.Errorf("status must be one of the following: %v\n", shared.SyncStatuses)
return
}
if stopOnError && maxTries != defaultMaxTries {
log.Errorln("--stop-on-error and --max-tries are mutually exclusive")
return
}
if maxTries < 1 {
if cliFlags.MaxTries < 1 {
log.Errorln("setting --max-tries less than 1 doesn't make sense")
return
}
if limit < 0 {
if cliFlags.Limit < 0 {
log.Errorln("setting --limit less than 0 (unlimited) doesn't make sense")
return
}
cliFlags.MaxVideoLength = time.Duration(maxVideoLength) * time.Hour
apiURL := os.Getenv("LBRY_WEB_API")
apiToken := os.Getenv("LBRY_API_TOKEN")
youtubeAPIKey := os.Getenv("YOUTUBE_API_KEY")
blobsDir := os.Getenv("BLOBS_DIRECTORY")
lbrycrdString := os.Getenv("LBRYCRD_STRING")
awsS3ID := os.Getenv("AWS_S3_ID")
awsS3Secret := os.Getenv("AWS_S3_SECRET")
awsS3Region := os.Getenv("AWS_S3_REGION")
awsS3Bucket := os.Getenv("AWS_S3_BUCKET")
if apiURL == "" {
log.Errorln("An API URL was not defined. Please set the environment variable LBRY_WEB_API")
if configs.Configuration.InternalApisEndpoint == "" {
log.Errorln("An Internal APIs Endpoint was not defined")
return
}
if apiToken == "" {
log.Errorln("An API Token was not defined. Please set the environment variable LBRY_API_TOKEN")
if configs.Configuration.InternalApisAuthToken == "" {
log.Errorln("An Internal APIs auth token was not defined")
return
}
if youtubeAPIKey == "" {
log.Errorln("A Youtube API key was not defined. Please set the environment variable YOUTUBE_API_KEY")
if configs.Configuration.WalletS3Config.ID == "" || configs.Configuration.WalletS3Config.Region == "" || configs.Configuration.WalletS3Config.Bucket == "" || configs.Configuration.WalletS3Config.Secret == "" || configs.Configuration.WalletS3Config.Endpoint == "" {
log.Errorln("Wallet S3 configuration is incomplete")
return
}
if awsS3ID == "" {
log.Errorln("AWS S3 ID credentials were not defined. Please set the environment variable AWS_S3_ID")
if configs.Configuration.BlockchaindbS3Config.ID == "" || configs.Configuration.BlockchaindbS3Config.Region == "" || configs.Configuration.BlockchaindbS3Config.Bucket == "" || configs.Configuration.BlockchaindbS3Config.Secret == "" || configs.Configuration.BlockchaindbS3Config.Endpoint == "" {
log.Errorln("Blockchain DBs S3 configuration is incomplete")
return
}
if awsS3Secret == "" {
log.Errorln("AWS S3 Secret credentials were not defined. Please set the environment variable AWS_S3_SECRET")
return
}
if awsS3Region == "" {
log.Errorln("AWS S3 Region was not defined. Please set the environment variable AWS_S3_REGION")
return
}
if awsS3Bucket == "" {
log.Errorln("AWS S3 Bucket was not defined. Please set the environment variable AWS_S3_BUCKET")
return
}
if lbrycrdString == "" {
log.Infoln("Using default (local) lbrycrd instance. Set LBRYCRD_STRING if you want to use something else")
}
if blobsDir == "" {
usr, err := user.Current()
if err != nil {
log.Errorln(err.Error())
return
}
blobsDir = usr.HomeDir + "/.lbrynet/blobfiles/"
if configs.Configuration.LbrycrdString == "" {
log.Infoln("Using default (local) lbrycrd instance. Set lbrycrd_string if you want to use something else")
}
syncProperties := &sdk.SyncProperties{
SyncFrom: syncFrom,
SyncUntil: syncUntil,
YoutubeChannelID: channelID,
}
apiConfig := &sdk.APIConfig{
YoutubeAPIKey: youtubeAPIKey,
ApiURL: apiURL,
ApiToken: apiToken,
HostName: hostname,
}
blobsDir := ytUtils.GetBlobsDir()
sm := manager.NewSyncManager(
stopOnError,
maxTries,
takeOverExistingChannel,
refill,
limit,
skipSpaceCheck,
syncUpdate,
concurrentJobs,
concurrentJobs,
cliFlags,
blobsDir,
videosLimit,
maxVideoSize,
lbrycrdString,
awsS3ID,
awsS3Secret,
awsS3Region,
awsS3Bucket,
syncStatus,
singleRun,
syncProperties,
apiConfig,
maxVideoLength,
)
err := sm.Start()
err = sm.Start()
if err != nil {
manager.SendErrorToSlack(err.Error())
ytUtils.SendErrorToSlack(errors.FullTrace(err))
}
manager.SendInfoToSlack("Syncing process terminated!")
ytUtils.SendInfoToSlack("Syncing process terminated!")
}
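configs.Init replaces the previous pile of environment variables with a single config.json. The actual key names live in the configs package and are not part of this diff; a hypothetical file covering the fields validated above could look like this (every key and value here is illustrative):

	{
	  "slack_token": "xoxb-...",
	  "slack_channel": "#ytsync",
	  "internal_apis_endpoint": "https://api.example.com",
	  "internal_apis_auth_token": "...",
	  "lbrycrd_string": "tcp://user:password@localhost:29245",
	  "wallet_s3_config": { "id": "...", "secret": "...", "region": "us-east-1", "bucket": "wallets", "endpoint": "" },
	  "blockchaindb_s3_config": { "id": "...", "secret": "...", "region": "us-east-1", "bucket": "blockchain-dbs", "endpoint": "" }
	}
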


@ -1,32 +0,0 @@
package manager
import (
"net/http"
"github.com/lbryio/lbry.go/extras/errors"
"google.golang.org/api/googleapi/transport"
"google.golang.org/api/youtube/v3"
)
func (s *Sync) CountVideos() (uint64, error) {
client := &http.Client{
Transport: &transport.APIKey{Key: s.APIConfig.YoutubeAPIKey},
}
service, err := youtube.New(client)
if err != nil {
return 0, errors.Prefix("error creating YouTube service", err)
}
response, err := service.Channels.List("statistics").Id(s.YoutubeChannelID).Do()
if err != nil {
return 0, errors.Prefix("error getting channels", err)
}
if len(response.Items) < 1 {
return 0, errors.Err("youtube channel not found")
}
return response.Items[0].Statistics.VideoCount, nil
}


@ -2,226 +2,186 @@ package manager
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"strings"
"sync"
"syscall"
"time"
"github.com/lbryio/ytsync/namer"
"github.com/lbryio/ytsync/sdk"
"github.com/lbryio/ytsync/v5/blobs_reflector"
"github.com/lbryio/ytsync/v5/configs"
"github.com/lbryio/ytsync/v5/ip_manager"
"github.com/lbryio/ytsync/v5/namer"
"github.com/lbryio/ytsync/v5/sdk"
"github.com/lbryio/ytsync/v5/shared"
logUtils "github.com/lbryio/ytsync/v5/util"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/util"
"github.com/lbryio/lbry.go/extras/errors"
"github.com/lbryio/lbry.go/extras/util"
log "github.com/sirupsen/logrus"
)
type SyncManager struct {
stopOnError bool
maxTries int
takeOverExistingChannel bool
refill int
limit int
skipSpaceCheck bool
syncUpdate bool
concurrentJobs int
concurrentVideos int
blobsDir string
videosLimit int
maxVideoSize int
maxVideoLength float64
lbrycrdString string
awsS3ID string
awsS3Secret string
awsS3Region string
syncStatus string
awsS3Bucket string
singleRun bool
syncProperties *sdk.SyncProperties
apiConfig *sdk.APIConfig
CliFlags shared.SyncFlags
ApiConfig *sdk.APIConfig
LbrycrdDsn string
blobsDir string
channelsToSync []Sync
}
func NewSyncManager(stopOnError bool, maxTries int, takeOverExistingChannel bool, refill int, limit int,
skipSpaceCheck bool, syncUpdate bool, concurrentJobs int, concurrentVideos int, blobsDir string, videosLimit int,
maxVideoSize int, lbrycrdString string, awsS3ID string, awsS3Secret string, awsS3Region string, awsS3Bucket string,
syncStatus string, singleRun bool, syncProperties *sdk.SyncProperties, apiConfig *sdk.APIConfig, maxVideoLength float64) *SyncManager {
func NewSyncManager(cliFlags shared.SyncFlags, blobsDir string) *SyncManager {
return &SyncManager{
stopOnError: stopOnError,
maxTries: maxTries,
takeOverExistingChannel: takeOverExistingChannel,
refill: refill,
limit: limit,
skipSpaceCheck: skipSpaceCheck,
syncUpdate: syncUpdate,
concurrentJobs: concurrentJobs,
concurrentVideos: concurrentVideos,
blobsDir: blobsDir,
videosLimit: videosLimit,
maxVideoSize: maxVideoSize,
maxVideoLength: maxVideoLength,
lbrycrdString: lbrycrdString,
awsS3ID: awsS3ID,
awsS3Secret: awsS3Secret,
awsS3Region: awsS3Region,
awsS3Bucket: awsS3Bucket,
syncStatus: syncStatus,
singleRun: singleRun,
syncProperties: syncProperties,
apiConfig: apiConfig,
CliFlags: cliFlags,
blobsDir: blobsDir,
LbrycrdDsn: configs.Configuration.LbrycrdString,
ApiConfig: sdk.GetAPIsConfigs(),
}
}
const (
StatusPending = "pending" // waiting for permission to sync
StatusPendingEmail = "pendingemail" // permission granted but missing email
StatusQueued = "queued" // in sync queue. will be synced soon
StatusPendingUpgrade = "pendingupgrade" // in sync queue. will be synced soon
StatusSyncing = "syncing" // syncing now
StatusSynced = "synced" // done
StatusFailed = "failed"
StatusFinalized = "finalized" // no more changes allowed
StatusAbandoned = "abandoned" // deleted on youtube or banned
)
var SyncStatuses = []string{StatusPending, StatusPendingEmail, StatusPendingUpgrade, StatusQueued, StatusSyncing, StatusSynced, StatusFailed, StatusFinalized, StatusAbandoned}
const (
VideoStatusPublished = "published"
VideoStatusFailed = "failed"
VideoStatusUnpublished = "unpublished"
)
func (s *SyncManager) enqueueChannel(channel *shared.YoutubeChannel) {
s.channelsToSync = append(s.channelsToSync, Sync{
DbChannelData: channel,
Manager: s,
namer: namer.NewNamer(),
hardVideoFailure: hardVideoFailure{
lock: &sync.Mutex{},
},
})
}
func (s *SyncManager) Start() error {
syncCount := 0
for {
err := s.checkUsedSpace()
if logUtils.ShouldCleanOnStartup() {
err := logUtils.CleanForStartup()
if err != nil {
return err
}
}
var syncs []Sync
var lastChannelProcessed string
var secondLastChannelProcessed string
syncCount := 0
for {
s.channelsToSync = make([]Sync, 0, 10) // reset sync queue
err := s.checkUsedSpace()
if err != nil {
return errors.Err(err)
}
shouldInterruptLoop := false
isSingleChannelSync := s.syncProperties.YoutubeChannelID != ""
if isSingleChannelSync {
channels, err := s.apiConfig.FetchChannels("", s.syncProperties)
if s.CliFlags.IsSingleChannelSync() {
channels, err := s.ApiConfig.FetchChannels("", &s.CliFlags)
if err != nil {
return err
return errors.Err(err)
}
if len(channels) != 1 {
return errors.Err("Expected 1 channel, %d returned", len(channels))
}
lbryChannelName := channels[0].DesiredChannelName
syncs = make([]Sync, 1)
syncs[0] = Sync{
APIConfig: s.apiConfig,
YoutubeChannelID: s.syncProperties.YoutubeChannelID,
LbryChannelName: lbryChannelName,
lbryChannelID: channels[0].ChannelClaimID,
StopOnError: s.stopOnError,
MaxTries: s.maxTries,
ConcurrentVideos: s.concurrentVideos,
TakeOverExistingChannel: s.takeOverExistingChannel,
Refill: s.refill,
Manager: s,
LbrycrdString: s.lbrycrdString,
AwsS3ID: s.awsS3ID,
AwsS3Secret: s.awsS3Secret,
AwsS3Region: s.awsS3Region,
AwsS3Bucket: s.awsS3Bucket,
namer: namer.NewNamer(),
}
s.enqueueChannel(&channels[0])
shouldInterruptLoop = true
} else {
var queuesToSync []string
if s.syncStatus != "" {
queuesToSync = append(queuesToSync, s.syncStatus)
} else if s.syncUpdate {
queuesToSync = append(queuesToSync, StatusSyncing, StatusSynced)
if s.CliFlags.Status != "" {
queuesToSync = append(queuesToSync, shared.StatusSyncing, s.CliFlags.Status)
} else if s.CliFlags.SyncUpdate {
queuesToSync = append(queuesToSync, shared.StatusSyncing, shared.StatusSynced)
} else {
queuesToSync = append(queuesToSync, StatusSyncing, StatusQueued)
queuesToSync = append(queuesToSync, shared.StatusSyncing, shared.StatusQueued)
}
if s.CliFlags.SecondaryStatus != "" {
queuesToSync = append(queuesToSync, s.CliFlags.SecondaryStatus)
}
queues:
for _, q := range queuesToSync {
channels, err := s.apiConfig.FetchChannels(q, s.syncProperties)
channels, err := s.ApiConfig.FetchChannels(q, &s.CliFlags)
if err != nil {
return err
}
log.Infof("Currently processing the \"%s\" queue with %d channels", q, len(channels))
for _, c := range channels {
syncs = append(syncs, Sync{
APIConfig: s.apiConfig,
YoutubeChannelID: c.ChannelId,
LbryChannelName: c.DesiredChannelName,
lbryChannelID: c.ChannelClaimID,
StopOnError: s.stopOnError,
MaxTries: s.maxTries,
ConcurrentVideos: s.concurrentVideos,
TakeOverExistingChannel: s.takeOverExistingChannel,
Refill: s.refill,
Manager: s,
LbrycrdString: s.lbrycrdString,
AwsS3ID: s.awsS3ID,
AwsS3Secret: s.awsS3Secret,
AwsS3Region: s.awsS3Region,
AwsS3Bucket: s.awsS3Bucket,
namer: namer.NewNamer(),
})
c := c // rebind: &c would otherwise alias the loop variable across iterations
s.enqueueChannel(&c)
queueAll := q == shared.StatusFailed || q == shared.StatusSyncing
if !queueAll {
break queues
}
}
log.Infof("Drained the \"%s\" queue", q)
}
}
if len(syncs) == 0 {
if len(s.channelsToSync) == 0 {
log.Infoln("No channels to sync. Pausing 5 minutes!")
time.Sleep(5 * time.Minute)
}
for i, sync := range syncs {
for _, sync := range s.channelsToSync {
if lastChannelProcessed == sync.DbChannelData.ChannelId && secondLastChannelProcessed == lastChannelProcessed {
util.SendToSlack("We just killed a sync for %s to stop looping! (%s)", sync.DbChannelData.DesiredChannelName, sync.DbChannelData.ChannelId)
stopTheLoops := errors.Err("Found channel %s running 3 times, set it to failed, and reprocess later", sync.DbChannelData.DesiredChannelName)
sync.setChannelTerminationStatus(&stopTheLoops)
continue
}
secondLastChannelProcessed = lastChannelProcessed
lastChannelProcessed = sync.DbChannelData.ChannelId
shouldNotCount := false
SendInfoToSlack("Syncing %s (%s) to LBRY! (iteration %d/%d - total processed channels: %d)", sync.LbryChannelName, sync.YoutubeChannelID, i+1, len(syncs), syncCount+1)
logUtils.SendInfoToSlack("Syncing %s (%s) to LBRY! total processed channels since startup: %d", sync.DbChannelData.DesiredChannelName, sync.DbChannelData.ChannelId, syncCount+1)
err := sync.FullCycle()
//TODO: THIS IS A TEMPORARY WORK AROUND FOR THE STUPID IP LOCKUP BUG
ipPool, _ := ip_manager.GetIPPool(sync.grp)
if ipPool != nil {
ipPool.ReleaseAll()
}
if err != nil {
if strings.Contains(err.Error(), "quotaExceeded") {
logUtils.SleepUntilQuotaReset()
}
fatalErrors := []string{
"default_wallet already exists",
"WALLET HAS NOT BEEN MOVED TO THE WALLET BACKUP DIR",
"NotEnoughFunds",
"no space left on device",
"failure uploading wallet",
"there was a problem uploading the wallet",
"the channel in the wallet is different than the channel in the database",
"this channel does not belong to this wallet!",
"You already have a stream claim published under the name",
}
if util.SubstringInSlice(err.Error(), fatalErrors) {
return errors.Prefix("@Nikooo777 this requires manual intervention! Exiting...", err)
}
shouldNotCount = strings.Contains(err.Error(), "this youtube channel is being managed by another server")
if !shouldNotCount {
SendInfoToSlack("A non fatal error was reported by the sync process. %s\nContinuing...", err.Error())
logUtils.SendInfoToSlack("A non fatal error was reported by the sync process.\n%s", errors.FullTrace(err))
}
}
SendInfoToSlack("Syncing %s (%s) reached an end. (iteration %d/%d - total processed channels: %d)", sync.LbryChannelName, sync.YoutubeChannelID, i+1, len(syncs), syncCount+1)
err = logUtils.CleanupMetadata()
if err != nil {
log.Errorf("something went wrong while trying to clear out the video metadata directory: %s", errors.FullTrace(err))
}
err = blobs_reflector.ReflectAndClean()
if err != nil {
return errors.Prefix("@Nikooo777 something went wrong while reflecting blobs", err)
}
logUtils.SendInfoToSlack("%s (%s) reached an end. Total processed channels since startup: %d", sync.DbChannelData.DesiredChannelName, sync.DbChannelData.ChannelId, syncCount+1)
if !shouldNotCount {
syncCount++
}
if sync.IsInterrupted() || (s.limit != 0 && syncCount >= s.limit) {
if sync.IsInterrupted() || (s.CliFlags.Limit != 0 && syncCount >= s.CliFlags.Limit) {
shouldInterruptLoop = true
break
}
}
if shouldInterruptLoop || s.singleRun {
if shouldInterruptLoop || s.CliFlags.SingleRun {
break
}
}
return nil
}
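The lastChannelProcessed/secondLastChannelProcessed pair above is a minimal loop detector: the third consecutive sighting of the same channel marks it failed rather than letting it spin forever. The heuristic in isolation (a sketch, not an API from this repo):

	// reports true on the third consecutive occurrence of the same ID
	var last, secondLast string
	sawThreeInARow := func(id string) bool {
		if id == last && last == secondLast {
			return true
		}
		secondLast, last = last, id
		return false
	}
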
func (s *SyncManager) GetS3AWSConfig() aws.Config {
return aws.Config{
Credentials: credentials.NewStaticCredentials(s.awsS3ID, s.awsS3Secret, ""),
Region: &s.awsS3Region,
}
}
func (s *SyncManager) checkUsedSpace() error {
usedPctile, err := GetUsedSpace(s.blobsDir)
usedPctile, err := GetUsedSpace(logUtils.GetBlobsDir())
if err != nil {
return err
return errors.Err(err)
}
if usedPctile >= 0.90 && !s.skipSpaceCheck {
if usedPctile >= 0.90 && !s.CliFlags.SkipSpaceCheck {
return errors.Err(fmt.Sprintf("more than 90%% of the space has been used. use --skip-space-check to ignore. Used: %.1f%%", usedPctile*100))
}
log.Infof("disk usage: %.1f%%", usedPctile*100)

manager/s3_storage.go (new file)

@ -0,0 +1,285 @@
package manager
import (
"os"
"path/filepath"
"strings"
"time"
"github.com/lbryio/ytsync/v5/configs"
"github.com/lbryio/ytsync/v5/util"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
log "github.com/sirupsen/logrus"
)
func (s *Sync) getS3Downloader(config *aws.Config) (*s3manager.Downloader, error) {
s3Session, err := session.NewSession(config)
if err != nil {
return nil, errors.Prefix("error starting session", err)
}
downloader := s3manager.NewDownloader(s3Session)
return downloader, nil
}
func (s *Sync) getS3Uploader(config *aws.Config) (*s3manager.Uploader, error) {
s3Session, err := session.NewSession(config)
if err != nil {
return nil, errors.Prefix("error starting session", err)
}
uploader := s3manager.NewUploader(s3Session)
return uploader, nil
}
func (s *Sync) downloadWallet() error {
defaultWalletDir, defaultTempWalletDir, key, err := s.getWalletPaths()
if err != nil {
return errors.Err(err)
}
downloader, err := s.getS3Downloader(configs.Configuration.WalletS3Config.GetS3AWSConfig())
if err != nil {
return err
}
out, err := os.Create(defaultTempWalletDir)
if err != nil {
return errors.Prefix("error creating temp wallet", err)
}
defer out.Close()
bytesWritten, err := downloader.Download(out, &s3.GetObjectInput{
Bucket: aws.String(configs.Configuration.WalletS3Config.Bucket),
Key: key,
})
if err != nil {
// Casting to the awserr.Error type will allow you to inspect the error
// code returned by the service in code. The error code can be used
// to switch on context specific functionality. In this case a context
// specific error message is printed to the user based on the bucket
// and key existing.
//
// For information on other S3 API error codes see:
// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
if aerr, ok := err.(awserr.Error); ok {
code := aerr.Code()
if code == s3.ErrCodeNoSuchKey {
return errors.Err("wallet not on S3")
}
}
return err
} else if bytesWritten == 0 {
return errors.Err("zero bytes written")
}
err = os.Rename(defaultTempWalletDir, defaultWalletDir)
if err != nil {
return errors.Prefix("error replacing temp wallet for default wallet", err)
}
return nil
}
func (s *Sync) downloadBlockchainDB() error {
if util.IsRegTest() {
return nil // tests fail if we re-use the same blockchain DB
}
defaultBDBPath, defaultTempBDBPath, key, err := s.getBlockchainDBPaths()
if err != nil {
return errors.Err(err)
}
files, err := filepath.Glob(defaultBDBPath + "*")
if err != nil {
return errors.Err(err)
}
for _, f := range files {
err = os.Remove(f)
if err != nil {
return errors.Err(err)
}
}
if s.DbChannelData.WipeDB {
return nil
}
downloader, err := s.getS3Downloader(configs.Configuration.BlockchaindbS3Config.GetS3AWSConfig())
if err != nil {
return errors.Err(err)
}
out, err := os.Create(defaultTempBDBPath)
if err != nil {
return errors.Prefix("error creating temp blockchain DB file", err)
}
defer out.Close()
bytesWritten, err := downloader.Download(out, &s3.GetObjectInput{
Bucket: aws.String(configs.Configuration.BlockchaindbS3Config.Bucket),
Key: key,
})
if err != nil {
// Casting to the awserr.Error type will allow you to inspect the error
// code returned by the service in code. The error code can be used
// to switch on context specific functionality. In this case a context
// specific error message is printed to the user based on the bucket
// and key existing.
//
// For information on other S3 API error codes see:
// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
if aerr, ok := err.(awserr.Error); ok {
code := aerr.Code()
if code == s3.ErrCodeNoSuchKey {
return nil // let ytsync sync the database by itself
}
}
return errors.Err(err)
} else if bytesWritten == 0 {
return errors.Err("zero bytes written")
}
blockchainDbDir := strings.Replace(defaultBDBPath, "blockchain.db", "", -1)
err = util.Untar(defaultTempBDBPath, blockchainDbDir)
if err != nil {
return errors.Prefix("error extracting blockchain.db files", err)
}
err = os.Remove(defaultTempBDBPath)
if err != nil {
return errors.Err(err)
}
log.Printf("blockchain.db data downloaded and extracted to %s", blockchainDbDir)
return nil
}
func (s *Sync) getWalletPaths() (defaultWallet, tempWallet string, key *string, err error) {
defaultWallet = os.Getenv("HOME") + "/.lbryum/wallets/default_wallet"
tempWallet = os.Getenv("HOME") + "/.lbryum/wallets/tmp_wallet"
key = aws.String("/wallets/" + s.DbChannelData.ChannelId)
if util.IsRegTest() {
defaultWallet = os.Getenv("HOME") + "/.lbryum_regtest/wallets/default_wallet"
tempWallet = os.Getenv("HOME") + "/.lbryum_regtest/wallets/tmp_wallet"
key = aws.String("/regtest/" + s.DbChannelData.ChannelId)
}
lbryumDir := os.Getenv("LBRYUM_DIR")
if lbryumDir != "" {
defaultWallet = lbryumDir + "/wallets/default_wallet"
tempWallet = lbryumDir + "/wallets/tmp_wallet"
}
if _, err := os.Stat(defaultWallet); !os.IsNotExist(err) {
return "", "", nil, errors.Err("default_wallet already exists")
}
return
}
func (s *Sync) getBlockchainDBPaths() (defaultDB, tempDB string, key *string, err error) {
lbryumDir := os.Getenv("LBRYUM_DIR")
if lbryumDir == "" {
if util.IsRegTest() {
lbryumDir = os.Getenv("HOME") + "/.lbryum_regtest"
} else {
lbryumDir = os.Getenv("HOME") + "/.lbryum"
}
}
defaultDB = lbryumDir + "/lbc_mainnet/blockchain.db"
tempDB = lbryumDir + "/lbc_mainnet/tmp_blockchain.tar"
key = aws.String("/blockchain_dbs/" + s.DbChannelData.ChannelId + ".tar")
if util.IsRegTest() {
defaultDB = lbryumDir + "/lbc_regtest/blockchain.db"
tempDB = lbryumDir + "/lbc_regtest/tmp_blockchain.tar"
key = aws.String("/regtest_dbs/" + s.DbChannelData.ChannelId + ".tar")
}
return
}
func (s *Sync) uploadWallet() error {
defaultWalletDir := util.GetDefaultWalletPath()
key := aws.String("/wallets/" + s.DbChannelData.ChannelId)
if util.IsRegTest() {
key = aws.String("/regtest/" + s.DbChannelData.ChannelId)
}
if _, err := os.Stat(defaultWalletDir); os.IsNotExist(err) {
return errors.Err("default_wallet does not exist")
}
uploader, err := s.getS3Uploader(configs.Configuration.WalletS3Config.GetS3AWSConfig())
if err != nil {
return err
}
file, err := os.Open(defaultWalletDir)
if err != nil {
return err
}
defer file.Close()
start := time.Now()
for time.Since(start) < 30*time.Minute {
_, err = uploader.Upload(&s3manager.UploadInput{
Bucket: aws.String(configs.Configuration.WalletS3Config.Bucket),
Key: key,
Body: file,
})
if err != nil {
time.Sleep(30 * time.Second)
continue
}
break
}
if err != nil {
return errors.Prefix("there was a problem uploading the wallet to S3", errors.Err(err))
}
log.Println("wallet uploaded to S3")
return os.Remove(defaultWalletDir)
}
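uploadWallet retries the S3 upload every 30 seconds for up to 30 minutes before surfacing the last error. Generalized into a helper, the pattern looks like this (a sketch; note the loop above reuses one *os.File across attempts, so a hardened attempt closure would Seek back to offset 0 before each retry):

	func retryUntilDeadline(attempt func() error, deadline, pause time.Duration) error {
		var err error
		start := time.Now()
		for time.Since(start) < deadline {
			if err = attempt(); err == nil {
				return nil
			}
			time.Sleep(pause)
		}
		return err
	}

	// mirroring the code above:
	// err := retryUntilDeadline(doUpload, 30*time.Minute, 30*time.Second)
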
func (s *Sync) uploadBlockchainDB() error {
defaultBDBDir, _, key, err := s.getBlockchainDBPaths()
if err != nil {
return errors.Err(err)
}
if _, err := os.Stat(defaultBDBDir); os.IsNotExist(err) {
return errors.Err("blockchain.db does not exist")
}
files, err := filepath.Glob(defaultBDBDir + "*")
if err != nil {
return errors.Err(err)
}
tarPath := strings.Replace(defaultBDBDir, "blockchain.db", "", -1) + s.DbChannelData.ChannelId + ".tar"
err = util.CreateTarball(tarPath, files)
if err != nil {
return err
}
uploader, err := s.getS3Uploader(configs.Configuration.BlockchaindbS3Config.GetS3AWSConfig())
if err != nil {
return err
}
file, err := os.Open(tarPath)
if err != nil {
return err
}
defer file.Close()
_, err = uploader.Upload(&s3manager.UploadInput{
Bucket: aws.String(configs.Configuration.BlockchaindbS3Config.Bucket),
Key: key,
Body: file,
})
if err != nil {
return err
}
log.Println("blockchain.db files uploaded to S3")
err = os.Remove(tarPath)
if err != nil {
return errors.Err(err)
}
return os.Remove(defaultBDBDir)
}


@ -2,39 +2,46 @@ package manager
import (
"fmt"
"net/http"
"os"
"math"
"strconv"
"strings"
"time"
"github.com/lbryio/lbry.go/extras/errors"
"github.com/lbryio/lbry.go/extras/jsonrpc"
"github.com/lbryio/lbry.go/extras/util"
"github.com/lbryio/lbry.go/lbrycrd"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/jsonrpc"
"github.com/lbryio/lbry.go/v2/extras/util"
"github.com/lbryio/ytsync/v5/shared"
"github.com/lbryio/ytsync/v5/timing"
logUtils "github.com/lbryio/ytsync/v5/util"
"github.com/lbryio/ytsync/v5/ytapi"
"github.com/lbryio/ytsync/tagsManager"
"github.com/lbryio/ytsync/thumbs"
"github.com/lbryio/ytsync/v5/tags_manager"
"github.com/lbryio/ytsync/v5/thumbs"
"github.com/shopspring/decimal"
log "github.com/sirupsen/logrus"
"google.golang.org/api/googleapi/transport"
"google.golang.org/api/youtube/v3"
)
const minimumRefillAmount = 3
func (s *Sync) enableAddressReuse() error {
accountsResponse, err := s.daemon.AccountList()
accountsResponse, err := s.daemon.AccountList(1, 50)
if err != nil {
return errors.Err(err)
}
accounts := accountsResponse.LBCMainnet
if os.Getenv("REGTEST") == "true" {
accounts = accountsResponse.LBCRegtest
accounts := make([]jsonrpc.Account, 0, len(accountsResponse.Items))
ledger := "lbc_mainnet"
if logUtils.IsRegTest() {
ledger = "lbc_regtest"
}
for _, a := range accountsResponse.Items {
if *a.Ledger == ledger {
accounts = append(accounts, a)
}
}
for _, a := range accounts {
_, err = s.daemon.AccountSet(a.ID, jsonrpc.AccountSettings{
ChangeMaxUses: 1000,
ChangeMaxUses: util.PtrToInt(1000),
ReceivingMaxUses: util.PtrToInt(100),
})
if err != nil {
return errors.Err(err)
@ -43,7 +50,11 @@ func (s *Sync) enableAddressReuse() error {
return nil
}
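walletSetup below, like most long-running functions in this diff, opens with the same instrumentation idiom: capture time.Now(), then defer timing.TimedComponent(name).Add(time.Since(start)). The real accumulator lives in github.com/lbryio/ytsync/v5/timing and is not shown in this diff; a minimal sketch of what such a component might look like:

	// illustrative only: a per-component duration accumulator
	var (
		mu     sync.Mutex
		totals = map[string]time.Duration{}
	)

	type component string

	func TimedComponent(name string) component { return component(name) }

	func (c component) Add(d time.Duration) {
		mu.Lock()
		defer mu.Unlock()
		totals[string(c)] += d
	}
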
func (s *Sync) walletSetup() error {
//prevent unnecessary concurrent execution
start := time.Now()
defer func(start time.Time) {
timing.TimedComponent("walletSetup").Add(time.Since(start))
}(start)
//prevent unnecessary concurrent execution and publishing while refilling/reallocating UTXOs
s.walletMux.Lock()
defer s.walletMux.Unlock()
err := s.ensureChannelOwnership()
@ -57,74 +68,87 @@ func (s *Sync) walletSetup() error {
} else if balanceResp == nil {
return errors.Err("no response")
}
balance, err := strconv.ParseFloat((string)(*balanceResp), 64)
balance, err := strconv.ParseFloat(balanceResp.Available.String(), 64)
if err != nil {
return errors.Err(err)
}
log.Debugf("Starting balance is %.4f", balance)
n, err := s.CountVideos()
if err != nil {
return err
}
numOnSource := int(n)
videosOnYoutube := int(s.DbChannelData.TotalVideos)
log.Debugf("Source channel has %d videos", numOnSource)
if numOnSource == 0 {
log.Debugf("Source channel has %d videos", videosOnYoutube)
if videosOnYoutube == 0 {
return nil
}
s.syncedVideosMux.RLock()
numPublished := len(s.syncedVideos) //should we only count published videos? Credits are allocated even for failed ones...
s.syncedVideosMux.RUnlock()
log.Debugf("We already allocated credits for %d videos", numPublished)
if numOnSource-numPublished > s.Manager.videosLimit {
numOnSource = s.Manager.videosLimit
}
minBalance := (float64(numOnSource)-float64(numPublished))*(publishAmount+0.1) + channelClaimAmount
if s.Manager.syncStatus == StatusPendingUpgrade {
videosToUpgrade := 0
for _, v := range s.syncedVideos {
if v.Published && v.MetadataVersion < 2 {
videosToUpgrade++
publishedCount := 0
notUpgradedCount := 0
failedCount := 0
for _, sv := range s.syncedVideos {
if sv.Published {
publishedCount++
if sv.MetadataVersion < 2 {
notUpgradedCount++
}
}
minBalance += float64(videosToUpgrade) * 0.001
}
if numPublished > numOnSource && balance < minimumRefillAmount {
SendErrorToSlack("something is going on as we published more videos than those available on source: %d/%d", numPublished, numOnSource)
minBalance = minimumRefillAmount
}
amountToAdd := minBalance - balance
if s.Refill > 0 {
if amountToAdd < 0 {
amountToAdd = float64(s.Refill)
} else {
amountToAdd += float64(s.Refill)
failedCount++
}
}
s.syncedVideosMux.RUnlock()
if amountToAdd > 0 {
if amountToAdd < minimumRefillAmount {
amountToAdd = minimumRefillAmount
}
err := s.addCredits(amountToAdd)
log.Debugf("We already allocated credits for %d published videos and %d failed videos", publishedCount, failedCount)
if videosOnYoutube > s.Manager.CliFlags.VideosToSync(s.DbChannelData.TotalSubscribers) {
videosOnYoutube = s.Manager.CliFlags.VideosToSync(s.DbChannelData.TotalSubscribers)
}
unallocatedVideos := videosOnYoutube - (publishedCount + failedCount)
if unallocatedVideos < 0 {
unallocatedVideos = 0
}
channelFee := channelClaimAmount
channelAlreadyClaimed := s.DbChannelData.ChannelClaimID != ""
if channelAlreadyClaimed {
channelFee = 0.0
}
requiredBalance := float64(unallocatedVideos)*(publishAmount+estimatedMaxTxFee) + channelFee
if s.Manager.CliFlags.UpgradeMetadata {
requiredBalance += float64(notUpgradedCount) * estimatedMaxTxFee
}
refillAmount := 0.0
if balance < requiredBalance || balance < minimumAccountBalance {
refillAmount = math.Max(math.Max(requiredBalance-balance, minimumAccountBalance-balance), minimumRefillAmount)
}
if s.Manager.CliFlags.Refill > 0 {
refillAmount += float64(s.Manager.CliFlags.Refill)
}
if refillAmount > 0 {
err := s.addCredits(refillAmount)
if err != nil {
return errors.Err(err)
}
} else if balance > requiredBalance {
extraLBC := balance - requiredBalance
if extraLBC > 5 {
sendBackAmount := extraLBC - 1
logUtils.SendInfoToSlack("channel %s has %.1f credits which is %.1f more than it requires (%.1f). We should send at least %.1f that back.", s.DbChannelData.ChannelId, balance, extraLBC, requiredBalance, sendBackAmount)
}
}
claimAddress, err := s.daemon.AddressList(nil)
claimAddress, err := s.daemon.AddressList(nil, nil, 1, 20)
if err != nil {
return err
} else if claimAddress == nil {
return errors.Err("could not get unused address")
return errors.Err("could not get an address")
}
s.claimAddress = string((*claimAddress)[0]) //TODO: remove claimAddress completely
if s.claimAddress == "" {
if s.DbChannelData.PublishAddress.Address == "" || !s.shouldTransfer() {
s.DbChannelData.PublishAddress.Address = string(claimAddress.Items[0].Address)
s.DbChannelData.PublishAddress.IsMine = true
}
if s.DbChannelData.PublishAddress.Address == "" {
return errors.Err("found blank claim address")
}
@ -136,27 +160,47 @@ func (s *Sync) walletSetup() error {
return nil
}
func (s *Sync) ensureEnoughUTXOs() error {
accounts, err := s.daemon.AccountList()
if err != nil {
return errors.Err(err)
}
accountsNet := (*accounts).LBCMainnet
if os.Getenv("REGTEST") == "true" {
accountsNet = (*accounts).LBCRegtest
}
defaultAccount := ""
for _, account := range accountsNet {
if account.IsDefault {
defaultAccount = account.ID
break
func (s *Sync) getDefaultAccount() (string, error) {
start := time.Now()
defer func(start time.Time) {
timing.TimedComponent("getDefaultAccount").Add(time.Since(start))
}(start)
if s.defaultAccountID == "" {
accountsResponse, err := s.daemon.AccountList(1, 50)
if err != nil {
return "", errors.Err(err)
}
ledger := "lbc_mainnet"
if logUtils.IsRegTest() {
ledger = "lbc_regtest"
}
for _, a := range accountsResponse.Items {
if *a.Ledger == ledger {
if a.IsDefault {
s.defaultAccountID = a.ID
break
}
}
}
if s.defaultAccountID == "" {
return "", errors.Err("No default account found")
}
}
if defaultAccount == "" {
return errors.Err("No default account found")
return s.defaultAccountID, nil
}
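getDefaultAccount memoizes the ledger-filtered default account ID on the Sync struct. The plain string field is fine while calls stay serialized, which they are in this diff (each caller fetches the ID before spawning workers); if it were ever reached concurrently, sync.Once would be the idiomatic guard. A sketch under that assumption:

	// illustrative: race-free memoization of a fallible lookup
	type accountCache struct {
		once sync.Once
		id   string
		err  error
	}

	func (c *accountCache) get(lookup func() (string, error)) (string, error) {
		c.once.Do(func() { c.id, c.err = lookup() })
		return c.id, c.err
	}
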
func (s *Sync) ensureEnoughUTXOs() error {
start := time.Now()
defer func(start time.Time) {
timing.TimedComponent("ensureEnoughUTXOs").Add(time.Since(start))
}(start)
defaultAccount, err := s.getDefaultAccount()
if err != nil {
return err
}
utxolist, err := s.daemon.UTXOList(&defaultAccount)
utxolist, err := s.daemon.UTXOList(&defaultAccount, 1, 10000)
if err != nil {
return err
} else if utxolist == nil {
@ -166,14 +210,19 @@ func (s *Sync) ensureEnoughUTXOs() error {
target := 40
slack := int(float32(0.1) * float32(target))
count := 0
confirmedCount := 0
for _, utxo := range *utxolist {
for _, utxo := range utxolist.Items {
amount, _ := strconv.ParseFloat(utxo.Amount, 64)
if !utxo.IsClaim && !utxo.IsSupport && !utxo.IsUpdate && amount != 0.0 {
if utxo.IsMyOutput && utxo.Type == "payment" && amount > 0.001 {
if utxo.Confirmations > 0 {
confirmedCount++
}
count++
}
}
log.Infof("utxo count: %d", count)
log.Infof("utxo count: %d (%d confirmed)", count, confirmedCount)
UTXOWaitThreshold := 16
if count < target-slack {
balance, err := s.daemon.AccountBalance(&defaultAccount)
if err != nil {
@ -182,27 +231,44 @@ func (s *Sync) ensureEnoughUTXOs() error {
return errors.Err("no response")
}
balanceAmount, err := strconv.ParseFloat((string)(*balance), 64)
balanceAmount, err := strconv.ParseFloat(balance.Available.String(), 64)
if err != nil {
return errors.Err(err)
}
broadcastFee := 0.01
amountToSplit := fmt.Sprintf("%.6f", balanceAmount-broadcastFee)
//this is dumb but sometimes the balance is negative and it breaks everything, so let's check again
if balanceAmount < 0 {
log.Infof("negative balance of %.2f found. Waiting to retry...", balanceAmount)
time.Sleep(10 * time.Second)
balance, err = s.daemon.AccountBalance(&defaultAccount)
if err != nil {
return errors.Err(err)
} else if balance == nil {
return errors.Err("no response")
}
balanceAmount, err = strconv.ParseFloat(balance.Available.String(), 64)
if err != nil {
return errors.Err(err)
}
}
maxUTXOs := uint64(500)
desiredUTXOCount := uint64(math.Floor((balanceAmount) / 0.1))
if desiredUTXOCount > maxUTXOs {
desiredUTXOCount = maxUTXOs
}
if desiredUTXOCount < uint64(confirmedCount) {
return nil
}
availableBalance, _ := balance.Available.Float64()
log.Infof("Splitting balance of %.3f evenly between %d UTXOs", availableBalance, desiredUTXOCount)
log.Infof("Splitting balance of %s evenly between 40 UTXOs", *balance)
prefillTx, err := s.daemon.AccountFund(defaultAccount, defaultAccount, amountToSplit, uint64(target))
broadcastFee := 0.1
prefillTx, err := s.daemon.AccountFund(defaultAccount, defaultAccount, fmt.Sprintf("%.4f", balanceAmount-broadcastFee), desiredUTXOCount, false)
if err != nil {
return err
} else if prefillTx == nil {
return errors.Err("no response")
}
err = s.waitForNewBlock()
if err != nil {
return err
if confirmedCount < UTXOWaitThreshold {
err = s.waitForNewBlock()
if err != nil {
return err
}
}
} else if !allUTXOsConfirmed(utxolist) {
} else if confirmedCount < UTXOWaitThreshold {
log.Println("Waiting for previous txns to confirm")
err := s.waitForNewBlock()
if err != nil {
@ -214,6 +280,9 @@ func (s *Sync) ensureEnoughUTXOs() error {
}
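The splitting logic above aims for roughly 0.1 LBC per UTXO, caps the count at 500, and skips the split entirely when enough confirmed UTXOs already exist. The count heuristic as a pure function (a sketch mirroring the constants above):

	func desiredUTXOs(balance float64) uint64 {
		const lbcPerUTXO = 0.1
		const maxUTXOs = 500
		n := uint64(math.Floor(balance / lbcPerUTXO))
		if n > maxUTXOs {
			n = maxUTXOs
		}
		return n
	}
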
func (s *Sync) waitForNewBlock() error {
defer func(start time.Time) { timing.TimedComponent("waitForNewBlock").Add(time.Since(start)) }(time.Now())
log.Printf("regtest: %t, docker: %t", logUtils.IsRegTest(), logUtils.IsUsingDocker())
status, err := s.daemon.Status()
if err != nil {
return err
@ -226,72 +295,91 @@ func (s *Sync) waitForNewBlock() error {
return err
}
}
currentBlock := status.Wallet.Blocks
for i := 0; status.Wallet.Blocks <= currentBlock; i++ {
if i%3 == 0 {
log.Printf("Waiting for new block (%d)...", currentBlock+1)
}
if logUtils.IsRegTest() && logUtils.IsUsingDocker() {
err = s.GenerateRegtestBlock()
if err != nil {
return err
}
}
time.Sleep(10 * time.Second)
status, err = s.daemon.Status()
if err != nil {
return err
}
}
time.Sleep(5 * time.Second)
return nil
}
func (s *Sync) GenerateRegtestBlock() error {
lbrycrd, err := logUtils.GetLbrycrdClient(s.Manager.LbrycrdDsn)
if err != nil {
return errors.Prefix("error getting lbrycrd client", err)
}
txs, err := lbrycrd.Generate(1)
if err != nil {
return errors.Prefix("error generating new block", err)
}
for _, tx := range txs {
log.Info("Generated tx: ", tx.String())
}
return nil
}
func (s *Sync) ensureChannelOwnership() error {
if s.LbryChannelName == "" {
defer func(start time.Time) { timing.TimedComponent("ensureChannelOwnership").Add(time.Since(start)) }(time.Now())
if s.DbChannelData.DesiredChannelName == "" {
return errors.Err("no channel name set")
}
//@TODO: get rid of this when imported channels are supported
if s.YoutubeChannelID == "UCW-thz5HxE-goYq8yPds1Gw" {
return nil
}
channels, err := s.daemon.ChannelList(nil, 1, 50)
channels, err := s.daemon.ChannelList(nil, 1, 500, nil)
if err != nil {
return err
} else if channels == nil {
return errors.Err("no channel response")
}
//special case for wallets we don't retain full control anymore
if len((*channels).Items) > 1 {
// This wallet is probably not under our control anymore but we still want to publish to it
// here we shall check if within all the channels there is one that was created by ytsync
SendInfoToSlack("we are dealing with a wallet that has multiple channels. This indicates that the wallet was probably transferred but we still want to sync their content. YoutubeID: %s", s.YoutubeChannelID)
if s.lbryChannelID == "" {
var channelToUse *jsonrpc.Transaction
if len((*channels).Items) > 0 {
if s.DbChannelData.ChannelClaimID == "" {
return errors.Err("this channel does not have a recorded claimID in the database. To prevent failures, updates are not supported until an entry is manually added in the database")
}
for _, c := range (*channels).Items {
if c.ClaimID != s.lbryChannelID {
if c.Name != s.LbryChannelName {
return errors.Err("the channel in the wallet is different than the channel in the database")
}
return nil // we have the ytsync channel and both the claimID and the channelName from the database are correct
log.Debugf("checking listed channel %s (%s)", c.ClaimID, c.Name)
if c.ClaimID != s.DbChannelData.ChannelClaimID {
continue
}
}
}
channelUsesOldMetadata := false
if len((*channels).Items) == 1 {
channel := ((*channels).Items)[0]
if channel.Name == s.LbryChannelName {
channelUsesOldMetadata = channel.Value.GetThumbnail() == nil
//TODO: eventually get rid of this when the whole db is filled
if s.lbryChannelID == "" {
err = s.Manager.apiConfig.SetChannelClaimID(s.YoutubeChannelID, channel.ClaimID)
} else if channel.ClaimID != s.lbryChannelID {
if c.Name != s.DbChannelData.DesiredChannelName {
return errors.Err("the channel in the wallet is different than the channel in the database")
}
s.lbryChannelID = channel.ClaimID
if !channelUsesOldMetadata {
return err
}
} else {
return errors.Err("this channel does not belong to this wallet! Expected: %s, found: %s", s.LbryChannelName, channel.Name)
channelToUse = &c
break
}
if channelToUse == nil {
return errors.Err("this wallet has channels but not a single one is ours! Expected claim_id: %s (%s)", s.DbChannelData.ChannelClaimID, s.DbChannelData.DesiredChannelName)
}
} else if s.DbChannelData.TransferState == shared.TransferStateComplete {
return errors.Err("the channel was transferred but appears to have been abandoned!")
} else if s.DbChannelData.ChannelClaimID != "" {
return errors.Err("the database has a channel recorded (%s) but nothing was found in our control", s.DbChannelData.ChannelClaimID)
}
channelBidAmount := channelClaimAmount
channelUsesOldMetadata := false
if channelToUse != nil {
channelUsesOldMetadata = channelToUse.Value.GetThumbnail() == nil || (len(channelToUse.Value.GetLanguages()) == 0 && s.DbChannelData.Language != "")
if !channelUsesOldMetadata {
return nil
}
}
balanceResp, err := s.daemon.AccountBalance(nil)
if err != nil {
@ -299,47 +387,43 @@ func (s *Sync) ensureChannelOwnership() error {
} else if balanceResp == nil {
return errors.Err("no response")
}
balance, err := decimal.NewFromString((string)(*balanceResp))
balance, err := decimal.NewFromString(balanceResp.Available.String())
if err != nil {
return errors.Err(err)
}
if balance.LessThan(decimal.NewFromFloat(channelBidAmount)) {
err = s.addCredits(channelBidAmount + 0.1)
if balance.LessThan(decimal.NewFromFloat(channelClaimAmount)) {
err = s.addCredits(channelClaimAmount + estimatedMaxTxFee*3)
if err != nil {
return err
}
}
client := &http.Client{
Transport: &transport.APIKey{Key: s.APIConfig.YoutubeAPIKey},
}
service, err := youtube.New(client)
channelInfo, err := ytapi.ChannelInfo(s.DbChannelData.ChannelId)
if err != nil {
return errors.Prefix("error creating YouTube service", err)
if strings.Contains(err.Error(), "invalid character 'e' looking for beginning of value") {
logUtils.SendInfoToSlack("failed to get channel data for %s. Waiting 1 minute to retry", s.DbChannelData.ChannelId)
time.Sleep(1 * time.Minute)
channelInfo, err = ytapi.ChannelInfo(s.DbChannelData.ChannelId)
if err != nil {
return err
}
} else {
return err
}
}
response, err := service.Channels.List("snippet,brandingSettings").Id(s.YoutubeChannelID).Do()
if err != nil {
return errors.Prefix("error getting channel details", err)
}
if len(response.Items) < 1 {
return errors.Err("youtube channel not found")
}
channelInfo := response.Items[0].Snippet
channelBranding := response.Items[0].BrandingSettings
thumbnail := thumbs.GetBestThumbnail(channelInfo.Thumbnails)
thumbnailURL, err := thumbs.MirrorThumbnail(thumbnail.Url, s.YoutubeChannelID, s.Manager.GetS3AWSConfig())
thumbnail := channelInfo.Header.C4TabbedHeaderRenderer.Avatar.Thumbnails[len(channelInfo.Header.C4TabbedHeaderRenderer.Avatar.Thumbnails)-1].URL
thumbnailURL, err := thumbs.MirrorThumbnail(thumbnail, s.DbChannelData.ChannelId)
if err != nil {
return err
}
var bannerURL *string
if channelBranding.Image != nil && channelBranding.Image.BannerImageUrl != "" {
bURL, err := thumbs.MirrorThumbnail(channelBranding.Image.BannerImageUrl, "banner-"+s.YoutubeChannelID, s.Manager.GetS3AWSConfig())
if channelInfo.Header.C4TabbedHeaderRenderer.Banner.Thumbnails != nil {
bURL, err := thumbs.MirrorThumbnail(channelInfo.Header.C4TabbedHeaderRenderer.Banner.Thumbnails[len(channelInfo.Header.C4TabbedHeaderRenderer.Banner.Thumbnails)-1].URL,
"banner-"+s.DbChannelData.ChannelId,
)
if err != nil {
return err
}
@ -347,87 +431,108 @@ func (s *Sync) ensureChannelOwnership() error {
}
var languages []string = nil
if channelInfo.DefaultLanguage != "" {
languages = []string{channelInfo.DefaultLanguage}
}
var locations []jsonrpc.Location = nil
if channelInfo.Country != "" {
locations = []jsonrpc.Location{{Country: util.PtrToString(channelInfo.Country)}}
}
var c *jsonrpc.TransactionSummary
if channelUsesOldMetadata {
c, err = s.daemon.ChannelUpdate(s.lbryChannelID, jsonrpc.ChannelUpdateOptions{
ClearTags: util.PtrToBool(true),
ClearLocations: util.PtrToBool(true),
ClearLanguages: util.PtrToBool(true),
ChannelCreateOptions: jsonrpc.ChannelCreateOptions{
ClaimCreateOptions: jsonrpc.ClaimCreateOptions{
Title: channelInfo.Title,
Description: channelInfo.Description,
Tags: tagsManager.GetTagsForChannel(s.YoutubeChannelID),
Languages: languages,
Locations: locations,
ThumbnailURL: &thumbnailURL,
},
CoverURL: bannerURL,
},
})
} else {
c, err = s.daemon.ChannelCreate(s.LbryChannelName, channelBidAmount, jsonrpc.ChannelCreateOptions{
ClaimCreateOptions: jsonrpc.ClaimCreateOptions{
Title: channelInfo.Title,
Description: channelInfo.Description,
Tags: tagsManager.GetTagsForChannel(s.YoutubeChannelID),
Languages: languages,
Locations: locations,
ThumbnailURL: &thumbnailURL,
},
CoverURL: bannerURL,
})
if s.DbChannelData.Language != "" {
languages = []string{s.DbChannelData.Language}
}
var locations []jsonrpc.Location = nil
if channelInfo.Topbar.DesktopTopbarRenderer.CountryCode != "" {
locations = []jsonrpc.Location{{Country: &channelInfo.Topbar.DesktopTopbarRenderer.CountryCode}}
}
var c *jsonrpc.TransactionSummary
var recoveredChannelClaimID string
claimCreateOptions := jsonrpc.ClaimCreateOptions{
Title: &channelInfo.Microformat.MicroformatDataRenderer.Title,
Description: &channelInfo.Metadata.ChannelMetadataRenderer.Description,
Tags: tags_manager.GetTagsForChannel(s.DbChannelData.ChannelId),
Languages: languages,
Locations: locations,
ThumbnailURL: &thumbnailURL,
}
if channelUsesOldMetadata {
da, err := s.getDefaultAccount()
if err != nil {
return err
}
if s.DbChannelData.TransferState <= 1 {
c, err = s.daemon.ChannelUpdate(s.DbChannelData.ChannelClaimID, jsonrpc.ChannelUpdateOptions{
ClearTags: util.PtrToBool(true),
ClearLocations: util.PtrToBool(true),
ClearLanguages: util.PtrToBool(true),
ChannelCreateOptions: jsonrpc.ChannelCreateOptions{
AccountID: &da,
FundingAccountIDs: []string{
da,
},
ClaimCreateOptions: claimCreateOptions,
CoverURL: bannerURL,
},
})
} else {
logUtils.SendInfoToSlack("%s (%s) has a channel with old metadata but isn't in our control anymore. Ignoring", s.DbChannelData.DesiredChannelName, s.DbChannelData.ChannelClaimID)
return nil
}
} else {
c, err = s.daemon.ChannelCreate(s.DbChannelData.DesiredChannelName, channelClaimAmount, jsonrpc.ChannelCreateOptions{
ClaimCreateOptions: claimCreateOptions,
CoverURL: bannerURL,
})
if err != nil {
claimId, err2 := s.getChannelClaimIDForTimedOutCreation()
if err2 != nil {
err = errors.Prefix(err2.Error(), err)
} else {
recoveredChannelClaimID = claimId
}
}
}
if err != nil {
return err
}
s.lbryChannelID = c.Outputs[0].ClaimID
return s.Manager.apiConfig.SetChannelClaimID(s.YoutubeChannelID, s.lbryChannelID)
if recoveredChannelClaimID != "" {
s.DbChannelData.ChannelClaimID = recoveredChannelClaimID
} else {
s.DbChannelData.ChannelClaimID = c.Outputs[0].ClaimID
}
return s.Manager.ApiConfig.SetChannelClaimID(s.DbChannelData.ChannelId, s.DbChannelData.ChannelClaimID)
}
func allUTXOsConfirmed(utxolist *jsonrpc.UTXOListResponse) bool {
if utxolist == nil {
return false
}
if len(*utxolist) < 1 {
return false
}
for _, utxo := range *utxolist {
if utxo.Height == 0 {
return false
}
}
return true
}
//getChannelClaimIDForTimedOutCreation is a raw function that returns the only channel that exists in the wallet
// this is used because the SDK sucks and can't figure out when to return when creating a claim...
func (s *Sync) getChannelClaimIDForTimedOutCreation() (string, error) {
channels, err := s.daemon.ChannelList(nil, 1, 500, nil)
if err != nil {
return "", err
} else if channels == nil {
return "", errors.Err("no channel response")
}
if len((*channels).Items) != 1 {
return "", errors.Err("more than one channel found when trying to recover from SDK failure in creating the channel")
}
desiredChannel := (*channels).Items[0]
if desiredChannel.Name != s.DbChannelData.DesiredChannelName {
return "", errors.Err("the channel found in the wallet has a different name than the one we expected")
}
return desiredChannel.ClaimID, nil
}
func (s *Sync) addCredits(amountToAdd float64) error {
start := time.Now()
defer func(start time.Time) {
timing.TimedComponent("addCredits").Add(time.Since(start))
}(start)
log.Printf("Adding %f credits", amountToAdd)
var lbrycrdd *lbrycrd.Client
var err error
if s.LbrycrdString == "" {
lbrycrdd, err = lbrycrd.NewWithDefaultURL()
if err != nil {
return err
}
} else {
lbrycrdd, err = lbrycrd.New(s.LbrycrdString)
if err != nil {
return err
}
lbrycrdd, err := logUtils.GetLbrycrdClient(s.Manager.LbrycrdDsn)
if err != nil {
return err
}
addressResp, err := s.daemon.AddressUnused(nil)
defaultAccount, err := s.getDefaultAccount()
if err != nil {
return err
}
addressResp, err := s.daemon.AddressUnused(&defaultAccount)
if err != nil {
return err
} else if addressResp == nil {

manager/transfer.go (new file)

@ -0,0 +1,349 @@
package manager
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/jsonrpc"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/lbry.go/v2/extras/util"
"github.com/lbryio/ytsync/v5/shared"
"github.com/lbryio/ytsync/v5/timing"
log "github.com/sirupsen/logrus"
)
func waitConfirmations(s *Sync) error {
start := time.Now()
defer func(start time.Time) {
timing.TimedComponent("waitConfirmations").Add(time.Since(start))
}(start)
defaultAccount, err := s.getDefaultAccount()
if err != nil {
return err
}
allConfirmed := false
waitCount := 0
waiting:
for !allConfirmed && waitCount < 2 {
utxolist, err := s.daemon.UTXOList(&defaultAccount, 1, 10000)
if err != nil {
return err
} else if utxolist == nil {
return errors.Err("no response")
}
for _, utxo := range utxolist.Items {
if utxo.Confirmations <= 0 {
err = s.waitForNewBlock()
if err != nil {
return err
}
waitCount++
continue waiting
}
}
allConfirmed = true
}
return nil
}
type abandonResponse struct {
ClaimID string
Error error
Amount float64
}
func abandonSupports(s *Sync) (float64, error) {
start := time.Now()
defer func(start time.Time) {
timing.TimedComponent("abandonSupports").Add(time.Since(start))
}(start)
totalPages := uint64(1)
var allSupports []jsonrpc.Claim
defaultAccount, err := s.getDefaultAccount()
if err != nil {
return 0, err
}
for page := uint64(1); page <= totalPages; page++ {
supports, err := s.daemon.SupportList(&defaultAccount, page, 50)
if err != nil {
supports, err = s.daemon.SupportList(&defaultAccount, page, 50)
if err != nil {
return 0, errors.Prefix("cannot list supports", err)
}
}
allSupports = append(allSupports, (*supports).Items...)
totalPages = (*supports).TotalPages
}
producerWG := &stop.Group{}
claimIDChan := make(chan string, len(allSupports))
abandonRspChan := make(chan abandonResponse, len(allSupports))
alreadyAbandoned := make(map[string]bool, len(allSupports))
producerWG.Add(1)
go func() {
defer producerWG.Done()
for _, support := range allSupports {
_, ok := alreadyAbandoned[support.ClaimID]
if ok {
continue
}
alreadyAbandoned[support.ClaimID] = true
claimIDChan <- support.ClaimID
}
}()
consumerWG := &stop.Group{}
//TODO: remove this once the SDK team fixes their RPC bugs....
s.daemon.SetRPCTimeout(60 * time.Second)
defer s.daemon.SetRPCTimeout(5 * time.Minute)
for i := 0; i < s.Manager.CliFlags.ConcurrentJobs; i++ {
consumerWG.Add(1)
go func() {
defer consumerWG.Done()
outer:
for {
claimID, more := <-claimIDChan
if !more {
return
} else {
summary, err := s.daemon.TxoSpend(util.PtrToString("support"), &claimID, nil, nil, nil, &defaultAccount)
if err != nil {
if strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") {
log.Errorf("Support abandon for %s timed out, retrying...", claimID)
summary, err = s.daemon.TxoSpend(util.PtrToString("support"), &claimID, nil, nil, nil, &defaultAccount)
if err != nil {
//TODO GUESS HOW MUCH LBC WAS RELEASED THAT WE DON'T KNOW ABOUT, because screw you SDK
abandonRspChan <- abandonResponse{
ClaimID: claimID,
Error: err,
Amount: 0, // this is likely wrong, but oh well... there is literally nothing I can do about it
}
continue
}
} else {
abandonRspChan <- abandonResponse{
ClaimID: claimID,
Error: err,
Amount: 0,
}
continue
}
}
if summary == nil || len(*summary) < 1 {
abandonRspChan <- abandonResponse{
ClaimID: claimID,
Error: errors.Err("error abandoning supports: no outputs while abandoning %s", claimID),
Amount: 0,
}
continue
}
var outputAmount float64
for _, tx := range *summary {
amount, err := strconv.ParseFloat(tx.Outputs[0].Amount, 64)
if err != nil {
abandonRspChan <- abandonResponse{
ClaimID: claimID,
Error: errors.Err(err),
Amount: 0,
}
continue outer
}
outputAmount += amount
}
if err != nil {
abandonRspChan <- abandonResponse{
ClaimID: claimID,
Error: errors.Err(err),
Amount: 0,
}
continue
}
log.Infof("Abandoned supports of %.4f LBC for claim %s", outputAmount, claimID)
abandonRspChan <- abandonResponse{
ClaimID: claimID,
Error: nil,
Amount: outputAmount,
}
continue
}
}
}()
}
producerWG.Wait()
close(claimIDChan)
consumerWG.Wait()
close(abandonRspChan)
totalAbandoned := 0.0
for r := range abandonRspChan {
if r.Error != nil {
log.Errorf("Failed abandoning supports for %s: %s", r.ClaimID, r.Error.Error())
continue
}
totalAbandoned += r.Amount
}
return totalAbandoned, nil
}
type updateInfo struct {
ClaimID string
streamUpdateOptions *jsonrpc.StreamUpdateOptions
videoStatus *shared.VideoStatus
}
func transferVideos(s *Sync) error {
start := time.Now()
defer func(start time.Time) {
timing.TimedComponent("transferVideos").Add(time.Since(start))
}(start)
cleanTransfer := true
streamChan := make(chan updateInfo, s.Manager.CliFlags.ConcurrentJobs)
account, err := s.getDefaultAccount()
if err != nil {
return err
}
streams, err := s.daemon.StreamList(&account, 1, 30000)
if err != nil {
return errors.Err(err)
}
producerWG := &stop.Group{}
producerWG.Add(1)
go func() {
defer producerWG.Done()
for _, video := range s.syncedVideos {
if !video.Published || video.Transferred || video.MetadataVersion != shared.LatestMetadataVersion {
continue
}
var stream *jsonrpc.Claim = nil
for _, c := range streams.Items {
if c.ClaimID != video.ClaimID || (c.SigningChannel != nil && c.SigningChannel.ClaimID != s.DbChannelData.ChannelClaimID) {
continue
}
stream = &c
break
}
if stream == nil {
return
}
streamUpdateOptions := jsonrpc.StreamUpdateOptions{
StreamCreateOptions: &jsonrpc.StreamCreateOptions{
ClaimCreateOptions: jsonrpc.ClaimCreateOptions{
ClaimAddress: &s.DbChannelData.PublishAddress.Address,
FundingAccountIDs: []string{
account,
},
},
},
Bid: util.PtrToString(fmt.Sprintf("%.5f", publishAmount/2.)),
}
videoStatus := shared.VideoStatus{
ChannelID: s.DbChannelData.ChannelId,
VideoID: video.VideoID,
ClaimID: video.ClaimID,
ClaimName: video.ClaimName,
Status: shared.VideoStatusPublished,
IsTransferred: util.PtrToBool(true),
}
streamChan <- updateInfo{
ClaimID: video.ClaimID,
streamUpdateOptions: &streamUpdateOptions,
videoStatus: &videoStatus,
}
}
}()
consumerWG := &stop.Group{}
for i := 0; i < s.Manager.CliFlags.ConcurrentJobs; i++ {
consumerWG.Add(1)
go func(worker int) {
defer consumerWG.Done()
for {
ui, more := <-streamChan
if !more {
return
} else {
err := s.streamUpdate(&ui)
if err != nil {
cleanTransfer = false
}
}
}
}(i)
}
producerWG.Wait()
close(streamChan)
consumerWG.Wait()
if !cleanTransfer {
return errors.Err("A video has failed to transfer for the channel...skipping channel transfer")
}
return nil
}
func (s *Sync) streamUpdate(ui *updateInfo) error {
start := time.Now()
result, updateError := s.daemon.StreamUpdate(ui.ClaimID, *ui.streamUpdateOptions)
timing.TimedComponent("transferStreamUpdate").Add(time.Since(start))
if updateError != nil {
ui.videoStatus.FailureReason = updateError.Error()
ui.videoStatus.Status = shared.VideoStatusTransferFailed
ui.videoStatus.IsTransferred = util.PtrToBool(false)
} else {
ui.videoStatus.IsTransferred = util.PtrToBool(len(result.Outputs) != 0)
}
log.Infof("TRANSFERRED %t", *ui.videoStatus.IsTransferred)
statusErr := s.Manager.ApiConfig.MarkVideoStatus(*ui.videoStatus)
if statusErr != nil {
return errors.Prefix(statusErr.Error(), updateError)
}
return errors.Err(updateError)
}
func transferChannel(s *Sync) error {
start := time.Now()
defer func(start time.Time) {
timing.TimedComponent("transferChannel").Add(time.Since(start))
}(start)
account, err := s.getDefaultAccount()
if err != nil {
return err
}
channelClaims, err := s.daemon.ChannelList(&account, 1, 50, nil)
if err != nil {
return errors.Err(err)
}
var channelClaim *jsonrpc.Transaction = nil
for _, c := range channelClaims.Items {
if c.ClaimID != s.DbChannelData.ChannelClaimID {
continue
}
channelClaim = &c
break
}
if channelClaim == nil {
return nil
}
updateOptions := jsonrpc.ChannelUpdateOptions{
Bid: util.PtrToString(fmt.Sprintf("%.6f", channelClaimAmount-0.005)),
ChannelCreateOptions: jsonrpc.ChannelCreateOptions{
ClaimCreateOptions: jsonrpc.ClaimCreateOptions{
ClaimAddress: &s.DbChannelData.PublishAddress.Address,
},
},
}
result, err := s.daemon.ChannelUpdate(s.DbChannelData.ChannelClaimID, updateOptions)
if err != nil {
return errors.Err(err)
}
log.Infof("TRANSFERRED %t", len(result.Outputs) != 0)
return nil
}

File diff suppressed because it is too large

17
metrics/metrics.go Normal file

@ -0,0 +1,17 @@
package metrics
import (
"github.com/lbryio/ytsync/v5/configs"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
Durations = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "ytsync",
Subsystem: configs.Configuration.GetHostname(),
Name: "duration",
Help: "The durations of the individual modules",
}, []string{"path"})
)
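Durations is a HistogramVec with a single "path" label, registered via promauto at package init. A minimal usage sketch, assuming configs.Configuration is already initialized and using a hypothetical "publish" label value:

package main

import (
	"time"

	"github.com/lbryio/ytsync/v5/metrics"
)

func main() {
	start := time.Now()
	// ... run the module being measured ...
	// "publish" is a hypothetical value for the "path" label.
	metrics.Durations.WithLabelValues("publish").Observe(time.Since(start).Seconds())
}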


@ -10,7 +10,7 @@ import (
"sync"
)
var titleRegexp = regexp.MustCompile(`[^a-zA-Z0-9]+`)
var claimNameRegexp = regexp.MustCompile(`[=&#:$@%?;、\\"/<>%{}|^~\x60[\]\s]`)
type Namer struct {
mu *sync.Mutex
@ -43,9 +43,16 @@ func (n *Namer) GetNextName(prefix string) string {
}
//if for some reason the title can't be converted into a valid claim name (too short or not latin) then we use a hash
attempt = 1
if len(name) < 2 {
sum := md5.Sum([]byte(prefix))
name = fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:])[:15], attempt)
for {
name = fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:])[:15], attempt)
if _, exists := n.names[name]; !exists {
break
}
attempt++
}
}
n.names[name] = true
@ -61,18 +68,21 @@ func getClaimNameFromTitle(title string, attempt int) string {
}
maxLen := 40 - len(suffix)
chunks := strings.Split(strings.ToLower(strings.Trim(titleRegexp.ReplaceAllString(title, "-"), "-")), "-")
chunks := strings.Split(strings.ToLower(strings.Trim(claimNameRegexp.ReplaceAllString(title, "-"), "-")), "-")
name := chunks[0]
if len(name) > maxLen {
return name[:maxLen]
return truncateUnicode(name, maxLen) + suffix
}
for _, chunk := range chunks[1:] {
if chunk == "" {
continue
}
tmpName := name + "-" + chunk
if len(tmpName) > maxLen {
if len(name) < 20 {
name = tmpName[:maxLen]
name = truncateUnicode(tmpName, maxLen-len(name))
}
break
}
@ -81,3 +91,18 @@ func getClaimNameFromTitle(title string, attempt int) string {
return name + suffix
}
func truncateUnicode(name string, limit int) string {
reNameBlacklist := regexp.MustCompile(`(&|>|<|\/|:|\n|\r)*`)
name = reNameBlacklist.ReplaceAllString(name, "")
result := name
chars := 0
for i := range name {
if chars >= limit {
result = name[:i]
break
}
chars++
}
return result
}
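truncateUnicode counts runes, not bytes: the "for i := range name" loop advances by rune boundaries, so the cut never lands inside a multi-byte character. A small sketch of the behavior; it has to live in package namer because the function is unexported:

package namer

import "testing"

func TestTruncateUnicodeSketch(t *testing.T) {
	// The first 5 runes of this string span 15 bytes; truncation is rune-safe.
	if got := truncateUnicode("錢包整理術-5-tips", 5); got != "錢包整理術" {
		t.Errorf("got %q", got)
	}
}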

28
namer/names_test.go Normal file

@ -0,0 +1,28 @@
package namer
import (
"testing"
"github.com/stretchr/testify/assert"
)
func Test_getClaimNameFromTitle(t *testing.T) {
name := getClaimNameFromTitle("СтопХам - \"В ожидании ответа\"", 0)
assert.Equal(t, "стопхам-в-ожидании", name)
name = getClaimNameFromTitle("SADB - \"A Weak Woman With a Strong Hood\"", 0)
assert.Equal(t, "sadb-a-weak-woman-with-a-strong-hood", name)
name = getClaimNameFromTitle("錢包整理術 5 Tips、哪種錢包最NG有錢人默默在做的「錢包整理術」 ft.@SHIN LI", 0)
assert.Equal(t, "錢包整理術-5-tips-哪種錢包最ng", name)
name = getClaimNameFromTitle("اسرع-طريقة-لتختيم", 0)
assert.Equal(t, "اسرع-طريقة-لتختيم", name)
name = getClaimNameFromTitle("شكرا على 380 مشترك😍😍😍😍 لي يريد دعم ادا وصلنا المقطع 40 لايك وراح ادعم قناتين", 0)
assert.Equal(t, "شكرا-على-380-مشترك😍😍😍", name)
name = getClaimNameFromTitle("test-@", 0)
assert.Equal(t, "test", name)
name = getClaimNameFromTitle("『あなたはただの空の殻でした』", 0)
assert.Equal(t, "『あなたはただの空の殻でした』", name)
name = getClaimNameFromTitle("精靈樂章-這樣的夥伴沒問題嗎 幽暗隕石坑(夢魘) 王有無敵狀態...要會閃不然會被秒(無課)", 2)
assert.Equal(t, "精靈樂章-這樣的夥伴沒問題嗎-2", name)
name = getClaimNameFromTitle("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 50)
assert.Equal(t, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-50", name)
}


@ -11,8 +11,12 @@ import (
"strings"
"time"
"github.com/lbryio/lbry.go/extras/errors"
"github.com/lbryio/lbry.go/extras/null"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/null"
"github.com/lbryio/ytsync/v5/configs"
"github.com/lbryio/ytsync/v5/shared"
"github.com/lbryio/ytsync/v5/util"
log "github.com/sirupsen/logrus"
)
@ -22,52 +26,57 @@ const (
)
type APIConfig struct {
YoutubeAPIKey string
ApiURL string
ApiToken string
HostName string
ApiURL string
ApiToken string
HostName string
}
type SyncProperties struct {
SyncFrom int64
SyncUntil int64
YoutubeChannelID string
var instance *APIConfig
func GetAPIsConfigs() *APIConfig {
if instance == nil {
instance = &APIConfig{
ApiURL: configs.Configuration.InternalApisEndpoint,
ApiToken: configs.Configuration.InternalApisAuthToken,
HostName: configs.Configuration.GetHostname(),
}
}
return instance
}
type YoutubeChannel struct {
ChannelId string `json:"channel_id"`
TotalVideos uint `json:"total_videos"`
DesiredChannelName string `json:"desired_channel_name"`
Fee *struct {
Amount string `json:"amount"`
Address string `json:"address"`
Currency string `json:"currency"`
} `json:"fee"`
ChannelClaimID string `json:"channel_claim_id"`
}
func (a *APIConfig) FetchChannels(status string, cp *SyncProperties) ([]YoutubeChannel, error) {
func (a *APIConfig) FetchChannels(status string, cliFlags *shared.SyncFlags) ([]shared.YoutubeChannel, error) {
type apiJobsResponse struct {
Success bool `json:"success"`
Error null.String `json:"error"`
Data []YoutubeChannel `json:"data"`
Success bool `json:"success"`
Error null.String `json:"error"`
Data []shared.YoutubeChannel `json:"data"`
}
endpoint := a.ApiURL + "/yt/jobs"
res, _ := http.PostForm(endpoint, url.Values{
res, err := http.PostForm(endpoint, url.Values{
"auth_token": {a.ApiToken},
"sync_status": {status},
"min_videos": {strconv.Itoa(1)},
"after": {strconv.Itoa(int(cp.SyncFrom))},
"before": {strconv.Itoa(int(cp.SyncUntil))},
"after": {strconv.Itoa(int(cliFlags.SyncFrom))},
"before": {strconv.Itoa(int(cliFlags.SyncUntil))},
"sync_server": {a.HostName},
"channel_id": {cp.YoutubeChannelID},
"channel_id": {cliFlags.ChannelID},
})
if err != nil {
util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
time.Sleep(30 * time.Second)
return a.FetchChannels(status, cliFlags)
}
defer res.Body.Close()
body, _ := ioutil.ReadAll(res.Body)
if res.StatusCode != http.StatusOK {
util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
log.Debugln(string(body))
time.Sleep(30 * time.Second)
return a.FetchChannels(status, cliFlags)
}
var response apiJobsResponse
err := json.Unmarshal(body, &response)
err = json.Unmarshal(body, &response)
if err != nil {
return nil, err
return nil, errors.Err(err)
}
if response.Data == nil {
return nil, errors.Err(response.Error)
@ -84,6 +93,8 @@ type SyncedVideo struct {
ClaimID string `json:"claim_id"`
Size int64 `json:"size"`
MetadataVersion int8 `json:"metadata_version"`
Transferred bool `json:"transferred"`
IsLbryFirst bool `json:"is_lbry_first"`
}
func sanitizeFailureReason(s *string) {
@ -94,7 +105,47 @@ func sanitizeFailureReason(s *string) {
*s = (*s)[:MaxReasonLength]
}
}
func (a *APIConfig) SetChannelStatus(channelID string, status string, failureReason string) (map[string]SyncedVideo, map[string]bool, error) {
func (a *APIConfig) SetChannelCert(certHex string, channelID string) error {
type apiSetChannelCertResponse struct {
Success bool `json:"success"`
Error null.String `json:"error"`
Data string `json:"data"`
}
endpoint := a.ApiURL + "/yt/channel_cert"
res, err := http.PostForm(endpoint, url.Values{
"channel_claim_id": {channelID},
"channel_cert": {certHex},
"auth_token": {a.ApiToken},
})
if err != nil {
util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
time.Sleep(30 * time.Second)
return a.SetChannelCert(certHex, channelID)
}
defer res.Body.Close()
body, _ := ioutil.ReadAll(res.Body)
if res.StatusCode != http.StatusOK {
util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
log.Debugln(string(body))
time.Sleep(30 * time.Second)
return a.SetChannelCert(certHex, channelID)
}
var response apiSetChannelCertResponse
err = json.Unmarshal(body, &response)
if err != nil {
return errors.Err(err)
}
if !response.Error.IsNull() {
return errors.Err(response.Error.String)
}
return nil
}
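SetChannelCert shows the retry convention used by every API helper in this file: on a transport error or a non-200 response it reports to Slack, sleeps 30 seconds, and calls itself again, with no upper bound on attempts. For comparison, a bounded iterative version of the same idea, purely a hypothetical sketch and not part of this codebase:

package main

import (
	"errors"
	"fmt"
	"time"
)

// withRetries is a hypothetical bounded alternative to the
// sleep-and-recurse pattern; it stops after a fixed number of attempts.
func withRetries(attempts int, delay time.Duration, call func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = call(); err == nil {
			return nil
		}
		time.Sleep(delay)
	}
	return err
}

func main() {
	err := withRetries(3, time.Millisecond, func() error { return errors.New("boom") })
	fmt.Println(err) // "boom" after three attempts
}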
func (a *APIConfig) SetChannelStatus(channelID string, status string, failureReason string, transferState *int) (map[string]SyncedVideo, map[string]bool, error) {
type apiChannelStatusResponse struct {
Success bool `json:"success"`
Error null.String `json:"error"`
@ -103,19 +154,34 @@ func (a *APIConfig) SetChannelStatus(channelID string, status string, failureRea
endpoint := a.ApiURL + "/yt/channel_status"
sanitizeFailureReason(&failureReason)
res, _ := http.PostForm(endpoint, url.Values{
params := url.Values{
"channel_id": {channelID},
"sync_server": {a.HostName},
"auth_token": {a.ApiToken},
"sync_status": {status},
"failure_reason": {failureReason},
})
}
if transferState != nil {
params.Add("transfer_state", strconv.Itoa(*transferState))
}
res, err := http.PostForm(endpoint, params)
if err != nil {
util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
time.Sleep(30 * time.Second)
return a.SetChannelStatus(channelID, status, failureReason, transferState)
}
defer res.Body.Close()
body, _ := ioutil.ReadAll(res.Body)
if res.StatusCode >= http.StatusInternalServerError {
util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
log.Debugln(string(body))
time.Sleep(30 * time.Second)
return a.SetChannelStatus(channelID, status, failureReason, transferState)
}
var response apiChannelStatusResponse
err := json.Unmarshal(body, &response)
err = json.Unmarshal(body, &response)
if err != nil {
return nil, nil, err
return nil, nil, errors.Err(err)
}
if !response.Error.IsNull() {
return nil, nil, errors.Err(response.Error.String)
@ -141,15 +207,26 @@ func (a *APIConfig) SetChannelClaimID(channelID string, channelClaimID string) e
Data string `json:"data"`
}
endpoint := a.ApiURL + "/yt/set_channel_claim_id"
res, _ := http.PostForm(endpoint, url.Values{
res, err := http.PostForm(endpoint, url.Values{
"channel_id": {channelID},
"auth_token": {a.ApiToken},
"channel_claim_id": {channelClaimID},
})
if err != nil {
util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
time.Sleep(30 * time.Second)
return a.SetChannelClaimID(channelID, channelClaimID)
}
defer res.Body.Close()
body, _ := ioutil.ReadAll(res.Body)
if res.StatusCode != http.StatusOK {
util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
log.Debugln(string(body))
time.Sleep(30 * time.Second)
return a.SetChannelClaimID(channelID, channelClaimID)
}
var response apiChannelStatusResponse
err := json.Unmarshal(body, &response)
err = json.Unmarshal(body, &response)
if err != nil {
return errors.Err(err)
}
@ -163,44 +240,101 @@ func (a *APIConfig) SetChannelClaimID(channelID string, channelClaimID string) e
}
const (
VideoStatusPublished = "published"
VideoStatusFailed = "failed"
VideoStatusPublished = "published"
VideoStatusUpgradeFailed = "upgradefailed"
VideoStatusFailed = "failed"
)
func (a *APIConfig) MarkVideoStatus(channelID string, videoID string, status string, claimID string, claimName string, failureReason string, size *int64, metadataVersion uint) error {
endpoint := a.ApiURL + "/yt/video_status"
sanitizeFailureReason(&failureReason)
func (a *APIConfig) DeleteVideos(videos []string) error {
endpoint := a.ApiURL + "/yt/video_delete"
videoIDs := strings.Join(videos, ",")
vals := url.Values{
"youtube_channel_id": {channelID},
"video_id": {videoID},
"status": {status},
"auth_token": {a.ApiToken},
"video_ids": {videoIDs},
"auth_token": {a.ApiToken},
}
if status == VideoStatusPublished {
if claimID == "" || claimName == "" {
return errors.Err("claimID or claimName missing")
}
vals.Add("published_at", strconv.FormatInt(time.Now().Unix(), 10))
vals.Add("claim_id", claimID)
vals.Add("claim_name", claimName)
vals.Add("metadata_version", fmt.Sprintf("%d", metadataVersion))
if size != nil {
vals.Add("size", strconv.FormatInt(*size, 10))
}
res, err := http.PostForm(endpoint, vals)
if err != nil {
util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
time.Sleep(30 * time.Second)
return a.DeleteVideos(videos)
}
if failureReason != "" {
vals.Add("failure_reason", failureReason)
}
res, _ := http.PostForm(endpoint, vals)
defer res.Body.Close()
body, _ := ioutil.ReadAll(res.Body)
if res.StatusCode != http.StatusOK {
util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
log.Debugln(string(body))
time.Sleep(30 * time.Second)
return a.DeleteVideos(videos)
}
var response struct {
Success bool `json:"success"`
Error null.String `json:"error"`
Data null.String `json:"data"`
}
err := json.Unmarshal(body, &response)
err = json.Unmarshal(body, &response)
if err != nil {
return errors.Err(err)
}
if !response.Error.IsNull() {
return errors.Err(response.Error.String)
}
if !response.Data.IsNull() && response.Data.String == "ok" {
return nil
}
return errors.Err("invalid API response. Status code: %d", res.StatusCode)
}
func (a *APIConfig) MarkVideoStatus(status shared.VideoStatus) error {
endpoint := a.ApiURL + "/yt/video_status"
sanitizeFailureReason(&status.FailureReason)
vals := url.Values{
"youtube_channel_id": {status.ChannelID},
"video_id": {status.VideoID},
"status": {status.Status},
"auth_token": {a.ApiToken},
}
if status.Status == VideoStatusPublished || status.Status == VideoStatusUpgradeFailed {
if status.ClaimID == "" || status.ClaimName == "" {
return errors.Err("claimID (%s) or claimName (%s) missing", status.ClaimID, status.ClaimName)
}
vals.Add("published_at", strconv.FormatInt(time.Now().Unix(), 10))
vals.Add("claim_id", status.ClaimID)
vals.Add("claim_name", status.ClaimName)
if status.MetaDataVersion > 0 {
vals.Add("metadata_version", fmt.Sprintf("%d", status.MetaDataVersion))
}
if status.Size != nil {
vals.Add("size", strconv.FormatInt(*status.Size, 10))
}
}
if status.FailureReason != "" {
vals.Add("failure_reason", status.FailureReason)
}
if status.IsTransferred != nil {
vals.Add("transferred", strconv.FormatBool(*status.IsTransferred))
}
res, err := http.PostForm(endpoint, vals)
if err != nil {
util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
time.Sleep(30 * time.Second)
return a.MarkVideoStatus(status)
}
defer res.Body.Close()
body, _ := ioutil.ReadAll(res.Body)
if res.StatusCode != http.StatusOK {
util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
log.Debugln(string(body))
time.Sleep(30 * time.Second)
return a.MarkVideoStatus(status)
}
var response struct {
Success bool `json:"success"`
Error null.String `json:"error"`
Data null.String `json:"data"`
}
err = json.Unmarshal(body, &response)
if err != nil {
return err
}
@ -212,3 +346,96 @@ func (a *APIConfig) MarkVideoStatus(channelID string, videoID string, status str
}
return errors.Err("invalid API response. Status code: %d", res.StatusCode)
}
func (a *APIConfig) VideoState(videoID string) (string, error) {
endpoint := a.ApiURL + "/yt/video_state"
vals := url.Values{
"video_id": {videoID},
"auth_token": {a.ApiToken},
}
res, err := http.PostForm(endpoint, vals)
if err != nil {
util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
time.Sleep(30 * time.Second)
return a.VideoState(videoID)
}
defer res.Body.Close()
body, _ := ioutil.ReadAll(res.Body)
if res.StatusCode == http.StatusNotFound {
return "not_found", nil
}
if res.StatusCode != http.StatusOK {
util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
log.Debugln(string(body))
time.Sleep(30 * time.Second)
return a.VideoState(videoID)
}
var response struct {
Success bool `json:"success"`
Error null.String `json:"error"`
Data null.String `json:"data"`
}
err = json.Unmarshal(body, &response)
if err != nil {
return "", errors.Err(err)
}
if !response.Error.IsNull() {
return "", errors.Err(response.Error.String)
}
if !response.Data.IsNull() {
return response.Data.String, nil
}
return "", errors.Err("invalid API response. Status code: %d", res.StatusCode)
}
type VideoRelease struct {
ID uint64 `json:"id"`
YoutubeDataID uint64 `json:"youtube_data_id"`
VideoID string `json:"video_id"`
ReleaseTime string `json:"release_time"`
CreatedAt string `json:"created_at"`
UpdatedAt string `json:"updated_at"`
}
func (a *APIConfig) GetReleasedDate(videoID string) (*VideoRelease, error) {
endpoint := a.ApiURL + "/yt/released"
vals := url.Values{
"video_id": {videoID},
"auth_token": {a.ApiToken},
}
res, err := http.PostForm(endpoint, vals)
if err != nil {
util.SendErrorToSlack("error while trying to call %s. Waiting to retry: %s", endpoint, err.Error())
time.Sleep(30 * time.Second)
return a.GetReleasedDate(videoID)
}
defer res.Body.Close()
body, _ := ioutil.ReadAll(res.Body)
if res.StatusCode == http.StatusNotFound {
return nil, nil
}
if res.StatusCode != http.StatusOK {
util.SendErrorToSlack("Error %d while trying to call %s. Waiting to retry", res.StatusCode, endpoint)
log.Debugln(string(body))
time.Sleep(30 * time.Second)
return a.GetReleasedDate(videoID)
}
var response struct {
Success bool `json:"success"`
Error null.String `json:"error"`
Data VideoRelease `json:"data"`
}
err = json.Unmarshal(body, &response)
if err != nil {
return nil, errors.Err(err)
}
if !response.Error.IsNull() {
return nil, errors.Err(response.Error.String)
}
if response.Data.ReleaseTime != "" {
return &response.Data, nil
}
return nil, errors.Err("invalid API response. Status code: %d", res.StatusCode)
}

221
shared/shared.go Normal file

@ -0,0 +1,221 @@
package shared
import (
"encoding/json"
"time"
"github.com/lbryio/lbry.go/v2/extras/errors"
)
type Fee struct {
Amount string `json:"amount"`
Address string `json:"address"`
Currency string `json:"currency"`
}
type YoutubeChannel struct {
ChannelId string `json:"channel_id"`
TotalVideos uint `json:"total_videos"`
TotalSubscribers uint `json:"total_subscribers"`
DesiredChannelName string `json:"desired_channel_name"`
Fee *Fee `json:"fee"`
ChannelClaimID string `json:"channel_claim_id"`
TransferState int `json:"transfer_state"`
PublishAddress PublishAddress `json:"publish_address"`
PublicKey string `json:"public_key"`
LengthLimit int `json:"length_limit"`
SizeLimit int `json:"size_limit"`
LastUploadedVideo string `json:"last_uploaded_video"`
WipeDB bool `json:"wipe_db"`
Language string `json:"language"`
}
type PublishAddress struct {
Address string `json:"address"`
IsMine bool `json:"is_mine"`
}
func (p *PublishAddress) UnmarshalJSON(data []byte) error {
var s string
if err := json.Unmarshal(data, &s); err != nil {
return errors.Err(err)
}
p.Address = s
p.IsMine = false
return nil
}
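The custom unmarshaler exists because the API apparently sends publish_address as a bare JSON string; decoding one fills Address and leaves IsMine at its false default. A quick sketch (the address value is made up):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/lbryio/ytsync/v5/shared"
)

func main() {
	var p shared.PublishAddress
	// "bExampleAddress" is a hypothetical address value.
	if err := json.Unmarshal([]byte(`"bExampleAddress"`), &p); err != nil {
		panic(err)
	}
	fmt.Println(p.Address, p.IsMine) // bExampleAddress false
}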
var FatalErrors = []string{
":5279: read: connection reset by peer",
"no space left on device",
"NotEnoughFunds",
"Cannot publish using channel",
"cannot concatenate 'str' and 'NoneType' objects",
"more than 90% of the space has been used.",
"Couldn't find private key for id",
"You already have a stream claim published under the name",
"Missing inputs",
}
var ErrorsNoRetry = []string{
"Requested format is not available",
"non 200 status code received",
"This video contains content from",
"dont know which claim to update",
"uploader has not made this video available in your country",
"download error: AccessDenied: Access Denied",
"Playback on other websites has been disabled by the video owner",
"Error in daemon: Cannot publish empty file",
"Error extracting sts from embedded url response",
"Unable to extract signature tokens",
"Client.Timeout exceeded while awaiting headers",
"the video is too big to sync, skipping for now",
"video is too long to process",
"video is too short to process",
"no compatible format available for this video",
"Watch this video on YouTube.",
"have blocked it on copyright grounds",
"the video must be republished as we can't get the right size",
"HTTP Error 403",
"giving up after 0 fragment retries",
"Sorry about that",
"This video is not available",
"Video unavailable",
"requested format not available",
"interrupted by user",
"Sign in to confirm your age",
"This video is unavailable",
"video is a live stream and hasn't completed yet",
"Premieres in",
"Private video",
"This live event will begin in",
"This video has been removed by the uploader",
"Premiere will begin shortly",
"cannot unmarshal number 0.0",
"default youtube thumbnail found",
"livestream is likely bugged",
}
var WalletErrors = []string{
"Not enough funds to cover this transaction",
"failed: Not enough funds",
"Error in daemon: Insufficient funds, please deposit additional LBC",
//"Missing inputs",
}
var BlockchainErrors = []string{
"txn-mempool-conflict",
"too-long-mempool-chain",
}
var NeverRetryFailures = []string{
"Error extracting sts from embedded url response",
"Unable to extract signature tokens",
"the video is too big to sync, skipping for now",
"video is too long to process",
"video is too short to process",
"This video contains content from",
"no compatible format available for this video",
"Watch this video on YouTube.",
"have blocked it on copyright grounds",
"giving up after 0 fragment retries",
"Sign in to confirm your age",
"Playback on other websites has been disabled by the video owner",
"uploader has not made this video available in your country",
"This video has been removed by the uploader",
"Video unavailable",
"Video is not available - hardcoded fix",
}
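These slices are plain substring catalogs; presumably the sync code decides whether to retry by matching an error's text against them. A minimal sketch under that assumption (containsAny is a hypothetical helper, not the real matching logic):

package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/lbryio/ytsync/v5/shared"
)

// containsAny reports whether the error text contains any catalog entry.
func containsAny(err error, catalog []string) bool {
	for _, c := range catalog {
		if strings.Contains(err.Error(), c) {
			return true
		}
	}
	return false
}

func main() {
	err := errors.New("youtube-dl: Video unavailable")
	fmt.Println(containsAny(err, shared.NeverRetryFailures)) // true
}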
type SyncFlags struct {
TakeOverExistingChannel bool
SkipSpaceCheck bool
SyncUpdate bool
SingleRun bool
RemoveDBUnpublished bool
UpgradeMetadata bool
DisableTransfers bool
QuickSync bool
MaxTries int
Refill int
Limit int
Status string
SecondaryStatus string
ChannelID string
SyncFrom int64
SyncUntil int64
ConcurrentJobs int
VideosLimit int
MaxVideoSize int
MaxVideoLength time.Duration
}
// VideosToSync dynamically figures out how many videos should be synced for a given subs count if nothing was otherwise specified
func (f *SyncFlags) VideosToSync(totalSubscribers uint) int {
if f.VideosLimit > 0 {
return f.VideosLimit
}
defaultVideosToSync := map[int]int{
10000: 1000,
5000: 500,
1000: 400,
800: 250,
600: 200,
200: 80,
100: 20,
1: 10,
}
videosToSync := 0
for s, r := range defaultVideosToSync {
if int(totalSubscribers) >= s && r > videosToSync {
videosToSync = r
}
}
return videosToSync
}
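The map keys are subscriber thresholds and the loop keeps the largest quota whose threshold is met: 500 subscribers clears the 1, 100, and 200 tiers, so the 200 tier's quota of 80 wins. A short sketch:

package main

import (
	"fmt"

	"github.com/lbryio/ytsync/v5/shared"
)

func main() {
	f := shared.SyncFlags{}
	fmt.Println(f.VideosToSync(500)) // 80: the 200-subscriber tier is the highest match
	f.VideosLimit = 1337
	fmt.Println(f.VideosToSync(500)) // 1337: an explicit limit bypasses the tiers
}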
func (f *SyncFlags) IsSingleChannelSync() bool {
return f.ChannelID != ""
}
type VideoStatus struct {
ChannelID string
VideoID string
Status string
ClaimID string
ClaimName string
FailureReason string
Size *int64
MetaDataVersion uint
IsTransferred *bool
}
const (
StatusPending = "pending" // waiting for permission to sync
StatusPendingEmail = "pendingemail" // permission granted but missing email
StatusQueued = "queued" // in sync queue. will be synced soon
StatusPendingUpgrade = "pendingupgrade" // in sync queue. will be synced soon
StatusSyncing = "syncing" // syncing now
StatusSynced = "synced" // done
StatusWipeDb = "pendingdbwipe" // in sync queue. lbryum database will be pruned
StatusFailed = "failed"
StatusFinalized = "finalized" // no more changes allowed
StatusAbandoned = "abandoned" // deleted on youtube or banned
StatusAgeRestricted = "agerestricted" // one or more videos are age restricted and should be reprocessed with special keys
)
var SyncStatuses = []string{StatusPending, StatusPendingEmail, StatusPendingUpgrade, StatusQueued, StatusSyncing, StatusSynced, StatusFailed, StatusFinalized, StatusAbandoned, StatusWipeDb, StatusAgeRestricted}
const LatestMetadataVersion = 2
const (
VideoStatusPublished = "published"
VideoStatusFailed = "failed"
VideoStatusUpgradeFailed = "upgradefailed"
VideoStatusUnpublished = "unpublished"
VideoStatusTransferFailed = "transferfailed"
)
var VideoSyncStatuses = []string{VideoStatusPublished, VideoStatusFailed, VideoStatusUpgradeFailed, VideoStatusUnpublished, VideoStatusTransferFailed}
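VideoSyncStatuses gathers every valid per-video status into one slice so callers can validate incoming values. A minimal membership check (isValidVideoStatus is a hypothetical helper name):

package main

import (
	"fmt"

	"github.com/lbryio/ytsync/v5/shared"
)

// isValidVideoStatus reports whether s is one of the known video statuses.
func isValidVideoStatus(s string) bool {
	for _, v := range shared.VideoSyncStatuses {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isValidVideoStatus("published"))   // true
	fmt.Println(isValidVideoStatus("frobnicated")) // false
}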
const (
TransferStateNotTouched = iota
TransferStatePending
TransferStateComplete
TransferStateManual
)

20
shared/shared_test.go Normal file

@ -0,0 +1,20 @@
package shared
import (
"testing"
"gotest.tools/assert"
)
func TestSyncFlags_VideosToSync(t *testing.T) {
f := SyncFlags{}
assert.Equal(t, f.VideosToSync(0), 0)
assert.Equal(t, f.VideosToSync(1), 10)
assert.Equal(t, f.VideosToSync(5), 10)
assert.Equal(t, f.VideosToSync(10), 10)
assert.Equal(t, f.VideosToSync(101), 50)
assert.Equal(t, f.VideosToSync(500), 80)
assert.Equal(t, f.VideosToSync(21000), 1000)
f.VideosLimit = 1337
assert.Equal(t, f.VideosToSync(21), 1337)
}


@ -2,9 +2,10 @@ package sources
import (
"strings"
"sync"
"github.com/lbryio/lbry.go/extras/jsonrpc"
"github.com/lbryio/ytsync/namer"
"github.com/lbryio/lbry.go/v2/extras/jsonrpc"
"github.com/lbryio/ytsync/v5/namer"
)
type SyncSummary struct {
@ -12,7 +13,9 @@ type SyncSummary struct {
ClaimName string
}
func publishAndRetryExistingNames(daemon *jsonrpc.Client, title, filename string, amount float64, options jsonrpc.StreamCreateOptions, namer *namer.Namer) (*SyncSummary, error) {
func publishAndRetryExistingNames(daemon *jsonrpc.Client, title, filename string, amount float64, options jsonrpc.StreamCreateOptions, namer *namer.Namer, walletLock *sync.RWMutex) (*SyncSummary, error) {
walletLock.RLock()
defer walletLock.RUnlock()
for {
name := namer.GetNextName(title)
response, err := daemon.StreamCreate(name, filename, amount, options)

File diff suppressed because it is too large


@ -0,0 +1,53 @@
package sources
import (
"regexp"
"testing"
"github.com/abadojack/whatlanggo"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
)
func TestLanguageDetection(t *testing.T) {
description := `Om lättkränkta muslimer, och den bristande logiken i vad som anses vara att vanära profeten. Från Moderata riksdagspolitikern Hanif Balis podcast "God Ton", avsnitt 108, från oktober 2020, efter terrordådet där en fransk lärare fick huvudet avskuret efter att undervisat sin mångkulturella klass om frihet.`
info := whatlanggo.Detect(description)
logrus.Infof("confidence: %.2f", info.Confidence)
assert.True(t, info.IsReliable())
assert.True(t, info.Lang.Iso6391() != "")
assert.Equal(t, "sv", info.Lang.Iso6391())
description = `🥳週四直播 | 晚上來開個賽車🔰歡迎各位一起來玩! - PonPonLin蹦蹦林`
info = whatlanggo.Detect(description)
logrus.Infof("confidence: %.2f", info.Confidence)
assert.True(t, info.IsReliable())
assert.True(t, info.Lang.Iso6391() != "")
assert.Equal(t, "zh", info.Lang.Iso6391())
description = `成為這個頻道的會員並獲得獎勵
https://www.youtube.com/channel/UCOQFrooz-YGHjYb7s3-MrsQ/join
_____________________________________________
想聽我既音樂作品可以去下面LINK
streetvoice 街聲
https://streetvoice.com/CTLam331/
_____________________________________________
想學結他鋼琴
有關音樂制作工作
都可以搵我
大家快D訂閱喇
不定期出片
Website: http://ctlam331.wixsite.com/ctlamusic
FB PAGEhttps://www.facebook.com/ctlam331
IGctlamusic`
urlsRegex := regexp.MustCompile(`(?m) ?(f|ht)(tp)(s?)(://)(.*)[.|/](.*)`)
descriptionSample := urlsRegex.ReplaceAllString(description, "")
info = whatlanggo.Detect(descriptionSample)
logrus.Infof("confidence: %.2f", info.Confidence)
assert.True(t, info.IsReliable())
assert.True(t, info.Lang.Iso6391() != "")
assert.Equal(t, "zh", info.Lang.Iso6391())
}


@ -1,152 +0,0 @@
import os
import sys
from decimal import Decimal
from bitcoinrpc.authproxy import AuthServiceProxy
from lbryum.wallet import Wallet, WalletStorage
from lbryum.commands import known_commands, Commands
from lbryum.simple_config import SimpleConfig
from lbryum.blockchain import get_blockchain
from lbryum.network import Network
def get_lbrycrdd_connection_string(wallet_conf):
settings = {"username": "rpcuser",
"password": "rpcpassword",
"rpc_port": 9245}
if wallet_conf and os.path.exists(wallet_conf):
with open(wallet_conf, "r") as conf:
conf_lines = conf.readlines()
for l in conf_lines:
if l.startswith("rpcuser="):
settings["username"] = l[8:].rstrip('\n')
if l.startswith("rpcpassword="):
settings["password"] = l[12:].rstrip('\n')
if l.startswith("rpcport="):
settings["rpc_port"] = int(l[8:].rstrip('\n'))
rpc_user = settings["username"]
rpc_pass = settings["password"]
rpc_port = settings["rpc_port"]
rpc_url = "127.0.0.1"
return "http://%s:%s@%s:%i" % (rpc_user, rpc_pass, rpc_url, rpc_port)
class LBRYumWallet(object):
def __init__(self, lbryum_path):
self.config = SimpleConfig()
self.config.set_key('chain', 'lbrycrd_main')
self.storage = WalletStorage(lbryum_path)
self.wallet = Wallet(self.storage)
self.cmd_runner = Commands(self.config, self.wallet, None)
if not self.wallet.has_seed():
seed = self.wallet.make_seed()
self.wallet.add_seed(seed, "derp")
self.wallet.create_master_keys("derp")
self.wallet.create_main_account()
self.wallet.update_password("derp", "")
self.network = Network(self.config)
self.blockchain = get_blockchain(self.config, self.network)
print self.config.get('chain'), self.blockchain
self.wallet.storage.write()
def command(self, command_name, *args, **kwargs):
cmd_runner = Commands(self.config, self.wallet, None)
cmd = known_commands[command_name]
func = getattr(cmd_runner, cmd.name)
return func(*args, **kwargs)
def generate_address(self):
address = self.wallet.create_new_address()
self.wallet.storage.write()
return address
class LBRYcrd(object):
def __init__(self, lbrycrdd_path):
self.lbrycrdd_conn_str = get_lbrycrdd_connection_string(lbrycrdd_path)
def __call__(self, method, *args, **kwargs):
return self.rpc(method)(*args, **kwargs)
def rpc(self, method):
return AuthServiceProxy(self.lbrycrdd_conn_str, service_name=method)
def get_wallet_path():
cwd = os.getcwd()
wallet_path = os.path.join(cwd, "wallet.json")
if not os.path.exists(wallet_path):
return wallet_path
i = 1
while True:
wallet_path = os.path.join(cwd, "wallet_%i.json" % i)
if not os.path.exists(wallet_path):
return wallet_path
i += 1
def coin_chooser(lbrycrdd, amount, fee=0.001):
def iter_txis():
unspent = lbrycrdd("listunspent")
unspent = sorted(unspent, key=lambda x: x['amount'], reverse=True)
spendable = Decimal(0.0)
for txi in unspent:
if spendable >= amount:
break
else:
spendable += txi['amount']
yield txi
if spendable < amount:
print spendable, amount
raise Exception("Not enough funds")
coins = list(iter(iter_txis()))
total = sum(c['amount'] for c in coins)
change = Decimal(total) - Decimal(amount) - Decimal(fee)
if change < 0:
raise Exception("Not enough funds")
if change:
change_address = lbrycrdd("getnewaddress")
else:
change_address = None
print "Total: %f, amount: %f, change: %f" % (total, amount, change)
return coins, change, change_address
def get_raw_tx(lbrycrdd, addresses, coins, amount, change, change_address):
txi = [{'txid': c['txid'], 'vout': c['vout']} for c in coins]
txo = {address: float(amount) for address in addresses}
if change_address:
txo[change_address] = float(change)
return lbrycrdd("createrawtransaction", txi, txo)
def main(count, value=None, lbryum_path=None, lbrycrdd_path=None):
count = int(count)
lbryum_path = lbryum_path or get_wallet_path()
if sys.platform == "darwin":
default_lbrycrdd = os.path.join(os.path.expanduser("~"),
"Library/Application Support/lbrycrd/lbrycrd.conf")
else:
default_lbrycrdd = os.path.join(os.path.expanduser("~"), ".lbrycrd/lbrycrd.conf")
lbrycrdd_path = lbrycrdd_path or default_lbrycrdd
l = LBRYcrd(lbrycrdd_path=lbrycrdd_path)
s = LBRYumWallet(lbryum_path)
value = value or 1.0
value = Decimal(value)
coins, change, change_address = coin_chooser(l, count * value)
addresses = [s.generate_address() for i in range(count)]
raw_tx = get_raw_tx(l, addresses, coins, value, change, change_address)
signed = l("signrawtransaction", raw_tx)['hex']
txid = l("sendrawtransaction", signed)
print txid
if __name__ == "__main__":
args = sys.argv[1:]
main(*args)


@ -1,46 +0,0 @@
package tagsManager
import (
"testing"
)
func TestSanitizeTags(t *testing.T) {
got, err := SanitizeTags([]string{"this", "super", "expensive", "test", "has", "a lot of", "crypto", "currency", "in it", "trump", "will build the", "wall"}, "UCNQfQvFMPnInwsU_iGYArJQ")
if err != nil {
t.Error(err)
return
}
expectedTags := []string{
"blockchain",
"switzerland",
"news",
"science & technology",
"economics",
"experiments",
"this",
"in it",
"will build the",
"has",
"crypto",
"trump",
"wall",
"expensive",
"currency",
"a lot of",
}
if len(expectedTags) != len(got) {
t.Error("number of tags differ")
return
}
outer:
for _, et := range expectedTags {
for _, t := range got {
if et == t {
continue outer
}
}
t.Error("tag not found")
return
}
}


@ -1,7 +1,11 @@
package tagsManager
package tags_manager
import (
"regexp"
"sort"
"strings"
log "github.com/sirupsen/logrus"
)
const (
@ -60,24 +64,37 @@ func SanitizeTags(tags []string, youtubeChannelID string) ([]string, error) {
}
}
sanitizedTags := make([]string, 0, len(originalTags)+len(curatedTags))
sort.Strings(curatedTags)
sort.Strings(originalTags)
sanitizedTags = append(sanitizedTags, curatedTags...)
sanitizedTags = append(sanitizedTags, originalTags...)
return sanitizedTags, nil
}
const TagMaxLength = 50
func normalizeTag(t string) (string, error) {
t = strings.ToLower(t)
multipleSpaces := regexp.MustCompile(`\s{2,}`)
leadingAndTrailingSpaces := regexp.MustCompile(`^\s+|\s$`)
hashTags := regexp.MustCompile(`(#\d+\s)|#+`)
inParenthesis := regexp.MustCompile(`\([^\)]+\)`)
weirdChars := regexp.MustCompile(`[^-\w'& +\/A-Za-zÀ-ÖØ-öø-ÿ]`)
startsOrEndsInWeirdChars := regexp.MustCompile(`^[^A-Za-zÀ-ÖØ-öø-ÿ0-9]+|[^A-Za-zÀ-ÖØ-öø-ÿ0-9]+$`)
t = hashTags.ReplaceAllString(t, "")
t = inParenthesis.ReplaceAllString(t, " ")
t = startsOrEndsInWeirdChars.ReplaceAllString(t, "")
t = multipleSpaces.ReplaceAllString(t, " ")
t = leadingAndTrailingSpaces.ReplaceAllString(t, "")
if weirdChars.MatchString(t) {
log.Debugf("tag '%s' has weird stuff in it, skipping\n", t)
return "", nil
}
if len(t) > TagMaxLength {
log.Debugf("tag '%s' is too long, skipping\n", t)
return "", nil
}
return t, nil
//r, err := regexp.Compile("/\\([^\\)]+\\)/g")
//if err != nil {
// return "", errors.Err(err)
//}
//r2, err := regexp.Compile("/[^\\w-'& \+]/g")
//if err != nil {
// return "", errors.Err(err)
//}
//t = r.ReplaceAllString(t, "")
//return r2.ReplaceAllString(t, ""), nil
}
type tagsSanitizer struct {
@ -128,13 +145,417 @@ func (ts *tagsSanitizer) add() {
}
const (
Lunduke = "UCkK9UDm_ZNrq_rIXCz3xCGA"
SwissExperiments = "UCNQfQvFMPnInwsU_iGYArJQ"
SwissExperiments = "UCNQfQvFMPnInwsU_iGYArJQ"
JustJuggling = "UCftqelpjmbFrUwr3VVzzVwA"
anupjanakpur = "UC_5tRfC4L2AbTz6mj6vZrKw"
PraveenMedia = "UC_fjE70lKNwM9AKIofQv-bA"
MisteriosDesvelados = "UC-FzxivscjYzonBDXgX2GLg"
kaipenchery = "UC-MU4K3Ghl-IdEX4J68tnTA"
Dhinchakpooja = "UC-stzLwoQF_R8Vnfb28d7Lg"
Karolajn = "UC-vYDJQiabg9BK8XTDPFspg"
thefantasio974 = "UC0GHeUcPxfNEZqbFyxy2frQ"
EduPrimitivo = "UC0odmP6ffEw3iVwTEyt-UKA"
isupportanupriyapatel = "UC0sxOzdHnmauMdOra0cxXVg"
theconfusedindian = "UC12ZPYxQbMA1loZE1OA609w"
guriaarts = "UC1fHp166o1Hd024fg3-d-jg"
minutodafisica = "UC1lUEcHrQwbusC5ext1himg"
khabir = "UC1yzNCXXk1h_tHgsqChE8mw"
OMapadaMinaMara = "UC2WMUPTbxQQ9hXq2_uutspg"
oliverjanich = "UC3cmEfpy4XED7YYEe69nIMA"
shyamsadhu = "UC3XAT8oBjL2RaqfblESHW_A"
EbaduRahmantech = "UC4950gpY6Qw1lCAcN6gNWwQ"
_1975oles = "UC4bGQWN4C8idymxlZxw77oA"
_8dsongsbollywood = "UC4nH7zmw41lRDbaiIddumjw"
EYESTV = "UC50CIbyHMydEEzEhV1ZNEBw"
Nono = "UC5yufoRPJy-1e1pR73UTxFQ"
lichtle = "UC6n_2v7YjwZ65F9h77nZPOA"
elabuelokraken = "UC7iWk2xziMR4hs_NzfsYytw"
cidvela = "UCaVOx5GCcSi2ELjWUdf05PA"
jayaskumar = "UCax9CJCQ6aY0bntP3mcIWKQ"
canaldarippi = "UCB_fba7yYMwa91F7rijBsVA"
FunMusicClassical = "UCB_X256IN7QiBtDkueo_6QQ"
minutodaterra = "UCB0zinWfy-dS_NqcOINYo3A"
franciscoalves = "UCbAm6YcZGk04obnFb-LuOSA"
criptomonedastv = "UCbK6Awel1-o-9JDVMFq808Q"
lux77 = "UCBYm4l3NX352goFtjSgZ7jA"
MrLuckyOficial = "UCEbMhGhZ1JoOYKgbgVyeQaA"
KotneKit = "UCf_1BQz3T151Eph0Sarj3Yw"
jaysgaming = "UCFAkedtc3jjDZQqqIULxqWg"
iamdineshthakur = "UCg0VInmUZoSdHD-DNmjifBA"
Musicreationz = "UCGJEtZB0Nj3rT3zZfyfjIZQ"
lafenixtv = "UCHAx8o0jj-8L_sqR5QM1Cwg"
guardeiafe = "UCHWjmG-Ce93VNcNZMMUyGQQ"
KHANEKIKHUSHBOO = "UCI8DRKcrfrHklRqSJHPhOwQ"
AjayGamingYT = "UCi9r5igvQblbI0PhJFj-IDg"
SzekelyVegan = "UCiqvKH1Ib_VPwh9bQf_hlXQ"
barzoy888 = "UCIYF1orTg4nDvv29ORoKXyA"
_7playstudiopersian = "UCJ3QUKcd9yOhiJzZBR2zT3Q"
eazypurple = "UCJ6Hd8S39g7pe65A6k42NZw"
GennadyM = "UCjZlOrmC7hi_1kMA6BKZXeQ"
GTGujarati = "UCKnYZmYFdFX4hP09Hh8k8jw"
annapavlidou = "UCKPSHyCFIDtMqzbJ_Qx-7aw"
Tamilmemesclub = "UCL_DPW38jcSGWy0Fu0A_hCg"
QuickReactionTeam = "UCl--LVcANGKHJYN7O8uY6OA"
rodrigojoseoficial = "UClMsSb57Jvexl1a0a2c2_CQ"
AzadChaiwala = "UCM5QNdoIefx6eumjPk8ZTMw"
lafenixextras = "UCM7zougvyPtfd9zw5HmAGlg"
JustSawAGhost = "UCmQYXE03n6qlF9FhtAPztcw"
mirchibangla = "UCmzj6hXrPZ_AwIZ8lgo-HuQ"
marsaguirre = "UCN3OMvdU7ySvE6meGjo9omg"
JustGoool = "UCn7kTV_syU_rI0jANxrpaFg"
nossocanal = "UCNDb9jdx5Jz-C7hWJv3y6Fw"
CSNN = "UCnfn-8PJbjYFAJ1fqzMdcew"
pedronedved = "UCNhb2Fz3sVLZcMb_arm1cMQ"
famoosh = "UCnQU2QZLVG1wxHoy_9LZucA"
Cenoritas = "UCo-VSGoYy_4IjXF9u4QqTiw"
fantabobgames = "UCo0U1tbk3YbqiLDhkeWOviQ"
thesoundofsoul = "UCOey6R7Ktnil8RZrY_xJYiA"
SaurabhMishraJournalist = "UCoHVCBoSfUOlQVa80dlN60g"
YoYoHoneySinghlive = "UCOZTp3nGj39-snzo4QSdduA"
CINEGLITZ = "UCP_p8JkLOwPcGAnxN1iVhZA"
lovetreta = "UCPi3uuGrh2mxnZJMfRhcjiw"
canalvendimeusofa = "UCpk58NDdaKdX0QiiA2e79tg"
CARIBEANVIDEOS = "UCPynGkfzH35RKyXRccCgzNg"
Dimon = "UCpZsZZ6KEqCHeAJ_Y0gEbyQ"
Anantvijaysoni = "UCQ3NeFKF2yo70xxBBUDeXXQ"
canalilhadebarbados = "UCR5L_Q8Tiljy7WKEQOrGKbg"
SotomayorTV2 = "UCR70CjRHxQilfEUgBBYpDhg"
Recipesarab = "UCRbMDfkH_bPUjQsv5dHcFQA"
_4ak10 = "UCsbrIjDPPXuVlWApYTnQECA"
Top50 = "UCSjXR8uvU4PijMM_kgyVQfw"
SoumyajitPyne = "UCSJXwgF3SePNfyojYG3yt6Q"
SneksShow = "UCsqZSkVccnyIxSE6pZuj13g"
casadosaber = "UCtvvTFp0XANyllOdmzZr9VQ"
Entarexyt = "UCuDkq6yAb5zdM2zIRG6VRDw"
UCurU6GLM4ggcLtWWAOTzlYw = "UCurU6GLM4ggcLtWWAOTzlYw"
promining = "UCviqKxlMnZqBipTzwixkGyg"
zapkids = "UCx2uGhYa9EuCNmbL72hVKGg"
Canalokok = "UCXyCAcjoWz9SMLGJT8dR0pA"
SmoothGames = "UCY8gvG25rOZ4hWWvcQH88PQ"
LIGATFA = "UCYhiIMOlDn_HEn3zvqZCDiw"
DeHamad = "UCZ5G07Vw7IaV81InBgYLCcg"
misszizi = "UCE55WTFs4ekJ_aWCoNEapbQ"
creationshub = "UCNfELkowZPIQ-Vegmb3pIBQ"
AlicePandora = "UCScRxEtwlt082_6ThI0YJbg"
Akito0405 = "UCXHWx1teSYIKwTGiscYaC-Q"
dashcambristol = "UCD0IC8bZI-MfIutkgPHlyWA"
adrianbonillap = "UCdmY7p_kC-QN8jS7rocmCSA"
SubhamVlogs = "UCXtdoCLRlnLIW3Skix0rHaQ"
DisciplesofJesusChristJeremiahPayne = "UC_y2rVsotcQcVF7LImVGs5Q"
ModernGalaxy = "UCGISiGs_RL7Z1qfs1-GD6PA"
socofilms = "UCyDS9p6NWHpU9XbbbYLFLBw"
buriedone = "UC_lm7xXB3adOTc0T1FwyGRQ"
bitcoin = "UC-f5nPBEDyUBZz_jOPBwAfQ"
globalrashid = "UC2ldcEtbR7cFYadgrnW3B6Q"
gameofbitcoins = "UC2WKsYBxMwx7E7ENkND-fkg"
CryptoInsightsBrasil = "UC4BrnREinCBUenQZi4ZU95w"
Crypt0 = "UCdUSSt-IEUg2eq46rD7lu_g"
cryptomined = "UCGQ3XHtsH8Q9iQr9bFbgfDA"
altcoinbuzz = "UCGyqEtcGQQtXyUwvcy7Gmyg"
crypto = "UCiMgF08KQ4z-Gnu8o2BLOxA"
Crypto99 = "UCjsrdOJCAKcuBqyyqjg1cCQ"
TheCryptoLark = "UCl2oCaw8hdR_kbqyqd2klIA"
NuggetsNews = "UCLo66QVfEod0nNM_GzKNxmQ"
btckyle = "UCNCGCxxTT10aeTgUMHW5FfQ"
LouisThomas = "UCpceefaJ9vs4RYUTsO9Y3FA"
BitsBeTrippin = "UCVVWXoQfMfQVuzxcLylq9aA"
cryptocrow = "UCwsRWmIL5XKqFtdytBfeX0g"
KouSuccessLeeFX = "UC0YkP4Fg_d8y6JFwLj3MLdg"
TheSchiffReport = "UCIjuLiLHdFxYtFmWlbTGQRQ"
MRU = "UCnkEhPBMZcEO0QGu51fDFDg"
Vidello = "UCwMaWqZ6SdDpTaYOW0huELw"
kaccreative = "UC02O9ICMuwrfULSa0N6SiSw"
shecooks = "UChZYqTJkeYV2r7WETcytdPQ"
vegsource = "UClEsPxvotpTJ1Z8eu2Y97rg"
VeganGains = "UCr2eKhGzPhN5RPVk5dd5o3g"
AwesomeKnowledge = "UC_pC4T8vr-caiNDYU6PYTTA"
luckyloush = "UCbO0Oomf_jr20Wb4V2ap5Uw"
MichaelLuzzi = "UCDwLrj4DSGrw3gFvANbl_Cw"
NakedApe = "UCMOaRU-YsXVgU-WahBkZqWQ"
ParaReact = "UCuJKELjsmWTlJG0X7T4ZD_A"
dullytv = "UCv1J91Nhn7KsxMFaxKChT3w"
REDONKULAS = "UCwd_sSDZ8EQt6SEeOO2tBRA"
RedactedTonight = "UCyvaZ2RHEDrgKXz43gz7CbQ"
TonyTornado = "UCZu9AV3mrCCDpK_dy1qJRAQ"
hoodlumscrafty = "UC0cTVjYKgAnBrXQKcICyNmA"
Karmakut = "UC2B8TOklu2rSDULqAzwn5GQ"
Draxr = "UC72o3j23E1wKskBDEKlmTOw"
BigfryTV = "UC7FVdUA3SxDMfl4fLBGlADg"
CSGONews = "UC7L6NRLyldvxWukhOHABazQ"
ZedGaming = "UC7Q15H71DmB7F1iSFE1Z8LQ"
KamiVS = "UC9fh15yUcGAr8iUfQaoRRpQ"
grabthegames = "UCaJFEgY6ij05Fxgn6qtcX_g"
blueplays = "UCbjMsFlYb2NLpjS3uDzm9ow"
ImNotBonkers = "UCCyN0G77B7wnf-1AOTI3gWQ"
GunslingerMedia = "UCDjs4JoXmMzvaZyPVsvjKXw"
LeagueVoices = "UCdkonRjBLzr1Adf7Jhu1bWQ"
SirPugger = "UCelqWKTcCvBZP_1_1iYZAWA"
gamesoup = "UCGPMrF9AN_D9BrmSmMeV3hA"
ProHenis = "UCIjFoXSQ9HYbcWmRVIsH-Ow"
Dota2Divine = "UCiR9IHCurqVHpC821NVcW6g"
CrazyFoxMovies = "UCjewtQLpJEENPLbrCtb6YpA"
Larry = "UCJVdNvvuvOnthuWVQjYff2w"
hottake = "UCK24784Oqb4oYQfHL9XePNg"
Breezy = "UCKWRpZpcLKriWd1am9SHf8A"
retrorgb = "UCLPIbBCKVH2uKGm5C4sOkew"
Zer0Gamer = "UCmII34jN4rqCIsGqWkK5JEg"
nickatnyte = "UCMxYQX1zaepCgmiSmwbT39w"
DyllonStejGaming = "UCngaLL0QDbsAGYj7zsB8o_Q"
bluedrake42 = "UCNSwcDEUfIEzYdAPscXo6ZA"
Rerez = "UCoFpRCAsKfWAshvLE1bYzdw"
Op = "UCowi5kFfvGXR8NqhyE6jneQ"
kidsgamesfun = "UCqjGzmb2pMWSFYm0GzKeTEA"
GamesGlitches = "UCRj3Q06KOxAZWHaHScrkAOw"
Musikage = "UCsej4tgCoXDgVH3J7M3NMgw"
JeffyGaming = "UCTZzSNnZ43XQslejT5wFdRw"
oniblackmage = "UCUEF9XL3o8dZ6hvVf8jAi8Q"
nickatnyte2 = "UCuoTqrobMyZj0ge8LOCkiSw"
KazeEmanuar = "UCuvSqzfO_LV_QzHdmEj84SQ"
TheLinuxGamer = "UCv1Kcz-CuGM6mxzL3B1_Eiw"
GaminGHD = "UCW-thz5HxE-goYq8yPds1Gw"
BHGaming = "UCX4N3DioqqrugFeilxTkSIw"
Potato = "UCxPPTDNH85HZWxrgZ3FQBYA"
MikeyTaylorGaming = "UCycXj6lRWtsSqo-bZOIZePw"
Acituanbus = "UCzfx1QvKjn-BxLMLBdBGgMw"
juggling = "UC2fhTIbnQlFYaFzyTcmPkXg"
KhanAcademy = "UC4a-Gbdw7vOaccHmFo40b9g"
DON = "UCAYrPk70AePJZSaVLKrWdfQ"
shogogarcia = "UCE3yZjxDg3iI91bcNJDFnsg"
alphalifestyleacademy = "UCeggEaXtJu2domMahYMD_ig"
NileRed = "UCFhXFikryT4aFcLkLw2LBLA"
veritasium = "UCHnyfMqiRRG1u-2MsSQLbXA"
stevecronin = "UCJYawZQYwjZ76mrF_US9eNg"
jeranism = "UCS_FY5mR4g22L_E9t1D_ExQ"
MinutePhysics = "UCUHW94eEFW7hkUMVaZz4eDg"
_3Blue1Brown = "UCYO_jab_esuFRV4b17AJtAw"
Itsrucka = "UC-B2LyEZcl3avG0coKeohGQ"
unitedtaps = "UC5Q8e9-uutVZmiwAcEHKuzA"
srodalmenara = "UC9kc1DaOy2kSzXHZgpn9kfQ"
ShutupAndPlay = "UCAwuvzhah0KUw5QNihSkEwQ"
sanx2 = "UCJ_waKl9kjbhfXI3LJOfHvA"
DerickWattsAndTheSundayBlues = "UCmZhiZq7M7d73Kbey4yna_Q"
akirathedon = "UCsoiSpBvkr4Y-78Pj3recUw"
Musicoterapia = "UCsoSK8K4OpdMV1tqJFwO5QA"
daydreaming = "UCtbuGylbRXc42pIxWey19Dg"
RemixHolicRecords = "UCUW5GjwcXgbPcfk00t-GQZA"
EDMBot = "UCvmUdL2NHWlj1NRiNJPI-TQ"
TrioTravels = "UCdAPAmdnkdFsH5R2Hxevucg"
caosonnguyen = "UCwPeW9kFId5-VbQ2LQEjVhg"
TheAlmonteFilms = "UC4C_SF5koS4Q5om50b9NMTw"
timcast = "UCG749Dj4V2fKa143f8sE60Q"
SeekingTheTruth = "UCHrDpTVL9S0h91u9UCPgVbA"
davidpakman = "UCvixJtaXuNdMPUGdOPcY8Ag"
mikenayna = "UCzk08fzh5c_BhjQa1w35wtA"
DoorMonster = "UC-to_wlckb-bFDtQfUZL3Kw"
barnacules = "UC1MwJy1R0nGQkXxRD9p-zTQ"
brightsunfilms = "UC5k3Kc0avyDJ2nG9Kxm9JmQ"
Onision = "UC5OxQNCgW88FDBxeZCnrBbg"
top10archive = "UCa03bf8gAS2EtffptV-_jfA"
thought = "UCb0yiUQhhLV_jpY3BayJaLA"
MothersBasement = "UCBs2Y3i14e1NWQxOGliatmg"
VlogsOfKnowledge = "UCDUPGR6uL5uz0_hiRAQjZMQ"
GorTheMovieGod = "UCHdTVw89QU6coU1MgN-9RHA"
iamalexoconnor = "UCKAQLEk1GGqnPtov9EW0huQ"
ADedits = "UCsX-zRuq3ovMsgFqEQLw2Bw"
SynthCool = "UCxGTHsD0pLSFlFI7M7jYmBQ"
JordanBPeterson = "UCL_f53ZEJxp8TtlOkHwMV9Q"
Sciencedocumentinhindi = "UC9SpfUF3rm-MGep5WE6FSCA"
NurdRage = "UCIgKGGJkt1MrNmhq3vRibYA"
MINDBLASTER = "UC_ZMqbRu44jK-EogjYyHz8g"
DaminousPurity = "UCdKRWvz50QoioZFgu6Nf9og"
ScammerRevolts = "UC0uJKUXiU5T41Fzawy5H6mw"
eevblog = "UC2DjFE7Xf11URZqWBigcVOQ"
Luke = "UC2eYFnH61tmytImy1mTYvhA"
AppGirl = "UC389S4_2Yt9cei1qNDwmBrA"
thecryptodad = "UC68x_TIzqCtF69fYl2_kl3w"
ChrisWereDigital = "UCAPR27YUyxmgwm3Wc2WSHLw"
alecaddd = "UCbmBY_XYZqCa2G0XmFA7ZWg"
EliTheComputerGuy = "UCD4EOyXKjfDUhCI6jlOZZYQ"
archetapp = "UCDIBBmkZIB2hjBsk1hUImdA"
PCPlaceNZ = "UCf5ZTSZAKbinY03jOwylfOg"
NaomiSexyCyborgWu = "UCh_ugKacslKhsGGdXP0cRRA"
NibiruWatcher = "UCi62JvN-lUn7hVL3ffofADA"
imineblocks = "UCjYHcWGAjUVqU49D2JOKD3w"
Lunduke = "UCkK9UDm_ZNrq_rIXCz3xCGA"
CooLoserTech = "UCl97rZ2Tc7KV9lktmmHNFDQ"
TechHD = "UCN3bPy04Jkp3ADRtyYvXomQ"
MiketheScrapper = "UCqtlJpkH_llXS_vuDExGVvw"
eevblog2 = "UCr-cm90DwFJC0W3f9jBs5jA"
thecreativeone = "UCTikFhzCiIXfOMS7D29dvYg"
weekendtricks = "UCYtAJXx0ymGPpCndn2Gt6-w"
GBGuns = "UC2VOURrALs1CwVmbGlXJOPQ"
Matsimus = "UCFWjEwhX6cSAKBQ28pufG3w"
TheLateBoyScout = "UCZjvj5MN3BMxPFfdEKIrvxQ"
BravoCinematografica = "UC2ruSXQoKMgr7JXzwO0H0KA"
PyroNation = "UC4ffy3n1hE7Z8q-2KVq_91Q"
dramatuber = "UC4Y8mImty3gFEG9grsR6T-w"
BarnabasNagy = "UC8TRZRK1sJfxKJ0tXMmGTow"
avery = "UCcfjIZLDCuSqkIlkH8i4DDg"
Tingledove = "UCfI7wtV6K64gVbzjH7DOA_Q"
crmjewelers = "UChpFWeF84jA5JeV3YyIo3pQ"
NerfNerd18 = "UCIgmaEJNqvH9bU9zGMapZGg"
SEIJIHITO = "UCNqUrLE6dI8fWw_u3HQkpXA"
anvithavlogs = "UCsP9pYat2DEBvnvF_iFGG_w"
YoelRekts = "UCZ_BcFyhIo6GdtTSrqXXepg"
TechFox = "UCIp-oTSdFO7BhAJpW2d5HMQ"
)
var channelWideTags = map[string][]string{
Lunduke: {"linux", "technology"},
SwissExperiments: {"science & technology", "experiments", "switzerland"},
JustJuggling: {"juggling", "circus arts", "malabares"},
SwissExperiments: {"science & technology", "experiments", "switzerland"},
TechFox: {"technology", "reviews"},
misszizi: {"art", "pop culture"},
creationshub: {"art"},
AlicePandora: {"art"},
Akito0405: {"art"},
dashcambristol: {"automotive"},
adrianbonillap: {"automotive"},
SubhamVlogs: {"automotive"},
DisciplesofJesusChristJeremiahPayne: {"beliefs"},
ModernGalaxy: {"beliefs"},
socofilms: {"beliefs"},
buriedone: {"blockchain"},
bitcoin: {"blockchain"},
globalrashid: {"blockchain"},
gameofbitcoins: {"blockchain"},
CryptoInsightsBrasil: {"blockchain"},
Crypt0: {"blockchain"},
cryptomined: {"blockchain"},
altcoinbuzz: {"blockchain", "technology"},
crypto: {"blockchain"},
Crypto99: {"blockchain"},
TheCryptoLark: {"blockchain", "technology"},
NuggetsNews: {"blockchain", "learning"},
btckyle: {"blockchain"},
LouisThomas: {"blockchain"},
BitsBeTrippin: {"blockchain"},
cryptocrow: {"blockchain"},
KouSuccessLeeFX: {"economics", "learning"},
TheSchiffReport: {"economics"},
MRU: {"economics"},
Vidello: {"economics", "pop culture"},
kaccreative: {"food", "art"},
shecooks: {"food"},
vegsource: {"food"},
VeganGains: {"food"},
AwesomeKnowledge: {"funny"},
luckyloush: {"funny"},
MichaelLuzzi: {"funny", "pop culture"},
NakedApe: {"funny"},
ParaReact: {"funny"},
dullytv: {"funny"},
REDONKULAS: {"funny"},
RedactedTonight: {"funny", "news"},
TonyTornado: {"funny"},
hoodlumscrafty: {"gaming", "pop culture"},
Karmakut: {"gaming"},
Draxr: {"gaming", "pop culture"},
BigfryTV: {"gaming"},
CSGONews: {"gaming", "funny"},
ZedGaming: {"gaming"},
KamiVS: {"gaming"},
grabthegames: {"gaming"},
blueplays: {"gaming"},
ImNotBonkers: {"gaming", "funny"},
GunslingerMedia: {"gaming"},
LeagueVoices: {"gaming"},
SirPugger: {"gaming"},
gamesoup: {"gaming"},
ProHenis: {"gaming"},
Dota2Divine: {"gaming"},
CrazyFoxMovies: {"gaming", "pop culture", "technology"},
Larry: {"gaming"},
hottake: {"gaming", "funny"},
Breezy: {"gaming", "funny"},
retrorgb: {"gaming"},
Zer0Gamer: {"gaming"},
nickatnyte: {"gaming"},
DyllonStejGaming: {"gaming"},
bluedrake42: {"gaming"},
Rerez: {"gaming"},
Op: {"gaming"},
kidsgamesfun: {"gaming", "pop culture"},
GamesGlitches: {"gaming"},
Musikage: {"gaming", "pop culture"},
JeffyGaming: {"gaming", "funny"},
oniblackmage: {"gaming"},
nickatnyte2: {"gaming"},
KazeEmanuar: {"gaming"},
TheLinuxGamer: {"gaming", "technology", "linux"},
GaminGHD: {"gaming", "pop culture"},
BHGaming: {"gaming"},
Potato: {"gaming"},
MikeyTaylorGaming: {"gaming"},
Acituanbus: {"gaming"},
juggling: {"juggling", "circus art", "malabares"},
KhanAcademy: {"learning", "science"},
DON: {"learning", "pop culture"},
shogogarcia: {"learning"},
alphalifestyleacademy: {"learning"},
NileRed: {"learning", "science"},
veritasium: {"learning", "science"},
stevecronin: {"learning"},
jeranism: {"learning"},
MinutePhysics: {"learning", "science"},
_3Blue1Brown: {"learning"},
Itsrucka: {"music"},
unitedtaps: {"music"},
srodalmenara: {"music"},
ShutupAndPlay: {"music", "learning"},
sanx2: {"music"},
DerickWattsAndTheSundayBlues: {"music", "funny"},
akirathedon: {"music"},
Musicoterapia: {"music"},
daydreaming: {"music"},
RemixHolicRecords: {"music"},
EDMBot: {"music"},
TrioTravels: {"nature"},
caosonnguyen: {"nature"},
TheAlmonteFilms: {"news"},
timcast: {"news", "technology"},
SeekingTheTruth: {"news"},
davidpakman: {"news"},
mikenayna: {"news"},
DoorMonster: {"pop culture", "funny"},
barnacules: {"pop culture", "gaming"},
brightsunfilms: {"pop culture"},
Onision: {"pop culture", "funny"},
top10archive: {"pop culture"},
thought: {"pop culture", "learning"},
MothersBasement: {"pop culture", "gaming"},
VlogsOfKnowledge: {"pop culture", "gaming"},
GorTheMovieGod: {"pop culture"},
iamalexoconnor: {"pop culture"},
ADedits: {"pop culture"},
SynthCool: {"pop culture", "funny"},
JordanBPeterson: {"psychology", "postmodernism", "news"},
Sciencedocumentinhindi: {"science"},
NurdRage: {"science", "learning"},
MINDBLASTER: {"sports", "funny"},
DaminousPurity: {"sports", "gaming"},
ScammerRevolts: {"technology"},
eevblog: {"technology", "learning"},
Luke: {"technology", "funny"},
AppGirl: {"technology"},
thecryptodad: {"technology", "blockchain"},
ChrisWereDigital: {"technology"},
alecaddd: {"technology", "learning"},
EliTheComputerGuy: {"technology"},
archetapp: {"technology", "learning"},
PCPlaceNZ: {"technology"},
NaomiSexyCyborgWu: {"technology"},
NibiruWatcher: {"technology", "learning"},
imineblocks: {"technology", "blockchain"},
Lunduke: {"technology"},
CooLoserTech: {"technology"},
TechHD: {"technology", "learning"},
MiketheScrapper: {"technology"},
eevblog2: {"technology"},
thecreativeone: {"technology", "gaming"},
weekendtricks: {"technology"},
GBGuns: {"weapons"},
Matsimus: {"weapons", "gaming"},
TheLateBoyScout: {"weapons"},
}
var tagsToSkip = map[string]*struct{}{
"#hangoutsonair": nil,


@ -0,0 +1,144 @@
package tags_manager
import (
"fmt"
"testing"
)
func TestSanitizeTags(t *testing.T) {
got, err := SanitizeTags([]string{"this", "super", "expensive", "test", "has", "a lot of", "crypto", "currency", "in it", "trump", "will build the", "wall"}, "UCNQfQvFMPnInwsU_iGYArJQ")
if err != nil {
t.Error(err)
return
}
expectedTags := []string{
"blockchain",
"switzerland",
"news",
"science & technology",
"economics",
"experiments",
"this",
"in it",
"will build the",
"has",
"crypto",
"trump",
"wall",
"expensive",
"currency",
"a lot of",
}
if len(expectedTags) != len(got) {
t.Error("number of tags differ")
return
}
outer:
for _, et := range expectedTags {
for _, t := range got {
if et == t {
continue outer
}
}
t.Error("tag not found")
return
}
}
func TestNormalizeTag(t *testing.T) {
tags := []string{
"blockchain",
"Switzerland",
"news ",
" science & Technology ",
"economics",
"experiments",
"this",
"in it",
"will build the (WOOPS)",
"~has",
"crypto",
"trump",
"wall",
"expensive",
"!currency",
" a lot of ",
"#",
"#whatever",
"#123",
"#123 Something else",
"#123aaa",
"!asdasd",
"CASA BLANCA",
"wwe 2k18 Elimination chamber!",
"pero'",
"però",
"è proprio",
"Ep 29",
"sctest29 Keddr",
"mortal kombat 11 shang tsung",
"!asdasd!",
}
normalizedTags := make([]string, 0, len(tags))
for _, tag := range tags {
got, err := normalizeTag(tag)
if err != nil {
t.Error(err)
return
}
if got != "" {
normalizedTags = append(normalizedTags, got)
}
fmt.Printf("Got tag: '%s'\n", got)
}
expected := []string{
"blockchain",
"switzerland",
"news",
"science & technology",
"economics",
"experiments",
"this",
"in it",
"will build the",
"has",
"crypto",
"trump",
"wall",
"expensive",
"currency",
"a lot of",
"whatever",
"123",
"something else",
"123aaa",
"asdasd",
"casa blanca",
"wwe 2k18 elimination chamber",
"pero",
"però",
"è proprio",
"ep 29",
"sctest29 keddr",
"mortal kombat 11 shang tsung",
"asdasd",
}
if !Equal(normalizedTags, expected) {
t.Error("result not as expected")
return
}
}
func Equal(a, b []string) bool {
if len(a) != len(b) {
fmt.Printf("expected length %d but got %d", len(b), len(a))
return false
}
for i, v := range a {
if v != b[i] {
fmt.Printf("expected %s but bot %s\n", b[i], v)
return false
}
}
return true
}
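The SanitizeTags test above shows that sanitizing merges the per-channel tags from the map excerpted earlier (the test's channel ID maps to blockchain, switzerland, news, and so on) into the normalized user tags. The normalizeTag expectations encode the rules: lowercase, trim whitespace, drop parenthesized suffixes, strip leading/trailing punctuation such as '#', '!' and '~', and discard tags that normalize to nothing. A minimal sketch of a normalizer satisfying these cases (my own illustration with simple regexes; the package's actual normalizeTag may differ):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var parens = regexp.MustCompile(`\(.*\)`)
var leadJunk = regexp.MustCompile(`^[#!~\s]+`)
var trailJunk = regexp.MustCompile(`[!']+$`)

// normalizeTagSketch approximates the behavior the test expects;
// an empty result means the tag should be dropped.
func normalizeTagSketch(tag string) string {
	tag = strings.ToLower(strings.TrimSpace(tag))
	tag = parens.ReplaceAllString(tag, "")
	tag = leadJunk.ReplaceAllString(tag, "")
	tag = trailJunk.ReplaceAllString(tag, "")
	return strings.TrimSpace(tag)
}

func main() {
	fmt.Println(normalizeTagSketch(" #123 Something else ")) // "123 something else"
	fmt.Println(normalizeTagSketch("!asdasd!"))              // "asdasd"
	fmt.Println(normalizeTagSketch("#"))                     // "" (dropped)
}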


@ -1,17 +1,20 @@
package thumbs
import (
"google.golang.org/api/youtube/v3"
"io"
"net/http"
"os"
"strings"
"github.com/lbryio/errors.go"
"github.com/lbryio/ytsync/v5/configs"
"github.com/lbryio/ytsync/v5/downloader/ytdl"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/prometheus/common/log"
log "github.com/sirupsen/logrus"
)
type thumbnailUploader struct {
@ -25,13 +28,15 @@ const thumbnailPath = "/tmp/ytsync_thumbnails/"
const ThumbnailEndpoint = "https://thumbnails.lbry.com/"
func (u *thumbnailUploader) downloadThumbnail() error {
-_ = os.Mkdir(thumbnailPath, 0750)
+_ = os.Mkdir(thumbnailPath, 0777)
img, err := os.Create("/tmp/ytsync_thumbnails/" + u.name)
if err != nil {
return errors.Err(err)
}
defer img.Close()
+if strings.HasPrefix(u.originalUrl, "//") {
+u.originalUrl = "https:" + u.originalUrl
+}
resp, err := http.Get(u.originalUrl)
if err != nil {
return errors.Err(err)
@ -61,11 +66,14 @@ func (u *thumbnailUploader) uploadThumbnail() error {
uploader := s3manager.NewUploader(s3Session)
_, err = uploader.Upload(&s3manager.UploadInput{
Bucket: aws.String("thumbnails.lbry.com"),
Key: key,
Body: thumb,
ACL: aws.String("public-read"),
Bucket: aws.String("thumbnails.lbry.com"),
Key: key,
Body: thumb,
ACL: aws.String("public-read"),
ContentType: aws.String("image/jpeg"),
CacheControl: aws.String("public, max-age=2592000"),
})
u.mirroredUrl = ThumbnailEndpoint + u.name
return errors.Err(err)
}
@ -76,11 +84,11 @@ func (u *thumbnailUploader) deleteTmpFile() {
log.Infof("failed to delete local thumbnail file: %s", err.Error())
}
}
-func MirrorThumbnail(url string, name string, s3Config aws.Config) (string, error) {
+func MirrorThumbnail(url string, name string) (string, error) {
tu := thumbnailUploader{
originalUrl: url,
name: name,
-s3Config: s3Config,
+s3Config: *configs.Configuration.AWSThumbnailsS3Config.GetS3AWSConfig(),
}
err := tu.downloadThumbnail()
if err != nil {
@ -93,18 +101,26 @@ func MirrorThumbnail(url string, name string, s3Config aws.Config) (string, erro
return "", err
}
//this is our own S3 storage
tu2 := thumbnailUploader{
originalUrl: url,
name: name,
s3Config: *configs.Configuration.ThumbnailsS3Config.GetS3AWSConfig(),
}
err = tu2.uploadThumbnail()
if err != nil {
return "", err
}
return tu.mirroredUrl, nil
}
-func GetBestThumbnail(thumbnails *youtube.ThumbnailDetails) *youtube.Thumbnail {
-if thumbnails.Maxres != nil {
-return thumbnails.Maxres
-} else if thumbnails.High != nil {
-return thumbnails.High
-} else if thumbnails.Medium != nil {
-return thumbnails.Medium
-} else if thumbnails.Standard != nil {
-return thumbnails.Standard
-}
-return thumbnails.Default
+func GetBestThumbnail(thumbnails []ytdl.Thumbnail) *ytdl.Thumbnail {
+var bestWidth ytdl.Thumbnail
+for _, thumbnail := range thumbnails {
+if bestWidth.Width < thumbnail.Width {
+bestWidth = thumbnail
+}
+}
+return &bestWidth
}
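The rewritten GetBestThumbnail simply returns the widest thumbnail yt-dlp reports. A hedged sketch of the intended call pattern (the import paths, the URL field name on ytdl.Thumbnail, and the claim name are assumptions based on this diff):

package main

import (
	"fmt"
	"log"

	"github.com/lbryio/ytsync/v5/downloader/ytdl"
	"github.com/lbryio/ytsync/v5/thumbs"
)

func main() {
	// hypothetical thumbnails as yt-dlp would report them
	candidates := []ytdl.Thumbnail{
		{URL: "https://i.ytimg.com/vi/x/mqdefault.jpg", Width: 320},
		{URL: "https://i.ytimg.com/vi/x/maxresdefault.jpg", Width: 1280},
	}
	best := thumbs.GetBestThumbnail(candidates) // picks the 1280px one
	mirrored, err := thumbs.MirrorThumbnail(best.URL, "made-up-claim-id")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(mirrored) // ThumbnailEndpoint + name
}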

timing/timing.go Normal file (114 lines added)

@ -0,0 +1,114 @@
package timing
import (
"sync"
"sync/atomic"
"time"
"github.com/lbryio/ytsync/v5/metrics"
"github.com/sirupsen/logrus"
)
type Timing struct {
component string
milliseconds int64
min int64
max int64
invocations int32
}
var timings *sync.Map
func TimedComponent(component string) *Timing {
if timings == nil {
//note: this lazy init is not goroutine-safe on its own; it relies on the first call happening before concurrent use
timings = &sync.Map{}
}
stored, _ := timings.LoadOrStore(component, &Timing{
component: component,
milliseconds: 0,
min: int64(99999999), //sentinel so the first recorded sample always becomes the minimum
})
t, _ := stored.(*Timing)
return t
}
func ClearTimings() {
if timings == nil {
return
}
timings.Range(func(key interface{}, value interface{}) bool {
timings.Delete(key)
return true
})
}
func Report() {
var totalTime time.Duration
timings.Range(func(key interface{}, value interface{}) bool {
totalTime += value.(*Timing).Get()
return true
})
timings.Range(func(key interface{}, value interface{}) bool {
component := key
componentRuntime := value.(*Timing).Get().String()
percentTime := float64(value.(*Timing).Get()) / float64(totalTime) * 100
invocations := value.(*Timing).Invocations()
avgTime := (time.Duration(int64(float64(value.(*Timing).Get()) / float64(value.(*Timing).Invocations())))).String()
minRuntime := value.(*Timing).Min().String()
maxRuntime := value.(*Timing).Max().String()
logrus.Printf("component %s ran for %s (%.2f%% of the total time) - invoked %d times with an average of %s per call, a minimum of %s and a maximum of %s",
component,
componentRuntime,
percentTime,
invocations,
avgTime,
minRuntime,
maxRuntime,
)
return true
})
}
func (t *Timing) Add(d time.Duration) {
metrics.Durations.WithLabelValues(t.component).Observe(d.Seconds())
atomic.AddInt64(&t.milliseconds, d.Milliseconds())
//lock-free min update: retry the compare-and-swap until it sticks or another goroutine has stored an even smaller value
for {
oldMin := atomic.LoadInt64(&t.min)
if d.Milliseconds() < oldMin {
if atomic.CompareAndSwapInt64(&t.min, oldMin, d.Milliseconds()) {
break
}
} else {
break
}
}
//same CAS pattern for the max
for {
oldMax := atomic.LoadInt64(&t.max)
if d.Milliseconds() > oldMax {
if atomic.CompareAndSwapInt64(&t.max, oldMax, d.Milliseconds()) {
break
}
} else {
break
}
}
atomic.AddInt32(&t.invocations, 1)
}
func (t *Timing) Get() time.Duration {
ms := atomic.LoadInt64(&t.milliseconds)
return time.Duration(ms) * time.Millisecond
}
func (t *Timing) Invocations() int32 {
return atomic.LoadInt32(&t.invocations)
}
func (t *Timing) Min() time.Duration {
ms := atomic.LoadInt64(&t.min)
return time.Duration(ms) * time.Millisecond
}
func (t *Timing) Max() time.Duration {
ms := atomic.LoadInt64(&t.max)
return time.Duration(ms) * time.Millisecond
}
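The intended usage pattern, which util/util.go below follows throughout, is a deferred Add around each component; a minimal sketch (the component name is made up):

package main

import (
	"time"

	"github.com/lbryio/ytsync/v5/timing"
)

// fetchMetadata is a stand-in for real work being timed
func fetchMetadata() {
	start := time.Now()
	defer func(start time.Time) {
		timing.TimedComponent("fetchMetadata").Add(time.Since(start))
	}(start)
	time.Sleep(50 * time.Millisecond)
}

func main() {
	fetchMetadata()
	fetchMetadata()
	timing.Report() // logs total, percentage, invocation count, average, min and max per component
}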

util/archive.go Normal file (109 lines added)

@ -0,0 +1,109 @@
package util
import (
"archive/tar"
"io"
"io/fs"
"os"
"path/filepath"
"github.com/lbryio/lbry.go/v2/extras/errors"
)
func CreateTarball(tarballFilePath string, filePaths []string) error {
file, err := os.Create(tarballFilePath)
if err != nil {
return errors.Err("Could not create tarball file '%s', got error '%s'", tarballFilePath, err.Error())
}
defer file.Close()
tarWriter := tar.NewWriter(file)
defer tarWriter.Close()
for _, filePath := range filePaths {
err := addFileToTarWriter(filePath, tarWriter)
if err != nil {
return errors.Err("Could not add file '%s', to tarball, got error '%s'", filePath, err.Error())
}
}
return nil
}
func addFileToTarWriter(filePath string, tarWriter *tar.Writer) error {
file, err := os.Open(filePath)
if err != nil {
return errors.Err("Could not open file '%s', got error '%s'", filePath, err.Error())
}
defer file.Close()
stat, err := file.Stat()
if err != nil {
return errors.Err("Could not get stat for file '%s', got error '%s'", filePath, err.Error())
}
header := &tar.Header{
Name: stat.Name(),
Size: stat.Size(),
Mode: int64(stat.Mode()),
ModTime: stat.ModTime(),
}
err = tarWriter.WriteHeader(header)
if err != nil {
return errors.Err("Could not write header for file '%s', got error '%s'", filePath, err.Error())
}
_, err = io.Copy(tarWriter, file)
if err != nil {
return errors.Err("Could not copy the file '%s' data to the tarball, got error '%s'", filePath, err.Error())
}
return nil
}
func Untar(tarball, target string) error {
reader, err := os.Open(tarball)
if err != nil {
return errors.Err(err)
}
defer reader.Close()
tarReader := tar.NewReader(reader)
for {
header, err := tarReader.Next()
if err == io.EOF {
break
} else if err != nil {
return errors.Err(err)
}
path := filepath.Join(target, header.Name)
info := header.FileInfo()
if info.IsDir() {
if err = os.MkdirAll(path, info.Mode()); err != nil {
return errors.Err(err)
}
continue
}
err = extractFile(path, info, tarReader)
if err != nil {
return err
}
}
return nil
}
func extractFile(path string, info fs.FileInfo, tarReader *tar.Reader) error {
file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode())
if err != nil {
return errors.Err(err)
}
defer file.Close()
_, err = io.Copy(file, tarReader)
if err != nil {
return errors.Err(err)
}
return nil
}
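A hedged round-trip sketch for the archive helpers (paths are hypothetical). Two caveats visible in the code above: addFileToTarWriter stores only stat.Name() in each header, so directory structure is flattened, and Untar only creates directories for directory entries, so the target must already exist:

package main

import (
	"log"
	"os"

	"github.com/lbryio/ytsync/v5/util"
)

func main() {
	// hypothetical files to back up
	files := []string{"/tmp/wallet.db", "/tmp/blockchain.db"}
	if err := util.CreateTarball("/tmp/backup.tar", files); err != nil {
		log.Fatal(err)
	}
	// the target directory must exist before extracting plain files into it
	if err := os.MkdirAll("/tmp/restore", 0755); err != nil {
		log.Fatal(err)
	}
	if err := util.Untar("/tmp/backup.tar", "/tmp/restore"); err != nil {
		log.Fatal(err)
	}
}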

util/log_wrapper.go Normal file (38 lines added)

@ -0,0 +1,38 @@
package util
import (
"fmt"
"github.com/lbryio/lbry.go/v2/extras/util"
log "github.com/sirupsen/logrus"
)
// SendErrorToSlack sends an error message to the default channel and to the process log.
func SendErrorToSlack(format string, a ...interface{}) {
message := format
if len(a) > 0 {
message = fmt.Sprintf(format, a...)
}
log.Errorln(message)
log.SetLevel(log.InfoLevel) //I don't want to change the underlying lib so this will do...
err := util.SendToSlack(":sos: ```" + message + "```")
log.SetLevel(log.DebugLevel)
if err != nil {
log.Errorln(err)
}
}
// SendInfoToSlack sends an info message to the default channel and to the process log.
func SendInfoToSlack(format string, a ...interface{}) {
message := format
if len(a) > 0 {
message = fmt.Sprintf(format, a...)
}
log.Infoln(message)
log.SetLevel(log.InfoLevel) //I don't want to change the underlying lib so this will do...
err := util.SendToSlack(":information_source: " + message)
log.SetLevel(log.DebugLevel)
if err != nil {
log.Errorln(err)
}
}
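Both wrappers format like fmt.Printf and mirror the message to the process log; a minimal usage sketch (the values are invented, and Slack must be configured through lbry.go's util package for delivery to actually succeed, otherwise the helpers just log the message plus the delivery error):

package main

import (
	logUtils "github.com/lbryio/ytsync/v5/util"
)

func main() {
	logUtils.SendInfoToSlack("finished syncing %d videos for channel %s", 42, "UCNQfQvFMPnInwsU_iGYArJQ")
	logUtils.SendErrorToSlack("sync failed after %d attempts", 3)
}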

util/util.go Normal file (422 lines added)

@ -0,0 +1,422 @@
package util
import (
"context"
"os"
"os/exec"
"os/user"
"path/filepath"
"strconv"
"time"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/lbrycrd"
"github.com/lbryio/ytsync/v5/configs"
"github.com/lbryio/ytsync/v5/timing"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/mitchellh/go-ps"
log "github.com/sirupsen/logrus"
)
func GetBlobsDir() string {
blobsDir := os.Getenv("BLOBS_DIRECTORY")
if blobsDir == "" {
usr, err := user.Current()
if err != nil {
log.Error(err.Error())
return ""
}
blobsDir = usr.HomeDir + "/.lbrynet/blobfiles/"
}
return blobsDir
}
func IsBlobReflectionOff() bool {
return os.Getenv("REFLECT_BLOBS") == "false"
}
func GetLBRYNetDir() string {
lbrynetDir := os.Getenv("LBRYNET_DIR")
if lbrynetDir == "" {
usr, err := user.Current()
if err != nil {
log.Errorln(err.Error())
return ""
}
return usr.HomeDir + "/.lbrynet/"
}
return lbrynetDir
}
func GetLbryumDir() string {
lbryumDir := os.Getenv("LBRYUM_DIR")
if lbryumDir == "" {
usr, err := user.Current()
if err != nil {
log.Errorln(err.Error())
return ""
}
return usr.HomeDir + "/.lbryum/"
}
return lbryumDir + "/"
}
const ALL = true
const ONLINE = false
func GetLBRYNetContainer(all bool) (*types.Container, error) {
return getDockerContainer("lbrynet", all)
}
func getDockerContainer(name string, all bool) (*types.Container, error) {
cli, err := client.NewEnvClient()
if err != nil {
panic(err)
}
filters := filters.NewArgs()
filters.Add("name", name)
containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{All: all, Filters: filters})
if err != nil {
panic(err)
}
if len(containers) == 0 {
return nil, nil
}
if len(containers) > 1 {
return nil, errors.Err("more than one %s container found", name)
}
return &containers[0], nil
}
func IsUsingDocker() bool {
useDocker, err := strconv.ParseBool(os.Getenv("LBRYNET_USE_DOCKER"))
if err != nil {
return false
}
return useDocker
}
func IsRegTest() bool {
usesRegtest, err := strconv.ParseBool(os.Getenv("REGTEST"))
if err != nil {
return false
}
return usesRegtest
}
func GetLbrycrdClient(lbrycrdString string) (*lbrycrd.Client, error) {
chainName := os.Getenv("CHAINNAME")
chainParams, ok := lbrycrd.ChainParamsMap[chainName]
if !ok {
chainParams = lbrycrd.MainNetParams
}
var lbrycrdd *lbrycrd.Client
var err error
if lbrycrdString == "" {
lbrycrdd, err = lbrycrd.NewWithDefaultURL(&chainParams)
if err != nil {
return nil, err
}
} else {
lbrycrdd, err = lbrycrd.New(lbrycrdString, &chainParams)
if err != nil {
return nil, err
}
}
return lbrycrdd, nil
}
func ShouldCleanOnStartup() bool {
shouldClean, err := strconv.ParseBool(os.Getenv("CLEAN_ON_STARTUP"))
if err != nil {
return false
}
return shouldClean
}
func IsLbrynetRunning() (bool, error) {
if IsUsingDocker() {
container, err := GetLBRYNetContainer(ONLINE)
if err != nil {
return false, err
}
return container != nil, nil
}
processes, err := ps.Processes()
if err != nil {
return true, errors.Err(err)
}
var daemonProcessId = -1
for _, p := range processes {
if p.Executable() == "lbrynet" {
daemonProcessId = p.Pid()
break
}
}
running := daemonProcessId != -1
return running, nil
}
func CleanForStartup() error {
if !IsRegTest() {
return errors.Err("never cleanup wallet outside of regtest and with caution. this should only be done in local testing and requires regtest to be on")
}
running, err := IsLbrynetRunning()
if err != nil {
return err
}
if running {
err := StopDaemon()
if err != nil {
return err
}
}
err = CleanupLbrynet()
if err != nil {
return errors.Err(err)
}
lbrycrd, err := GetLbrycrdClient(configs.Configuration.LbrycrdString)
if err != nil {
return errors.Prefix("error getting lbrycrd client", err)
}
height, err := lbrycrd.GetBlockCount()
if err != nil {
return errors.Err(err)
}
const minBlocksForUTXO = 200
if height < minBlocksForUTXO {
//Start reg test with some credits
txs, err := lbrycrd.Generate(uint32(minBlocksForUTXO) - uint32(height))
if err != nil {
return errors.Err(err)
}
log.Debugf("REGTEST: Generated %d transactions to get some LBC!", len(txs))
}
defaultWalletDir := GetDefaultWalletPath()
_, err = os.Stat(defaultWalletDir)
if os.IsNotExist(err) {
return nil
}
return errors.Err(os.Remove(defaultWalletDir))
}
func CleanupLbrynet() error {
//make sure lbrynet is off
running, err := IsLbrynetRunning()
if err != nil {
return err
}
if running {
return errors.Prefix("cannot cleanup lbrynet as the daemon is running", err)
}
lbrynetDir := GetLBRYNetDir()
files, err := filepath.Glob(lbrynetDir + "lbrynet.sqlite*")
if err != nil {
return errors.Err(err)
}
for _, f := range files {
err = os.Remove(f)
if err != nil {
return errors.Err(err)
}
}
blobsDir := GetBlobsDir()
err = os.RemoveAll(blobsDir)
if err != nil {
return errors.Err(err)
}
err = os.Mkdir(blobsDir, 0777)
if err != nil {
return errors.Err(err)
}
lbryumDir := GetLbryumDir()
ledger := "lbc_mainnet"
if IsRegTest() {
ledger = "lbc_regtest"
}
lbryumDir = lbryumDir + ledger
files, err = filepath.Glob(lbryumDir + "/blockchain.db*")
if err != nil {
return errors.Err(err)
}
for _, f := range files {
err = os.Remove(f)
if err != nil {
return errors.Err(err)
}
}
return nil
}
var metadataDirInitialized = false
func GetVideoMetadataDir() string {
dir := "./videos_metadata"
if !metadataDirInitialized {
metadataDirInitialized = true
_ = os.MkdirAll(dir, 0755)
}
return dir
}
func CleanupMetadata() error {
dir := GetVideoMetadataDir()
err := os.RemoveAll(dir)
if err != nil {
return errors.Err(err)
}
metadataDirInitialized = false
return nil
}
func SleepUntilQuotaReset() {
PST, _ := time.LoadLocation("America/Los_Angeles")
t := time.Now().In(PST)
n := time.Date(t.Year(), t.Month(), t.Day(), 24, 2, 0, 0, PST)
d := n.Sub(t)
if d < 0 {
n = n.Add(24 * time.Hour)
d = n.Sub(t)
}
log.Infof("gotta sleep %s until the quota resets", d.String())
time.Sleep(d)
}
func StartDaemon() error {
start := time.Now()
defer func(start time.Time) {
timing.TimedComponent("startDaemon").Add(time.Since(start))
}(start)
if IsUsingDocker() {
return startDaemonViaDocker()
}
return startDaemonViaSystemd()
}
func StopDaemon() error {
start := time.Now()
defer func(start time.Time) {
timing.TimedComponent("stopDaemon").Add(time.Since(start))
}(start)
if IsUsingDocker() {
return stopDaemonViaDocker()
}
return stopDaemonViaSystemd()
}
func startDaemonViaDocker() error {
container, err := GetLBRYNetContainer(true)
if err != nil {
return err
}
cli, err := client.NewEnvClient()
if err != nil {
panic(err)
}
err = cli.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{})
if err != nil {
return errors.Err(err)
}
return nil
}
func stopDaemonViaDocker() error {
container, err := GetLBRYNetContainer(ONLINE)
if err != nil {
return err
}
cli, err := client.NewEnvClient()
if err != nil {
panic(err)
}
err = cli.ContainerStop(context.Background(), container.ID, nil)
if err != nil {
return errors.Err(err)
}
return nil
}
func startDaemonViaSystemd() error {
err := exec.Command("/usr/bin/sudo", "/bin/systemctl", "start", "lbrynet.service").Run()
if err != nil {
return errors.Err(err)
}
return nil
}
func stopDaemonViaSystemd() error {
err := exec.Command("/usr/bin/sudo", "/bin/systemctl", "stop", "lbrynet.service").Run()
if err != nil {
return errors.Err(err)
}
return nil
}
func GetDefaultWalletPath() string {
defaultWalletDir := os.Getenv("HOME") + "/.lbryum/wallets/default_wallet"
if IsRegTest() {
defaultWalletDir = os.Getenv("HOME") + "/.lbryum_regtest/wallets/default_wallet"
}
walletPath := os.Getenv("LBRYUM_DIR")
if walletPath != "" {
defaultWalletDir = walletPath + "/wallets/default_wallet"
}
return defaultWalletDir
}
func GetBlockchainDBPath() string {
lbryumDir := os.Getenv("LBRYUM_DIR")
if lbryumDir == "" {
if IsRegTest() {
lbryumDir = os.Getenv("HOME") + "/.lbryum_regtest"
} else {
lbryumDir = os.Getenv("HOME") + "/.lbryum"
}
}
defaultDB := lbryumDir + "/lbc_mainnet/blockchain.db"
if IsRegTest() {
defaultDB = lbryumDir + "/lbc_regtest/blockchain.db"
}
return defaultDB
}
func GetBlockchainDirectoryName() string {
ledger := "lbc_mainnet"
if IsRegTest() {
ledger = "lbc_regtest"
}
return ledger
}
func DirSize(path string) (int64, error) {
var size int64
err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
size += info.Size()
}
return err
})
return size, err
}
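SleepUntilQuotaReset relies on time.Date normalizing out-of-range components: hour 24 on today's date resolves to 00:02 of the following day, i.e. just after the YouTube API quota reset at midnight Pacific. A self-contained illustration of that normalization:

package main

import (
	"fmt"
	"time"
)

func main() {
	pst, err := time.LoadLocation("America/Los_Angeles")
	if err != nil {
		panic(err)
	}
	// hour 24 is out of range, so time.Date rolls it into the next day
	t := time.Date(2022, time.August, 16, 24, 2, 0, 0, pst)
	fmt.Println(t) // 2022-08-17 00:02:00 -0700 PDT
}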

ytapi/ytapi.go Normal file (388 lines added)

@ -0,0 +1,388 @@
package ytapi
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/lbryio/ytsync/v5/shared"
logUtils "github.com/lbryio/ytsync/v5/util"
"github.com/vbauerster/mpb/v7"
"github.com/lbryio/ytsync/v5/downloader/ytdl"
"github.com/lbryio/ytsync/v5/downloader"
"github.com/lbryio/ytsync/v5/ip_manager"
"github.com/lbryio/ytsync/v5/sdk"
"github.com/lbryio/ytsync/v5/sources"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/jsonrpc"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/lbry.go/v2/extras/util"
log "github.com/sirupsen/logrus"
)
type Video interface {
Size() *int64
ID() string
IDAndNum() string
PlaylistPosition() int
PublishedAt() time.Time
Sync(*jsonrpc.Client, sources.SyncParams, *sdk.SyncedVideo, bool, *sync.RWMutex, *sync.WaitGroup, *mpb.Progress) (*sources.SyncSummary, error)
}
type byPublishedAt []Video
func (a byPublishedAt) Len() int { return len(a) }
func (a byPublishedAt) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byPublishedAt) Less(i, j int) bool { return a[i].PublishedAt().Before(a[j].PublishedAt()) }
type VideoParams struct {
VideoDir string
Stopper *stop.Group
IPPool *ip_manager.IPPool
}
var mostRecentlyFailedChannel string // TODO: fix this hack!
func GetVideosToSync(channelID string, syncedVideos map[string]sdk.SyncedVideo, quickSync bool, maxVideos int, videoParams VideoParams, lastUploadedVideo string) ([]Video, error) {
var videos []Video
if quickSync && maxVideos > 50 {
maxVideos = 50
}
allVideos, err := downloader.GetPlaylistVideoIDs(channelID, maxVideos, videoParams.Stopper.Ch(), videoParams.IPPool)
if err != nil {
return nil, errors.Err(err)
}
videoIDs := make([]string, 0, len(allVideos))
for _, video := range allVideos {
sv, ok := syncedVideos[video]
if ok && util.SubstringInSlice(sv.FailureReason, shared.NeverRetryFailures) {
continue
}
videoIDs = append(videoIDs, video)
}
log.Infof("Got info for %d videos from youtube downloader", len(videoIDs))
playlistMap := make(map[string]int64)
for i, videoID := range videoIDs {
playlistMap[videoID] = int64(i)
}
//this will ensure that we at least try to sync the video that was marked as last uploaded video in the database.
if lastUploadedVideo != "" {
_, ok := playlistMap[lastUploadedVideo]
if !ok {
playlistMap[lastUploadedVideo] = 0
videoIDs = append(videoIDs, lastUploadedVideo)
}
}
if len(videoIDs) < 1 {
//a single empty playlist may be transient; only error out if this same channel was also the most recent one to come back empty
if channelID == mostRecentlyFailedChannel {
return nil, errors.Err("playlist items not found")
}
mostRecentlyFailedChannel = channelID
}
vids, err := getVideos(channelID, videoIDs, videoParams.Stopper.Ch(), videoParams.IPPool)
if err != nil {
return nil, err
}
for _, item := range vids {
positionInList := playlistMap[item.ID]
videoToAdd, err := sources.NewYoutubeVideo(videoParams.VideoDir, item, positionInList, videoParams.Stopper, videoParams.IPPool)
if err != nil {
return nil, errors.Err(err)
}
videos = append(videos, videoToAdd)
}
for k, v := range syncedVideos {
newMetadataVersion := int8(2)
if !v.Published && v.MetadataVersion >= newMetadataVersion {
continue
}
if _, ok := playlistMap[k]; !ok {
videos = append(videos, sources.NewMockedVideo(videoParams.VideoDir, k, channelID, videoParams.Stopper, videoParams.IPPool))
}
}
sort.Sort(byPublishedAt(videos))
return videos, nil
}
// CountVideosInChannel is unused for now... keeping it here just in case
func CountVideosInChannel(channelID string) (int, error) {
url := "https://socialblade.com/youtube/channel/" + channelID
req, _ := http.NewRequest("GET", url, nil)
req.Header.Add("User-Agent", downloader.ChromeUA)
req.Header.Add("Accept", "*/*")
req.Header.Add("Host", "socialblade.com")
res, err := http.DefaultClient.Do(req)
if err != nil {
return 0, errors.Err(err)
}
defer res.Body.Close()
var line string
scanner := bufio.NewScanner(res.Body)
for scanner.Scan() {
if strings.Contains(scanner.Text(), "youtube-stats-header-uploads") {
line = scanner.Text()
break
}
}
if err := scanner.Err(); err != nil {
return 0, err
}
if line == "" {
return 0, errors.Err("upload count line not found")
}
matches := regexp.MustCompile(">([0-9]+)<").FindStringSubmatch(line)
if len(matches) != 2 {
return 0, errors.Err("upload count not found with regex")
}
num, err := strconv.Atoi(matches[1])
if err != nil {
return 0, errors.Err(err)
}
return num, nil
}
func ChannelInfo(channelID string) (*YoutubeStatsResponse, error) {
url := "https://www.youtube.com/channel/" + channelID + "/about"
req, _ := http.NewRequest("GET", url, nil)
req.Header.Add("User-Agent", downloader.ChromeUA)
req.Header.Add("Accept", "*/*")
res, err := http.DefaultClient.Do(req)
if err != nil {
return nil, errors.Err(err)
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, errors.Err(err)
}
pageBody := string(body)
//26 is the length of the marker `window["ytInitialData"] = `, so a missing marker (Index returning -1) yields the sentinel value 25
dataStartIndex := strings.Index(pageBody, "window[\"ytInitialData\"] = ") + 26
if dataStartIndex == 25 {
//fall back to the newer page layout (20 is the length of `var ytInitialData = `)
dataStartIndex = strings.Index(pageBody, "var ytInitialData = ") + 20
}
dataEndIndex := strings.Index(pageBody, "]}}};") + 4
if dataEndIndex < dataStartIndex {
return nil, errors.Err("end index is lower than start index. cannot extract channel info!")
}
data := pageBody[dataStartIndex:dataEndIndex]
var decodedResponse YoutubeStatsResponse
err = json.Unmarshal([]byte(data), &decodedResponse)
if err != nil {
return nil, errors.Err(err)
}
return &decodedResponse, nil
}
func getVideos(channelID string, videoIDs []string, stopChan stop.Chan, ipPool *ip_manager.IPPool) ([]*ytdl.YtdlVideo, error) {
config := sdk.GetAPIsConfigs()
var videos []*ytdl.YtdlVideo
for _, videoID := range videoIDs {
if len(videoID) < 5 {
continue
}
select {
case <-stopChan:
return videos, errors.Err("interrupted by user")
default:
}
state, err := config.VideoState(videoID)
if err != nil {
return nil, errors.Err(err)
}
if state == "published" {
continue
}
video, err := downloader.GetVideoInformation(videoID, stopChan, ipPool)
if err != nil {
errSDK := config.MarkVideoStatus(shared.VideoStatus{
ChannelID: channelID,
VideoID: videoID,
Status: "failed",
FailureReason: err.Error(),
})
logUtils.SendErrorToSlack(fmt.Sprintf("Skipping video (%s): %s", videoID, errors.FullTrace(err)))
if errSDK != nil {
return nil, errors.Err(errSDK)
}
} else {
videos = append(videos, video)
}
}
return videos, nil
}
type YoutubeStatsResponse struct {
Contents struct {
TwoColumnBrowseResultsRenderer struct {
Tabs []struct {
TabRenderer struct {
Title string `json:"title"`
Selected bool `json:"selected"`
Content struct {
SectionListRenderer struct {
Contents []struct {
ItemSectionRenderer struct {
Contents []struct {
ChannelAboutFullMetadataRenderer struct {
Description struct {
SimpleText string `json:"simpleText"`
} `json:"description"`
ViewCountText struct {
SimpleText string `json:"simpleText"`
} `json:"viewCountText"`
JoinedDateText struct {
Runs []struct {
Text string `json:"text"`
} `json:"runs"`
} `json:"joinedDateText"`
CanonicalChannelURL string `json:"canonicalChannelUrl"`
BypassBusinessEmailCaptcha bool `json:"bypassBusinessEmailCaptcha"`
Title struct {
SimpleText string `json:"simpleText"`
} `json:"title"`
Avatar struct {
Thumbnails []struct {
URL string `json:"url"`
Width int `json:"width"`
Height int `json:"height"`
} `json:"thumbnails"`
} `json:"avatar"`
ShowDescription bool `json:"showDescription"`
DescriptionLabel struct {
Runs []struct {
Text string `json:"text"`
} `json:"runs"`
} `json:"descriptionLabel"`
DetailsLabel struct {
Runs []struct {
Text string `json:"text"`
} `json:"runs"`
} `json:"detailsLabel"`
ChannelID string `json:"channelId"`
} `json:"channelAboutFullMetadataRenderer"`
} `json:"contents"`
} `json:"itemSectionRenderer"`
} `json:"contents"`
} `json:"sectionListRenderer"`
} `json:"content"`
} `json:"tabRenderer"`
} `json:"tabs"`
} `json:"twoColumnBrowseResultsRenderer"`
} `json:"contents"`
Header struct {
C4TabbedHeaderRenderer struct {
ChannelID string `json:"channelId"`
Title string `json:"title"`
Avatar struct {
Thumbnails []struct {
URL string `json:"url"`
Width int `json:"width"`
Height int `json:"height"`
} `json:"thumbnails"`
} `json:"avatar"`
Banner struct {
Thumbnails []struct {
URL string `json:"url"`
Width int `json:"width"`
Height int `json:"height"`
} `json:"thumbnails"`
} `json:"banner"`
VisitTracking struct {
RemarketingPing string `json:"remarketingPing"`
} `json:"visitTracking"`
SubscriberCountText struct {
SimpleText string `json:"simpleText"`
} `json:"subscriberCountText"`
} `json:"c4TabbedHeaderRenderer"`
} `json:"header"`
Metadata struct {
ChannelMetadataRenderer struct {
Title string `json:"title"`
Description string `json:"description"`
RssURL string `json:"rssUrl"`
ChannelConversionURL string `json:"channelConversionUrl"`
ExternalID string `json:"externalId"`
Keywords string `json:"keywords"`
OwnerUrls []string `json:"ownerUrls"`
Avatar struct {
Thumbnails []struct {
URL string `json:"url"`
Width int `json:"width"`
Height int `json:"height"`
} `json:"thumbnails"`
} `json:"avatar"`
ChannelURL string `json:"channelUrl"`
IsFamilySafe bool `json:"isFamilySafe"`
VanityChannelURL string `json:"vanityChannelUrl"`
} `json:"channelMetadataRenderer"`
} `json:"metadata"`
Topbar struct {
DesktopTopbarRenderer struct {
CountryCode string `json:"countryCode"`
} `json:"desktopTopbarRenderer"`
} `json:"topbar"`
Microformat struct {
MicroformatDataRenderer struct {
URLCanonical string `json:"urlCanonical"`
Title string `json:"title"`
Description string `json:"description"`
Thumbnail struct {
Thumbnails []struct {
URL string `json:"url"`
Width int `json:"width"`
Height int `json:"height"`
} `json:"thumbnails"`
} `json:"thumbnail"`
SiteName string `json:"siteName"`
AppName string `json:"appName"`
AndroidPackage string `json:"androidPackage"`
IosAppStoreID string `json:"iosAppStoreId"`
IosAppArguments string `json:"iosAppArguments"`
OgType string `json:"ogType"`
URLApplinksWeb string `json:"urlApplinksWeb"`
URLApplinksIos string `json:"urlApplinksIos"`
URLApplinksAndroid string `json:"urlApplinksAndroid"`
URLTwitterIos string `json:"urlTwitterIos"`
URLTwitterAndroid string `json:"urlTwitterAndroid"`
TwitterCardType string `json:"twitterCardType"`
TwitterSiteHandle string `json:"twitterSiteHandle"`
SchemaDotOrgType string `json:"schemaDotOrgType"`
Noindex bool `json:"noindex"`
Unlisted bool `json:"unlisted"`
FamilySafe bool `json:"familySafe"`
Tags []string `json:"tags"`
} `json:"microformatDataRenderer"`
} `json:"microformat"`
}
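ChannelInfo scrapes the ytInitialData JSON blob out of the channel's /about page, which is brittle by nature (the fallback to `var ytInitialData = ` exists because the page layout already changed once). A hedged usage sketch reading the channel-level metadata (the import path is assumed from the module layout):

package main

import (
	"fmt"
	"log"

	"github.com/lbryio/ytsync/v5/ytapi"
)

func main() {
	info, err := ytapi.ChannelInfo("UCNQfQvFMPnInwsU_iGYArJQ")
	if err != nil {
		log.Fatal(err)
	}
	// the channelMetadataRenderer block carries the channel-level fields the sync cares about
	meta := info.Metadata.ChannelMetadataRenderer
	fmt.Println(meta.Title, meta.ExternalID, meta.IsFamilySafe)
}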

ytapi/ytapi_test.go Normal file (13 lines added)

@ -0,0 +1,13 @@
package ytapi
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestChannelInfo(t *testing.T) {
info, err := ChannelInfo("UCNQfQvFMPnInwsU_iGYArJQ")
assert.NoError(t, err)
assert.NotNil(t, info)
}