Compare commits

..

336 commits

Author SHA1 Message Date
Niko Storni
a01aa6dc06 upgrade dependencies 2023-03-07 19:14:31 +01:00
Niko Storni
e6a3f40029 Merge branch 'scheduled-unlisted' 2022-11-01 22:28:32 +01:00
Niko Storni
ced09b22ca convert type to variadic 2022-11-01 22:28:18 +01:00
Thomas Zarebczan
fa55e82bc1
feat: add scheduled and unlisted 2022-11-01 16:59:07 -04:00
Niko Storni
77944ba3af fix nil ptr 2022-09-27 22:56:34 +02:00
Niko Storni
5f52a995a7 retry failed messages 2022-09-27 22:48:51 +02:00
Niko Storni
014adbb315 actually use the new client 2022-09-27 21:35:13 +02:00
Niko Storni
73228d1bfb use custom http client for slack messages 2022-09-27 20:21:02 +02:00
Niko Storni
69cfd7f798 add account_send
adjust transaction summary fields
2022-09-21 22:53:02 +02:00
Niko Storni
41555cbda2 add helper for special claim tags 2022-09-16 03:18:01 +02:00
Niko Storni
2adb8af5b6 add funding accounts to channel create options 2022-08-15 22:41:00 +02:00
Mark Beamer Jr
9130630afe
Added additional functionality to allow for both objects and arrays to be returned from internal-apis client.
Also added a raw API url call and converted current call to a call to a resource so we are not restricted to that format to use the library.
2022-08-05 13:51:05 -04:00
Alex Grin
8e6d493fbf
Merge pull request #93 from andybeletsky/master
Add metadata to error returned by jsonrpc
2022-06-15 11:11:14 -04:00
Andrey Beletsky
e19facdded Add metadata to error returned by jsonrpc 2022-06-15 00:49:46 +07:00
Niko Storni
365d23f0e2 add support for deterministic pub keys
fix a couple of bugs
2022-06-10 18:18:26 +02:00
Niko Storni
e5ab0f883e update dependencies and go 2022-05-04 18:27:35 +02:00
Alex Grin
d0aeb0c22b
Merge pull request #91 from lbryio/bugfix/jeffreypicard/handle_colons_correctly 2022-03-21 14:25:39 -04:00
Jeffrey Picard
306db74279 We no longer use colons for sequence numbers in urls 2022-03-18 16:27:02 -04:00
Mark Beamer Jr
a0391bec79
Extend claim search for use in livestreaming 2022-02-08 16:00:38 -05:00
Alex Grin
5d62502bde
Update readme.md 2022-01-17 09:40:58 -05:00
Alex Grin
91ac7abf08
Merge pull request #90 from lbryio/fix_dht 2021-10-05 09:03:23 -04:00
Victor Shyba
8161f48c15 apply gofmt 2021-10-04 23:21:59 -03:00
Victor Shyba
d11230aaf8 show results over RPC 2021-10-03 04:53:41 -03:00
Victor Shyba
8fd87dfc31 parse page and always try to parse what is left as the result 2021-10-03 04:53:36 -03:00
Victor Shyba
4056c44c2e encode contacts as hex to be friendly on RPC return 2021-10-03 04:49:33 -03:00
Victor Shyba
dd451eb72b alpha was increased to 5 2021-10-03 04:49:04 -03:00
Alex Grin
a553e18d3b
Update readme.md 2021-09-28 10:15:05 -04:00
Niko Storni
3e18b74da0 fix stream by magic
upgrade to go 1.16
2021-08-24 11:46:06 -04:00
Mark Beamer Jr
55dceeaa4e
Add OAuth client for internal-apis to be used for odysee-apis 2021-08-08 14:09:19 -04:00
Mark Beamer Jr
a1177c17d3
Status error should default to internal server error. Otherwise it will trigger a nil pointer and subsequent panic. 2021-08-08 14:09:19 -04:00
Alex Grin
2b155597bf
Merge pull request #89 from jeffreypicard/add_utility_funcs 2021-06-25 10:50:58 -04:00
Jeffrey Picard
87bf89a109 Cleanup utility functions and add comments 2021-06-23 15:29:23 -04:00
Jeffrey Picard
931d786c52 Add utility functions from hub 2021-06-17 23:59:06 -04:00
Mark Beamer Jr
6516df1418
Add signing channel to transaction (transaction_show) 2021-04-16 15:53:22 -04:00
Mark Beamer Jr
3027fb9b98
Add transaction show API to library. 2021-04-15 16:03:45 -04:00
Niko Storni
ed51ece75c revert is_spent changes because booleans suddenly have a single state
present (doesn't matter if true or false) and missing.
TIL /s
2021-04-13 00:29:18 +02:00
Alex Grintsvayg
e00cdd0237
upgrade go-errors, which adds errors.Is compat 2021-04-02 14:24:09 -04:00
Alex Grintsvayg
6bc878d657
terminate stream after consuming all the data 2021-04-02 14:16:46 -04:00
Alex Grintsvayg
be64130ae1
json convenience method 2021-04-02 12:57:06 -04:00
Alex Grintsvayg
419e7c88a3
switch to io.Reader interface for stream creation 2021-04-01 17:01:49 -04:00
Mark Beamer Jr
988178df50
Move signature to keys package 2021-03-15 20:00:44 -04:00
Mark Beamer Jr
a365d63d16
Fix up PrivateKeyToDER function and add tests 2021-03-14 12:26:53 -04:00
Mark Beamer Jr
bd452c421f
Add PrivateKeyToDER function 2021-03-14 04:17:27 -04:00
Niko Storni
4c3372992c improve claim listing 2021-03-12 02:06:52 +01:00
Mark
3c99b84721
Merge pull request #86 from lbryio/cors
Cors
2021-03-10 21:04:16 -05:00
Mark Beamer Jr
d7e84c6b97
Add CORS to api server configuration 2021-03-10 20:55:59 -05:00
Mark Beamer Jr
4580a95b74
Add CORS to api server 2021-03-10 20:04:48 -05:00
Andrey Beletsky
29773829af
Merge pull request #85 from lbryio/lbryinc-errors
Lbryinc errors
2021-02-23 00:37:59 +07:00
Andrey Beletsky
ef1b43ac62 Do not treat server errors as API originated errors 2021-02-16 19:40:18 +07:00
Andrey Beletsky
39e5821760 Run gofmt on validate.go 2021-02-16 18:38:34 +07:00
Andrey Beletsky
cb68cb004e Fix travis go version 2021-02-16 18:34:53 +07:00
Andrey Beletsky
eb6bb93500 Discern API errors from transport level errors 2021-02-16 18:30:09 +07:00
Andrey Beletsky
d0df93ebac Update go to 1.15, update testify library 2021-02-16 18:29:41 +07:00
Alex Grintsvayg
8c41d8ccd9
add akins url parsing 2020-12-22 16:31:18 -05:00
Mark
e9753ffdc7
Merge pull request #84 from lbryio/stake_supports
Rename packages to represent stakes (claims || supports)
2020-11-18 15:36:19 -05:00
Mark Beamer Jr
69e03da94a
Add extraction of PrivateKey from pem 2020-11-18 01:30:50 -05:00
Mark Beamer Jr
b3f7657c1b
Rename packages to represent stakes (claims || supports) instead of just claim 2020-10-28 03:28:48 -04:00
Mark Beamer Jr
29574578c1
Add strings.go utility 2020-09-01 14:36:59 -04:00
Niko Storni
73382bb021 fix lbry.go dep ver 2020-09-01 19:58:08 +02:00
Niko Storni
69e2f6231c update modules 2020-09-01 19:55:06 +02:00
Alex Grintsvayg
fb88808c97
update lbryschema imports 2020-09-01 13:45:55 -04:00
Alex Grintsvayg
a16797cc53
merge in lbryschema.go 2020-09-01 13:31:15 -04:00
Alex Grintsvayg
b14fb6c18b
prep repo for merge into lbry.go 2020-09-01 13:30:36 -04:00
Niko Storni
8db975b532 fix go modules 2020-09-01 18:46:56 +02:00
Alex Grintsvayg
fcade74753
export query.Placeholders 2020-07-10 14:01:40 -04:00
Niko Storni
f1d8bc0ffc Merge branch 'improvement/get_response' 2020-06-03 22:12:05 +02:00
Andrey Beletsky
9d8b9330f9 Add PurchaseReceipt to Claim 2020-06-03 22:11:42 +02:00
Andrey Beletsky
3ae040d677 Remove a reference to points_paid field in get response 2020-06-03 22:11:42 +02:00
Alex Grintsvayg
07d2d00b0d
add wallet id to transaction_list call 2020-05-14 16:53:22 -04:00
Niko Storni
86d0678274 align SDK to v0.73.1 2020-05-14 05:39:51 +02:00
Niko Storni
bdf6240ed8 fix json value (woops) 2020-04-01 04:26:27 +02:00
Niko Storni
e8b93e3bb1 fix utxo list response 2020-04-01 04:23:59 +02:00
Niko Storni
9a9be92d27 include full transaction response 2020-03-31 04:11:44 +02:00
Niko Storni
414be62d61 add support for txo_spend 2020-03-31 04:05:43 +02:00
Alex Grintsvayg
21017a38a7
update dht seed nodes 2020-03-16 11:19:19 -04:00
Mark Beamer Jr
1a30fb743b
Update Simple Send to work with internal-apis rewards. 2020-02-04 00:32:43 -05:00
Thomas Zarebczan
db8aa21b35
Merge pull request #79 from ykris45/patch-1
Update LICENSE
2020-02-03 17:13:08 -05:00
Mark
c4772e61c5
Merge pull request #80 from lbryio/lbrycrd_go
Add chain params to lbrycrd client creation
2020-02-03 00:35:42 -05:00
Mark Beamer Jr
9541d765a9
Add chain params to lbrycrd client creation
Add Bech notation to alternative chain params
2020-02-03 00:12:45 -05:00
YULIUS KURNIAWAN KRISTIANTO
f9717328bc
Update LICENSE 2020-02-03 05:59:07 +07:00
Alex Grintsvayg
1835deb2c9
fix mainnet bech32 prefix 2020-01-08 20:45:10 -05:00
Mark Beamer Jr
dc6b15a372
add floats 2020-01-05 03:31:47 -05:00
Mark Beamer Jr
635bf931c8
Add PtrToNullTime 2019-12-23 01:51:02 -05:00
Andrey Beletsky
d12e431b40 Add has_verified_email method to lbryinc client 2019-12-17 21:06:52 +07:00
Mark Beamer Jr
8f8b2e605b
Add ability to specify the format of the query parameters via json tag. 2019-12-15 01:37:44 -05:00
Mark Beamer Jr
9b6fea461c
Add ability to control the format of the json response from api server. 2019-12-11 21:21:21 -05:00
Mark Beamer Jr
a391c83a2f
Add ordered_map to lbry.go for use in other repositories. 2019-12-11 20:39:38 -05:00
Niko Storni
06764c3d00
add support for SDK 0.48.1 2019-12-06 14:43:19 -05:00
Niko Storni
b68c49ac2f big brain fart, wrong repo 2019-11-20 11:16:13 -05:00
Niko Storni
96ace4b850 disable click tracking for verifications 2019-11-20 11:15:03 -05:00
Niko Storni
6782b19c3d Merge branch 'fund-ids' 2019-11-19 19:31:53 -05:00
Niko Storni
bcadcd1eca add funding ids for claim operations 2019-11-12 18:57:10 -05:00
Niko
ef1b10b601
Merge pull request #75 from lbryio/replace-v2
V2 update
2019-10-25 16:49:59 +02:00
Andrey Beletsky
aec378bb36 Fix failing wallet command test 2019-10-18 12:40:17 +07:00
Niko Storni
fa9bab156a revert btcutil update 2019-10-10 16:41:33 +02:00
Niko Storni
de5b32b1b5 update all modules 2019-10-10 05:16:23 +02:00
Niko Storni
dd2171172d update to v2 2019-10-10 05:07:33 +02:00
Niko Storni
1155ea6b9d add stream_list 2019-10-10 04:29:11 +02:00
Alex Grin
6918a2436a
Merge pull request #74 from StrikerRUS/patch-1
bump year in license
2019-10-09 10:37:13 -04:00
Nikita Titov
7e524c6b08
bump year in license 2019-10-09 00:06:05 +03:00
sayplastic
9c278d131d
Merge pull request #73 from lbryio/feature/wallet
Add wallet commands to JSON-RPC client
2019-10-08 13:08:57 +07:00
Niko Storni
b1c090e28d fix claim search not returning claim values 2019-10-03 22:50:07 +02:00
Andrey Beletsky
b03e0a7db8 Go mod tidy 2019-10-04 01:09:42 +07:00
Andrey Beletsky
33071ff6c1 Add test for wallet-less ChannelImport call 2019-10-04 00:51:24 +07:00
Andrey Beletsky
30a9a39248 Add ChannelImport method 2019-10-03 20:03:31 +07:00
Andrey Beletsky
49c9531404 Fix wallet opts passing 2019-10-02 14:56:37 +07:00
Andrey Beletsky
c68c7e05fe Bring back versioning to the module 2019-10-02 14:36:04 +07:00
Andrey Beletsky
534831e6e5 Improve args passing to SDK for WalletCreate command 2019-10-02 14:29:13 +07:00
Niko Storni
deab868c53 add bid to channel update params 2019-09-27 15:36:23 +02:00
Andrey Beletsky
d5102a9cf6 Add WalletList command 2019-09-25 18:00:37 +07:00
Andrey Beletsky
ac75979453 Add wallet commands to JSON-RPC client 2019-09-25 17:47:12 +07:00
Niko Storni
712e346bd2 woopsy 2019-09-24 20:37:03 +02:00
Niko Storni
85e34cb335 add accountAdd support
refactor account management
2019-09-24 20:35:04 +02:00
Niko Storni
c36c67961f Merge branch 'accountid-and-missing' 2019-09-24 18:31:17 +02:00
Niko Storni
025c715ab4 align to new SDK v0.42.0 2019-09-24 18:31:01 +02:00
Thomas Zarebczan
af728f12d9 add new types
Wasn't sure about type `UTXOReleaseResponse *string` 

This is the response: ```
{
  "jsonrpc": "2.0",
  "result": null
}
```
2019-09-24 16:04:13 +02:00
Thomas Zarebczan
1f1848a408 Adds account_id to channel create (and update), missing functions 2019-09-24 16:04:13 +02:00
Niko Storni
d032c842d5 remove problematic data 2019-09-24 16:03:49 +02:00
Niko Storni
d6f5199acd tidy up go mods 2019-09-24 16:02:26 +02:00
sayplastic
28aad86e4a
Merge pull request #72 from lbryio/feature/remote_ip
Forward real IP in internal-api client calls
2019-09-20 13:52:07 +07:00
Andrey Beletsky
c5c634e477 Pass user method url as a constant 2019-09-20 13:47:45 +07:00
Andrey Beletsky
a8c339e5b4 Forward real IP in internal-api client calls 2019-09-18 18:02:03 +07:00
Alex Grintsvayg
fd916d9eae
expose blob hash sizes as constants 2019-09-10 16:42:36 -04:00
Niko Storni
2d45f059ec improve error handling on json marshalling 2019-09-10 16:07:44 +02:00
sayplastic
e5850035dd
Merge pull request #70 from lbryio/feature/stream_create_account_id
Add account ID to StreamCreateOptions
2019-09-03 01:08:10 +07:00
Andrey Beletsky
0cfc8e230c Add account ID to StreamCreateOptions 2019-09-03 00:47:52 +07:00
Niko Storni
f3a1fbdd53 use the right library 2019-08-28 15:12:28 +02:00
Mark
ecbd404da0
Merge pull request #69 from lbryio/supports
Add ClaimSupport ( tippable too)
2019-08-27 23:12:44 -04:00
Mark Beamer Jr
2768cdd312
Add ClaimSupport ( tippable too) 2019-08-27 21:31:39 -04:00
Mark
564595cfc3
Merge pull request #66 from lbryio/claims
Add claim management for lbrycrd client
2019-08-27 21:05:07 -04:00
Niko Storni
3a3377d0e5 add support related commands
improve tests
2019-08-27 00:25:47 +02:00
Niko Storni
38861421f8 Merge branch '0.39.0_changes' 2019-08-26 23:15:58 +02:00
Mark Beamer Jr
a7bb3cf336 Update AccountBalanceResponse 2019-08-26 23:15:47 +02:00
Mark Beamer Jr
8fa28d3d65
Remove wait function for Group - recursive call. 2019-08-25 16:20:01 -04:00
Mark Beamer Jr
f8a231286a
Remove listwaitingon from Wait call. Too much logging. 2019-08-25 16:14:50 -04:00
Mark Beamer Jr
ea5b70e8fc
Remove unlocked since its there already. 2019-08-25 16:12:21 -04:00
Mark Beamer Jr
26b0c7356d
Add additional logging to wait call to know the list of routines active when wait is called. 2019-08-25 16:09:50 -04:00
Mark Beamer Jr
7558397877
Add logging to address decode error
Add test for real main net address
Add name to mainnet chain params
2019-08-24 21:04:24 -04:00
Niko Storni
fb7d045753 fix claim_search 2019-08-16 13:45:40 +02:00
Mark Beamer Jr
4d17553a23
Add claim management for lbrycrd client 2019-08-14 00:44:27 -04:00
Alex Grintsvayg
40633c949e
GetInfo() dropped in latest lbrycrdd 2019-08-07 08:40:26 -04:00
Mark
a17fa3ad5f
Merge pull request #64 from lbryio/channelexport
Add channel export call
2019-07-28 10:37:01 -04:00
Mark Beamer Jr
560858ba0a
Add channel export call 2019-07-28 10:05:18 -04:00
Niko Storni
ada0ce0484 don't panic if magic doesn't work 2019-07-10 17:57:04 +02:00
Mark
461ae6a16b
Merge pull request #61 from lbryio/go_sdk38_client_updates
Add updated structure for response api call based on SDK 38 release.
2019-07-09 21:47:30 -04:00
Niko Storni
31456e7bae remove timestamp for now 2019-06-25 21:20:29 -04:00
Niko Storni
060970b7c0 stupid IDE 2019-06-25 21:07:08 -04:00
Niko Storni
0f894aaecc fix timestamp in utxo list 2019-06-25 21:04:10 -04:00
Niko Storni
8367901104 fix utxo list response 2019-06-25 20:34:51 -04:00
Mark Beamer Jr
7e445e0cf8
Add updated structure for response api call based on SDK 38 release. 2019-06-24 00:29:15 -04:00
Niko Storni
f5de4e96c3 don't overwrite values when omitted 2019-06-12 22:18:03 +02:00
Niko Storni
6226c2690d really fix account_set 2019-06-12 05:23:48 +02:00
Niko Storni
3335233566 fix account_set 2019-06-12 04:43:27 +02:00
Niko Storni
faac895509 fix test
I hit push accidentally
2019-06-12 02:54:18 +02:00
Niko Storni
bc9886b3e8 add ability to use everything for account_fund 2019-06-12 02:53:42 +02:00
Niko Storni
963177cd4c add blocking feature to certain SDK calls 2019-06-11 18:40:48 +02:00
Mark Beamer Jr
6d2f69a36f
remove address validation 2019-06-02 13:32:30 -04:00
Niko Storni
b444d284c6 align params to sdk 2019-06-01 03:38:48 +02:00
Niko Storni
f853f2a00b prevent overwriting properties when values not passed 2019-06-01 03:17:10 +02:00
Niko Storni
a8afd1621e fix stream abandon 2019-05-30 00:18:23 +02:00
Niko Storni
88aad289cf extra fixes 2019-05-28 19:27:04 +02:00
Niko Storni
de1476becf align to SDK release
fix bugs
2019-05-28 18:29:14 +02:00
sayplastic
6ae869bdb3
Merge pull request #60 from lbryio/feature/account_remove
Add singular account_list method, account_remove
2019-05-24 22:06:04 +07:00
Andrey Beletsky
818b00190d Cleanup accounts jsonrpc code 2019-05-24 21:52:04 +07:00
Andrey Beletsky
630260d0b1 Fix AccountRemove test 2019-05-23 00:58:42 +07:00
Andrey Beletsky
15a137c502 Tidy up go.mod 2019-05-22 22:44:47 +07:00
Andrey Beletsky
438be5767a Remove Account from AccountCreateResponse 2019-05-22 18:32:20 +07:00
Andrey Beletsky
6c7f59bfb8 Fix AccountRemoveResponse declaration 2019-05-22 11:45:10 +07:00
Andrey Beletsky
7220ec8943 Use Account struct instead of derived SingleAccountListResponse 2019-05-22 11:40:18 +07:00
Andrey Beletsky
3615ab5e55 Attempt to fix account_create method 2019-05-22 01:17:22 +07:00
Andrey Beletsky
7ba00dffcc Add singular account_list method, account_remove 2019-05-22 00:12:00 +07:00
Niko Storni
76b73ef0c7 rollback btcd libraries 2019-05-16 22:46:27 +02:00
sayplastic
5424746066
Merge pull request #59 from lbryio/feature/lbryinc3
Add internal-apis client
2019-05-10 20:16:04 +07:00
Andrey Beletsky
b6b411847a Fix lbryinc client stylistics after code review 2019-05-10 20:10:56 +07:00
Andrey Beletsky
6bda111ffa Add internal-apis client 2019-05-10 19:47:49 +07:00
Niko Storni
77ea5aa522 update readme with versioning instructions
fix travis
2019-05-10 14:37:16 +02:00
Niko
abb34bf2dc
Merge pull request #50 from lbryio/lbrynet-updates
lbrynet SDK updates (support for lbrynet 0.37.*)
2019-05-10 14:16:28 +02:00
Niko Storni
c3aeba7e17 add deploy script for semver 2019-05-10 14:10:56 +02:00
Niko Storni
8e52397bb4
cleanup branch 2019-05-10 14:04:17 +02:00
Niko Storni
ebc3b6a55d
add channelUpdate
fix channel create params
2019-05-10 14:04:17 +02:00
Niko Storni
7b0a5ae028
update claim structure
fix bug in stream update
2019-05-10 14:04:17 +02:00
Niko Storni
13ef7571b2
parameterize blockchain name 2019-05-10 14:04:16 +02:00
Niko Storni
b63ee94973
add GetStreamSize
BY MAGIC!

add fileSize for stream update

fix mistake
2019-05-10 14:04:16 +02:00
Andrey Beletsky
2ae66ad3f7
Update lbryschema version
add file size param
2019-05-10 14:04:14 +02:00
Mark Beamer Jr
52702e2226
Added support for 0.36.0 of lbrynet.
upgraded to latest types/schema repo

fix several bugs

(this only works on mainnet)
2019-05-10 14:04:13 +02:00
Niko Storni
f827da4c61
reinstate claim type
fix bug

adjust types

add temporary video_duration until the SDK patches it
2019-05-10 14:04:12 +02:00
Niko Storni
3d2986a85c
add accountSet
fix bugs

fix rebase conflicts
2019-05-10 14:04:08 +02:00
Mark Beamer Jr
1d3e6c524b
removed Claim_Type_name as it is no longer used in latest types repo
more fixes
2019-05-10 14:04:04 +02:00
Andrey Beletsky
5f847fb035
Add AccountCreate method 2019-05-10 14:04:02 +02:00
Niko Storni
6f0d34f863
Update to ALMOST support 0.35 with new metadata 2019-05-10 14:04:02 +02:00
Niko Storni
41b4a3684a
update status response
wording

Make claim_sequence a signed int
2019-05-10 14:04:02 +02:00
Niko Storni
38d493fdd1
upgrade lbrynet to support 0.32.* 2019-05-10 14:03:58 +02:00
Niko Storni
d33ac919d1
fix resolution
add broadcast flag
2019-05-10 14:03:58 +02:00
Niko Storni
ec0eec8ac4
ultimate needed SDK calls update
removed useless pagination

add type to claim signature
2019-05-10 14:03:56 +02:00
Niko Storni
2437a06505
add channelNew and claimAbandon
simplified code
2019-05-10 14:03:46 +02:00
Niko Storni
e90b6dfcc7
add more SDK calls
export jsonrpc.Decode
add dep as dependencies manager
2019-05-10 14:03:46 +02:00
Niko Storni
c0a12af3ae
add 3 new SDK calls 2019-05-10 14:03:46 +02:00
Mark Beamer Jr
d701bab7f6
Fixed claim signing after changes in 90bef98bc3 2019-05-04 00:00:23 -04:00
Mark Beamer Jr
c54836bca0
Added based types to serialization. 2019-04-28 19:10:07 -04:00
Mark Beamer Jr
322c658307
Refactored to work with types repo @3af92598 2019-04-21 23:06:48 -04:00
Mark Beamer Jr
16f251b7f3
ok my mistake, back to 0.5.0 - will update chainquery. 2019-04-14 00:25:15 -04:00
Mark Beamer Jr
ce30f66823
lowered slack api version to 0.2.0 - upgrading is a breaking change. 2019-04-14 00:13:26 -04:00
Mark Beamer Jr
3d72de1cec
lowered slack api version 2019-04-14 00:08:39 -04:00
Mark Beamer Jr
bad2d869f5
Added claim_id for v1 claim migration 2019-04-13 23:23:41 -04:00
Mark Beamer Jr
846f1afaab
Updated lbry.go to the latest types version 2019-04-11 14:45:31 -04:00
Mark
414540ec6d
Merge pull request #7 from lbryio/proto3
added support for proto3 metadata definitions
2019-04-11 00:15:10 -04:00
Mark Beamer Jr
9159c7602d
added support for proto3 metadata definitions
added signing capabilities
all with unit tests
2019-04-07 00:29:12 -04:00
Alex Grintsvayg
211c2884c2
lbry.com switch 2019-03-18 12:26:43 -04:00
Niko Storni
74be347b9e
Merge branch 'interpolation' 2019-03-08 19:15:03 +01:00
Niko Storni
93b9606fb7 try and upgrade deps 2019-03-08 19:11:52 +01:00
Niko Storni
f942e2dac2 add slice interpolation 2019-03-08 18:02:44 +01:00
Alex Grintsvayg
969a142382
add decrypt function 2019-02-18 14:33:05 -05:00
Niko Storni
c0b19e9395 update slack lib 2019-02-12 19:08:24 +01:00
Alex Grin
6cd5deb884
Update readme.md 2019-01-31 10:10:39 -05:00
Alex Grin
d79ed15728
Update readme.md 2019-01-31 10:03:34 -05:00
Alex Grintsvayg
51d03937c2 move tests around 2019-01-10 08:38:00 -05:00
Alex Grintsvayg
30c3125016 update dht imports 2019-01-09 17:37:29 -05:00
Alex Grintsvayg
bf61bd7b92 Merge remote-tracking branch 'reflector/dht'
* reflector/dht: (75 commits)
  some linting
  fixed a few reflector issues, added some tests
  final fix
  fixed or silenced the last few things to get this building
  fixed some linting errors
  fix rpc server
  update dependencies, only run short tests in travis
  fix stuck goroutine
  announce still needs tests, but i tested a lot by hand and its good
  hash announcer / rate limiter
  refactor contact sort
  more
  handle peer port correctly
  Revert "add tcp port mapping to data store"
  iterative find value rpc command
  add jack.lbry.tech as a known node for debugging
  add tcp port mapping to data store
  bucket splitting is solid
  add dht start command, run a jsonrpc server to interact with the node
  grin's cleanup and some WIP
  ...
2019-01-09 17:35:53 -05:00
Alex Grintsvayg
a8bc4d4e36 move non-proto code into extras/, switch to go modules, drop old dht 2019-01-09 17:31:22 -05:00
Niko Storni
f986bd3066 split response to reuse type 2019-01-03 15:01:51 +01:00
Mark
1c7c0acc88
Merge pull request #49 from lbryio/stopper-debug
improve stopper debugging
2018-12-28 20:24:45 -05:00
Mark Beamer Jr
b0beb12fcf
Added package documentation and removed sync creation in New since it is not needed unless the stopper is initialized with NewDebug 2018-12-28 12:15:03 -05:00
Alex Grintsvayg
2268aecd68 improve stopper debugging 2018-12-28 07:12:44 -05:00
Mark
7593aa704f
Merge pull request #48 from lbryio/stopper_debugging
Added debugging logic for our stopper pattern.
2018-12-23 01:22:58 -05:00
Mark Beamer Jr
48ce64fe2c
Added debugging logic for our stopper pattern. Useful for when a stopper doesn't stop right away as expected. 2018-12-23 01:19:27 -05:00
Mark
50996a6b0d
Merge pull request #5 from lbryio/remove_migratedfrom
removed migratedFrom field on ClaimHelper, and added unit tests.
2018-11-09 21:39:47 -05:00
Mark Beamer Jr
e29b47a047
removed migratedFrom field on ClaimHelper, and added unit tests. 2018-11-09 21:37:59 -05:00
Mark
7c3d1d062a
Merge pull request #47 from lbryio/lbryschema_update
updated lbryschema dependencies.
2018-11-09 19:08:49 -05:00
Mark Beamer Jr
3360a22346
updated lbryschema dependencies. 2018-11-09 18:46:35 -05:00
Mark
38325c6d44
Merge pull request #3 from lbryio/add_migration
Added migration method.
2018-11-09 17:15:53 -05:00
Mark Beamer Jr
c84b36d76d
added travis support. 2018-11-09 17:09:38 -05:00
Mark Beamer Jr
95c75ed957
Added negative test cases. 2018-11-09 17:09:34 -05:00
Mark Beamer Jr
ccd1b2b84d
Added migration method and cleaned things up. 2018-11-09 17:07:47 -05:00
Mark
dac2dbda61
Merge pull request #4 from lbryio/move_to_types
moved to lbryio/types/go
2018-11-09 17:05:07 -05:00
Mark Beamer Jr
2e58228e91
moved to lbryio/types/go 2018-11-09 17:04:05 -05:00
Alex Grintsvayg
a24a1a6c5e
claim decoding example 2018-11-08 15:52:47 -05:00
Alex Grintsvayg
66b108751d
add price field to download response 2018-11-06 14:52:12 -05:00
Alex Grintsvayg
df5177dff0
update blobex 2018-10-25 14:25:30 -04:00
Alex Grintsvayg
dd284f117f
small refactor 2018-10-25 14:20:44 -04:00
Alex Grintsvayg
92e35bd23a
blobex draft 2018-10-25 14:20:21 -04:00
Alex Grintsvayg
6356308048
fully match python's stream creation and decoding 2018-10-23 16:41:19 -04:00
Alex Grintsvayg
ad5abf26a8 tests pass on converting stream to file 2018-10-09 21:23:35 -04:00
Alex Grintsvayg
e5ee4ed714
started work on go blob primitives. successfully matched python's blob crypto (excluding canonical JSON) 2018-10-08 16:35:40 -04:00
Alex Grintsvayg
aa401b2ff3
split out ytsync to https://github.com/lbryio/ytsync 2018-10-08 16:26:45 -04:00
Alex Grin
7a6eb57280
Merge pull request #45 from lbryio/ytsync-refactor
Ytsync refactor
2018-10-08 15:44:58 -04:00
Alex Grintsvayg
cff5ed9711 some linting 2018-08-30 20:20:15 -04:00
Alex Grintsvayg
544ee88311 fixed a few reflector issues, added some tests 2018-08-09 15:46:40 -04:00
Alex Grintsvayg
2bf37ffd03 final fix 2018-08-07 12:06:02 -04:00
Alex Grintsvayg
8cd69c1a17 fixed or silenced the last few things to get this building 2018-08-07 11:53:29 -04:00
Alex Grintsvayg
0a54d4da56 fixed some linting errors
found them using

```
gometalinter --skip=vendor --disable-all --enable=megacheck --enable=deadcode --enable=ineffassign --enable=interfacer --enable=errcheck ./...
```
2018-08-07 11:38:56 -04:00
Alex Grintsvayg
d9b4c0f94d fix rpc server 2018-08-07 11:10:12 -04:00
Alex Grintsvayg
299f718f25 update dependencies, only run short tests in travis 2018-08-06 19:52:34 -04:00
Alex Grintsvayg
f701b78160 fix stuck goroutine 2018-08-06 19:52:09 -04:00
Alex Grintsvayg
38eaa17a9b announce still needs tests, but i tested a lot by hand and its good 2018-07-26 21:30:22 -04:00
Alex Grintsvayg
5378fcbb94 hash announcer / rate limiter 2018-07-26 16:05:27 -04:00
Alex Grintsvayg
965bed9587 refactor contact sort 2018-07-25 11:44:11 -04:00
Jack Robison
0151982bea more
-add rpc_port argument
-run node on localhost for testing
2018-07-17 17:19:03 -04:00
Alex Grintsvayg
f068daf0b8 handle peer port correctly 2018-07-13 13:31:54 -04:00
Alex Grintsvayg
283ec46bd5 Revert "add tcp port mapping to data store"
This reverts commit 76b0e156366163ad9caae988253f66680a4c5bec.
2018-07-13 12:49:41 -04:00
Jack Robison
c2d0c0a2d9 iterative find value rpc command
-add NodeID to GetRoutingTable response

-remove other debugging commands
2018-07-13 11:25:08 -04:00
Jack Robison
b136ac26ce add jack.lbry.tech as a known node for debugging 2018-07-13 11:24:11 -04:00
Jack Robison
a98d10fbd5 add tcp port mapping to data store 2018-07-13 11:23:18 -04:00
Alex Grintsvayg
a3d0a3543a bucket splitting is solid 2018-07-12 14:34:24 -04:00
Jack Robison
6e80d3d8e1 add dht start command, run a jsonrpc server to interact with the node 2018-07-12 10:17:14 -04:00
Alex Grintsvayg
5cdcdfdd09 grin's cleanup and some WIP 2018-07-10 17:35:02 -04:00
Jack Robison
a3ac49182c more 2018-06-29 17:03:59 -04:00
Jack Robison
7b8ab21b6c expand empty buckets 2018-06-29 13:33:35 -04:00
Jack Robison
1b41525f4b add BucketRange to bucket struct
-initialize the routing table with one bucket covering the entire keyspace
2018-06-26 16:31:29 -04:00
Alex Grintsvayg
6fefcc4530 oops 2018-06-26 10:58:19 -04:00
Alex Grintsvayg
4e78c08818 partial switch to new stopgroup. need to refactor to take advantage of child cancelation 2018-06-25 16:49:40 -04:00
Alex Grintsvayg
66ca77b690 remove sendCancelable 2018-06-25 15:56:45 -04:00
Alex Grintsvayg
e534f5b972 correct node_finder to use loose parallelism 2018-06-25 15:48:57 -04:00
Alex Grintsvayg
ea9b181d16 broke out contact into separate file 2018-06-25 13:00:55 -04:00
Alex Grintsvayg
767e4cc548 better nodefinder logging 2018-06-22 09:30:16 -04:00
Alex Grintsvayg
8a620a82a3 minor fixes 2018-06-21 15:06:40 -04:00
Alex Grintsvayg
f61ea53c8c add token cache 2018-06-21 15:06:40 -04:00
Alex Grintsvayg
766f4f101d TODO: review this 2018-06-21 15:06:40 -04:00
Alex Grintsvayg
18caec8d40 add proto version to dht 2018-06-21 15:06:40 -04:00
Alex Grintsvayg
1c2175df39 fixed a few channel lockups, fixed announced port in dht, successfully announced and served a blob 2018-06-21 11:26:48 -04:00
Alex Grintsvayg
47a732688d small test 2018-06-19 14:06:35 -04:00
Alex Grintsvayg
5e346cc21a cluster automatically balances what nodes are announcing what hashes 2018-06-19 13:47:13 -04:00
Alex Grintsvayg
b19df481da starting to put together the pieces
- prism start command
- more configs for prism when assembling the pieces
- cluster notifies on membership change, determines hash range, announces hashes
2018-06-14 22:30:38 -04:00
Alex Grintsvayg
fc9b05b8c6 move bitmap into separate package 2018-06-14 20:10:44 -04:00
Alex Grintsvayg
5968953d05 fix some stoppers 2018-06-13 12:45:47 -04:00
Mark Beamer Jr
63e58248cc Addressed code reviews. 2018-06-13 09:44:24 -04:00
Mark Beamer Jr
470e3721d0 implemented stopper pattern
-made defer adjustments inline and deleted the separate function.
-adjusted method in upload to take the only parameter it requires.
-Implemented stopper param for reflector server
-Aligned Cluster New to NewCluster
-Adjusted DHT to use StopAndWait
-Removed blocking waitgroup add
-Unified all components under prism.
-Moved defer done outside of functions.
-renamed NewCluster to New
-fixed travis errors.
2018-06-13 09:36:44 -04:00
Mark Beamer Jr
8100010220 code cleanup
-Added travis support
-updated travis to analyze code beneath the root.
-refactored upload.go to fix travis errors.
-gocyclo should ignore test files. $GOFILES needed to be adjusted.
-fix rows.Close() ignoring error. Created func to handle so defer can be used when needed also.
-fixed ignored errors.
-fixed unit test that was not passing correctly to anonymous function.
-fixed govet error for passing param inside go func.
-removed returned error, in favor of logging instead.
-added error logging for ignored error.
-fixed potential race conditions.
-removed unused append
-fixed time usage to align with go standards.
-removed unused variables
-made changes for code review.
-code comments for exported functions.
-Documented bitmap.go and insert into contact list.
-Documented dht, message, bootstrap
-Fixed comment typos
-Documented message,node, routing_table, testing in DHT package.
-Documented server, client, prism, server and shared in peer and reflector packages.
-Documented the stores in Store package.
-made defer adjustments inline and deleted the separate function.
-adjusted method in upload to take the only parameter it requires.
2018-06-13 09:29:13 -04:00
Alex Grintsvayg
79527da8a9 get rid of ReadDeadline, switch to updated stopOnce 2018-05-24 17:49:43 -04:00
Alex Grintsvayg
6a0cab5f62 update stopper 2018-05-24 13:05:05 -04:00
Alex Grintsvayg
1c31e54860 self-store 2018-05-22 12:27:49 -04:00
Alex Grintsvayg
13321b53b4 reannounce 2018-05-22 12:16:08 -04:00
Alex Grintsvayg
14cceda81e added routing table saving, bitmap operations, lots of tests 2018-05-19 13:06:19 -04:00
Jack Robison
185433f2fd
Merge pull request #2 from roylee17/fix-concurrent-write-to-addressPrefix
avoid concurrent write to addressPrefixes
2018-05-15 18:53:26 -04:00
Tzu-Jung Lee
40f9c3f961 avoid concurrent write to addressPrefixes 2018-05-15 10:13:05 -07:00
Alex Grintsvayg
b9ee0b0644 added upload command, --conf and --verbose flags 2018-05-14 20:55:48 -04:00
Alex Grintsvayg
611635c87d large dht test is a real test 2018-05-13 21:38:30 -04:00
Alex Grintsvayg
03a1e61d57 added concurrent dht test 2018-05-13 21:17:29 -04:00
Alex Grintsvayg
ffd8c891db bootstrap node, bucket grooming 2018-05-13 17:33:49 -04:00
Alex Grintsvayg
1f7841e4d0 minor refactor 2018-05-01 16:18:38 -04:00
Alex Grintsvayg
079a6bf610 move most dht code into Node 2018-04-27 20:16:12 -04:00
Alex Grintsvayg
34ab2cd1ae actually this is the correct behavior 2018-04-24 21:13:48 -04:00
Alex Grintsvayg
a232f02e2d expose how many initial nodes are found 2018-04-24 21:13:37 -04:00
Alex Grintsvayg
8f5313fe76 fix rt updates, add Ping method 2018-04-24 18:12:17 -04:00
Alex Grintsvayg
8a5917a982 bind all ports by default 2018-04-24 17:20:03 -04:00
Alex Grintsvayg
3070798f97 dht quirk 2018-04-24 17:19:16 -04:00
Alex Grintsvayg
0aa1ce7343 kill printstate when dht stops 2018-04-05 16:39:05 -04:00
Alex Grintsvayg
f5f47aa079 add token manager, add token to request/response, sucessfully perform a STORE request on the python daemon 2018-04-05 16:27:28 -04:00
Alex Grintsvayg
5a37e49765 more improvements 2018-04-05 11:35:57 -04:00
Alex Grintsvayg
a1349b3889 switch request.Args to be a bitmap 2018-04-04 12:01:44 -04:00
Alex Grintsvayg
79addd0b6e bitmaps in more places 2018-04-04 11:43:27 -04:00
Alex Grintsvayg
035be0893d peek at raw bencoded data to avoid extra decode 2018-04-03 14:15:04 -04:00
Alex Grintsvayg
5bb275afaa node finder is its own thing. simplify exported dht api 2018-04-03 14:00:35 -04:00
Alex Grintsvayg
ea8d0d1eed proper types for some IDs 2018-04-03 13:38:01 -04:00
Alex Grintsvayg
a5ef461fc5 findNode and findValue implemented 2018-04-03 12:14:04 -04:00
Alex Grintsvayg
24c079a7dd dht iterativefind, some tests 2018-03-28 21:05:27 -04:00
Alex Grintsvayg
05e2d8529a add transaction manager, fix bencoding to support int keys, fix routing table bucketing 2018-03-23 20:15:35 -04:00
Alex Grintsvayg
883d76d8bb fix bitmap prefix len 2018-03-15 14:42:57 -04:00
Alex Grintsvayg
211dbd7c50 findvalue done 2018-03-11 06:21:02 -04:00
Alex Grintsvayg
e13fe7c2f0 lots more work. findnode should work now 2018-03-08 19:50:18 -05:00
Alex Grintsvayg
5c44ca40c2 store works. fixed some bencode bugs 2018-03-07 19:49:33 -05:00
Alex Grintsvayg
006a49bd67 better tests, better bencoding 2018-03-07 16:15:58 -05:00
Alex Grintsvayg
f565d0b78f basic dht + tests 2018-03-06 20:15:44 -05:00
Jack Robison
dd814b834b
formatting and imports 2018-02-15 14:51:51 -05:00
Jack Robison
cdaf3ac682
add SerializeClaimFromJSON , DecodeAddress, and EncodeAddress to python binding 2017-11-27 10:25:04 -05:00
Jack Robison
6d169425d1
split b58 checksum validation from address validation 2017-11-27 10:24:10 -05:00
Jack Robison
f53da5c3e5
add DecodeClaimJSON 2017-11-27 10:23:24 -05:00
Jack Robison
d89334f9eb
verify certificate fields 2017-11-27 10:22:57 -05:00
Jack Robison
2fcc2f106e
cleanup 2017-11-27 10:22:04 -05:00
Jack Robison
4cff2dd233
add testnet and regtest address validation 2017-11-20 13:44:19 -05:00
Jack Robison
37a8c4cae1
add DecodeClaimHex to python binding 2017-11-13 10:11:19 -05:00
Jack Robison
8aaa786512
add python binding 2017-11-08 22:01:32 -05:00
Jack Robison
1f23a260ae
Merge branch 'validate-signatures' 2017-11-08 20:28:10 -05:00
Jack Robison
e02762ab6c
add decode tool 2017-11-07 21:41:07 -05:00
Jack Robison
9a629bb545
validate SECP256k1 signatures 2017-11-07 21:39:08 -05:00
Jack Robison
c62c175d8b
more 2017-09-12 12:02:30 -04:00
Jack Robison
eb8f72f0d8
initial commit 2017-09-06 10:31:28 -04:00
191 changed files with 17756 additions and 6956 deletions

View file

@ -1,5 +1,29 @@
os: linux
dist: trusty
dist: bionic
language: go
go:
- "1.10.x"
- 1.17.x
env:
global:
- GO111MODULE=on
#GITHUB_TOKEN
- secure: "P4YMfllsq/guf5swmBl80IHGvNfUM+5X2b+0LvQQxGJpY9TD6d+PW6U1C90HIT0CDk6UZbtlHyGN+oo+shsWnwMIaM4qmbGdRgtG4TZolXY1QtYQZFW9fTWeHM0wnJeXLV8V4vpTRHnkLrvT8PctbGp5pVOEtcV4y3sEnMP1rkxnMPeHtrDbTlpBmBYEziByEpbg3pe3CE9xVaDD9DqgW7VOOZnQQl7exTtjsIeJYI7OYvdidf/04p632/8WZP2EJbmA2HunfTydWtcD51lCF8V3IvhKKqH4/7sNOKfmUwTDyhLltk9eDcFsYR/FYsy8njH4QDBBrKo/bPq3jzgKF9BY7g28/jMTDY4vDY0Na+4a3+7sDqwdsZ+eiZrgWYUbZ4MwYtpUtcvp8gUz7Avs3v7BBgYsYpgLEW47bT6uNLGO+SdfSwBCYuxT5P0IGCv6wkgYcYgfqVafk+9FiwJlayP2j3wPOdauiYh4WnYX1Rt0zNvaiP5iBhkXwIv3VvUcI1Yu9k0eLDibzZlpR8fkGw3USl+omdjCvXDfJQW1ghMONTm0d7VPG84P/MRhJ+thoh1UwMaT3S0sdPznRnNL6XDntCRT1CZW4l0jfyCPDBW2qw8dWunULYsQZTPWnv4s+BuRRDsfGjyamH5b8evh0TOF+XNOFewmNvflWBNG2y0="
install: true
script:
# Fail if a .go file hasn't been formatted with gofmt
- test -z $(gofmt -s -l $(find . -iname '*.go' -type f))
- make
notifications:
email: false
deploy:
provider: script
skip_cleanup: true
script: ./scripts/deploy.sh
file: bin/lbry
on:
repo: lbryio/lbry.go
tags: true

361
Gopkg.lock generated
View file

@ -1,361 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:9a88883f474d09f1da61894cd8115c7f33988d6941e4f6236324c777aaff8f2c"
name = "github.com/PuerkitoBio/goquery"
packages = ["."]
pruneopts = ""
revision = "dc2ec5c7ca4d9aae063b79b9f581dd3ea6afd2b2"
version = "v1.4.1"
[[projects]]
digest = "1:e3726ad6f38f710e84c8dcd0e830014de6eaeea81f28d91ae898afecc078479a"
name = "github.com/andybalholm/cascadia"
packages = ["."]
pruneopts = ""
revision = "901648c87902174f774fac311d7f176f8647bdaa"
version = "v1.0.0"
[[projects]]
digest = "1:261d95f4464744d542759a7a33846f56f24113f5a93c7577f4cd7044f7cb3d76"
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
"aws/awserr",
"aws/awsutil",
"aws/client",
"aws/client/metadata",
"aws/corehandlers",
"aws/credentials",
"aws/credentials/ec2rolecreds",
"aws/credentials/endpointcreds",
"aws/credentials/stscreds",
"aws/defaults",
"aws/ec2metadata",
"aws/endpoints",
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/shareddefaults",
"private/protocol",
"private/protocol/query",
"private/protocol/query/queryutil",
"private/protocol/rest",
"private/protocol/restxml",
"private/protocol/xml/xmlutil",
"service/s3",
"service/s3/s3iface",
"service/s3/s3manager",
"service/sts",
]
pruneopts = ""
revision = "b69f447375c7fa0047ebcdd8ae5d585d5aac2f71"
version = "v1.10.51"
[[projects]]
branch = "master"
digest = "1:cc8ebf0c6745d09f728f1fa4fbd29baaa2e3a65efb49b5fefb0c163171ee7863"
name = "github.com/btcsuite/btcd"
packages = [
"btcec",
"btcjson",
"chaincfg",
"chaincfg/chainhash",
"rpcclient",
"wire",
]
pruneopts = ""
revision = "86fed781132ac890ee03e906e4ecd5d6fa180c64"
[[projects]]
branch = "master"
digest = "1:30d4a548e09bca4a0c77317c58e7407e2a65c15325e944f9c08a7b7992f8a59e"
name = "github.com/btcsuite/btclog"
packages = ["."]
pruneopts = ""
revision = "84c8d2346e9fc8c7b947e243b9c24e6df9fd206a"
[[projects]]
branch = "master"
digest = "1:b0f4d2431c167d7127a029210c1a7cdc33c9114c1b3fd3582347baad5e832588"
name = "github.com/btcsuite/btcutil"
packages = [
".",
"base58",
"bech32",
]
pruneopts = ""
revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"
[[projects]]
branch = "master"
digest = "1:422f38d57f1bc0fdc34f26d0f1026869a3710400b09b5478c9288efa13573cfa"
name = "github.com/btcsuite/go-socks"
packages = ["socks"]
pruneopts = ""
revision = "4720035b7bfd2a9bb130b1c184f8bbe41b6f0d0f"
[[projects]]
branch = "master"
digest = "1:dfc248d5e6e1582fdec83796d3d1d451aa6cae773c4e4ba1dac2838caef6d381"
name = "github.com/btcsuite/websocket"
packages = ["."]
pruneopts = ""
revision = "31079b6807923eb23992c421b114992b95131b55"
[[projects]]
digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = ""
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
digest = "1:968d8903d598e3fae738325d3410f33f07ea6a2b9ee5591e9c262ee37df6845a"
name = "github.com/go-errors/errors"
packages = ["."]
pruneopts = ""
revision = "a6af135bd4e28680facf08a3d206b454abc877a4"
version = "v1.0.1"
[[projects]]
branch = "master"
digest = "1:cd5bab9c9e23ffa6858eaa79dc827fd84bc24bc00b0cfb0b14036e393da2b1fa"
name = "github.com/go-ini/ini"
packages = ["."]
pruneopts = ""
revision = "5cf292cae48347c2490ac1a58fe36735fb78df7e"
[[projects]]
digest = "1:f958a1c137db276e52f0b50efee41a1a389dcdded59a69711f3e872757dab34b"
name = "github.com/golang/protobuf"
packages = ["proto"]
pruneopts = ""
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
digest = "1:64d212c703a2b94054be0ce470303286b177ad260b2f89a307e3d1bb6c073ef6"
name = "github.com/gorilla/websocket"
packages = ["."]
pruneopts = ""
revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b"
version = "v1.2.0"
[[projects]]
digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
pruneopts = ""
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
digest = "1:6f49eae0c1e5dab1dafafee34b207aeb7a42303105960944828c2079b92fc88e"
name = "github.com/jmespath/go-jmespath"
packages = ["."]
pruneopts = ""
revision = "0b12d6b5"
[[projects]]
branch = "master"
digest = "1:d261f80387a38eeddc1d819ee9ee56d37ca10fc02e6e09ff400fb0ce146e13dc"
name = "github.com/lbryio/lbryschema.go"
packages = ["pb"]
pruneopts = ""
revision = "185433f2fd0c732547654749b98b37e56223dd22"
[[projects]]
digest = "1:5e30b8342813a6a85a647f9277e34ffcd5872dc57ab590dd9b251b145b6ec88f"
name = "github.com/lbryio/ozzo-validation"
packages = ["."]
pruneopts = ""
revision = "d1008ad1fd04ceb5faedaf34881df0c504382706"
version = "v3.1"
[[projects]]
branch = "master"
digest = "1:1dee6133ab829c8559a39031ad1e0e3538e4a7b34d3e0509d1fc247737e928c1"
name = "github.com/mitchellh/go-ps"
packages = ["."]
pruneopts = ""
revision = "4fdf99ab29366514c69ccccddab5dc58b8d84062"
[[projects]]
branch = "master"
digest = "1:eb9117392ee8e7aa44f78e0db603f70b1050ee0ebda4bd40040befb5b218c546"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
pruneopts = ""
revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b"
[[projects]]
digest = "1:3cb50c403fa46c85697dbc4e06a95008689e058f33466b7eb8d31ea0eb291ea3"
name = "github.com/nlopes/slack"
packages = ["."]
pruneopts = ""
revision = "8ab4d0b364ef1e9af5d102531da20d5ec902b6c4"
version = "v0.2.0"
[[projects]]
branch = "master"
digest = "1:8d6d81d0d9d8153e65d637bda77a7c4e6ba496c61efac3578d7d8c981ac31a7b"
name = "github.com/rylio/ytdl"
packages = ["."]
pruneopts = ""
revision = "06f6510946275931157f5fe73f55ec7d6fd65870"
[[projects]]
branch = "master"
digest = "1:67b7dcb3b7e67cb6f96fb38fe7358bc1210453189da210e40cf357a92d57c1c1"
name = "github.com/shopspring/decimal"
packages = ["."]
pruneopts = ""
revision = "19e3cb6c29303990525b56f51acf77c5630dd88a"
[[projects]]
branch = "master"
digest = "1:c92f01303e3ab3b5da92657841639cb53d1548f0d2733d12ef3b9fd9d47c869e"
name = "github.com/sirupsen/logrus"
packages = ["."]
pruneopts = ""
revision = "ea8897e79973357ba785ac2533559a6297e83c44"
[[projects]]
branch = "master"
digest = "1:d0b38ba6da419a6d4380700218eeec8623841d44a856bb57369c172fbf692ab4"
name = "github.com/spf13/cast"
packages = ["."]
pruneopts = ""
revision = "8965335b8c7107321228e3e3702cab9832751bac"
[[projects]]
branch = "master"
digest = "1:bfbf4a9c265ef41f8d03c9d91e340aaddae835710eaed6cd2e6be889cbc05f56"
name = "github.com/spf13/cobra"
packages = ["."]
pruneopts = ""
revision = "1e58aa3361fd650121dceeedc399e7189c05674a"
[[projects]]
digest = "1:8e243c568f36b09031ec18dff5f7d2769dcf5ca4d624ea511c8e3197dc3d352d"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = ""
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
[[projects]]
branch = "master"
digest = "1:22d3674d44ee93f52a9c0b6a22d1f736a0ad9ac3f9d2c1ca8648f3c9ce9910bd"
name = "github.com/ybbus/jsonrpc"
packages = ["."]
pruneopts = ""
revision = "2a548b7d822dd62717337a6b1e817fae1b14660a"
[[projects]]
branch = "master"
digest = "1:3610c577942fbfd2c8975d70a2342bbd13f30cf214237fb8f920c9a6cec0f14a"
name = "github.com/zeebo/bencode"
packages = ["."]
pruneopts = ""
revision = "d522839ac797fc43269dae6a04a1f8be475a915d"
[[projects]]
branch = "master"
digest = "1:8af4dda167d0ef21ab0affc797bff87ed0e87c57bd1d9bf57ad8f72d348c7932"
name = "golang.org/x/crypto"
packages = [
"ripemd160",
"sha3",
"ssh/terminal",
]
pruneopts = ""
revision = "8ac0e0d97ce45cd83d1d7243c060cb8461dda5e9"
[[projects]]
branch = "master"
digest = "1:5dc6753986b9eeba4abdf05dedc5ba06bb52dad43cc8aad35ffb42bb7adfa68f"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"html",
"html/atom",
]
pruneopts = ""
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"
[[projects]]
branch = "master"
digest = "1:baee54aa41cb93366e76a9c29f8dd2e4c4e6a35ff89551721d5275d2c858edc9"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
]
pruneopts = ""
revision = "bff228c7b664c5fce602223a05fb708fd8654986"
[[projects]]
branch = "master"
digest = "1:b064108d68f82d0201d9f812297c928e57488e82ccdb77ed06ac69f64519a890"
name = "google.golang.org/api"
packages = [
"gensupport",
"googleapi",
"googleapi/internal/uritemplates",
"googleapi/transport",
"youtube/v3",
]
pruneopts = ""
revision = "ef86ce4234efee96020bde00391d6a9cfae66561"
[[projects]]
digest = "1:f771bf87a3253de520c2af6fb6e75314dce0fedc0b30b208134fe502932bb15d"
name = "gopkg.in/nullbio/null.v6"
packages = ["convert"]
pruneopts = ""
revision = "40264a2e6b7972d183906cf17663983c23231c82"
version = "v6.3"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/aws/aws-sdk-go/aws",
"github.com/aws/aws-sdk-go/aws/awserr",
"github.com/aws/aws-sdk-go/aws/credentials",
"github.com/aws/aws-sdk-go/aws/session",
"github.com/aws/aws-sdk-go/service/s3",
"github.com/aws/aws-sdk-go/service/s3/s3manager",
"github.com/btcsuite/btcd/chaincfg",
"github.com/btcsuite/btcd/chaincfg/chainhash",
"github.com/btcsuite/btcd/rpcclient",
"github.com/btcsuite/btcutil",
"github.com/btcsuite/btcutil/base58",
"github.com/davecgh/go-spew/spew",
"github.com/go-errors/errors",
"github.com/go-ini/ini",
"github.com/lbryio/lbryschema.go/pb",
"github.com/lbryio/ozzo-validation",
"github.com/mitchellh/go-ps",
"github.com/mitchellh/mapstructure",
"github.com/nlopes/slack",
"github.com/rylio/ytdl",
"github.com/shopspring/decimal",
"github.com/sirupsen/logrus",
"github.com/spf13/cast",
"github.com/spf13/cobra",
"github.com/ybbus/jsonrpc",
"github.com/zeebo/bencode",
"golang.org/x/crypto/ripemd160",
"golang.org/x/crypto/sha3",
"google.golang.org/api/googleapi/transport",
"google.golang.org/api/youtube/v3",
"gopkg.in/nullbio/null.v6/convert",
]
solver-name = "gps-cdcl"
solver-version = 1

View file

@ -1,62 +0,0 @@
[[constraint]]
name = "github.com/davecgh/go-spew"
version = "1.1.0"
[[constraint]]
name = "github.com/go-errors/errors"
version = "1.0.0"
[[constraint]]
branch = "master"
name = "github.com/rylio/ytdl"
[[constraint]]
branch = "master"
name = "github.com/lbryio/lbryschema.go"
[[constraint]]
branch = "master"
name = "github.com/mitchellh/mapstructure"
[[constraint]]
branch = "master"
name = "github.com/shopspring/decimal"
[[constraint]]
name = "github.com/sirupsen/logrus"
branch = "master"
[[constraint]]
name = "github.com/spf13/cast"
branch = "master"
[[constraint]]
branch = "master"
name = "github.com/spf13/cobra"
[[constraint]]
branch = "master"
name = "github.com/ybbus/jsonrpc"
[[constraint]]
branch = "master"
name = "github.com/zeebo/bencode"
[[constraint]]
branch = "master"
name = "google.golang.org/api"
[[constraint]]
branch = "master"
name = "github.com/btcsuite/btcd"
[[constraint]]
branch = "master"
name = "github.com/go-ini/ini"
[[constraint]]
branch = "master"
name = "github.com/btcsuite/btcutil"
[[constraint]]
name = "github.com/aws/aws-sdk-go"
version = "^1.10.51"

View file

@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2016-2018 LBRY Inc
Copyright (c) 2016-2020 LBRY Inc
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

View file

@ -1,23 +1,17 @@
BINARY=lbry
DIR = $(shell cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)
VENDOR_DIR = vendor
VERSION=$(shell git --git-dir=${DIR}/.git describe --dirty --always --long --abbrev=7)
LDFLAGS = -ldflags "-X main.Version=${VERSION}"
.PHONY: build dep clean
.PHONY: build clean
.DEFAULT_GOAL: build
build: dep
build:
CGO_ENABLED=0 go build ${LDFLAGS} -asmflags -trimpath=${DIR} -o ${DIR}/${BINARY} main.go
dep: | $(VENDOR_DIR)
$(VENDOR_DIR):
go get github.com/golang/dep/cmd/dep && dep ensure
clean:
if [ -f ${DIR}/${BINARY} ]; then rm ${DIR}/${BINARY}; fi

View file

@ -1,69 +0,0 @@
[![Build Status](https://travis-ci.org/lbryio/lbry.go.svg?branch=master)](https://travis-ci.org/lbryio/lbry.go)
# LBRY in Golang
lbry.go is a set of tools and projects implemented in Golang. See each subfolder for more details
## Installation
No installation required for lbry.go
## Usage
See individual subfolders for usage instructions
## Running from Source
### Go
Make sure you have Go 1.10.1+
- Ubuntu: https://launchpad.net/~longsleep/+archive/ubuntu/golang-backports or https://github.com/golang/go/wiki/Ubuntu
- OSX: `brew install go`
### Lbrycrd
_not strictly necessary, but recommended_
- Install lbrycrdd (https://github.com/lbryio/lbrycrd/releases)
- Ensure `~/.lbrycrd/lbrycrd.conf` file exists with username and password.
If you don't have one, run:
```
mkdir -p ~/.lbrycrd
echo -e "rpcuser=lbryrpc\nrpcpassword=$(env LC_CTYPE=C LC_ALL=C tr -dc A-Za-z0-9 < /dev/urandom | head -c 16 | xargs)" > ~/.lbrycrd/lbrycrd.conf
```
- Run `./lbrycrdd -server -daemon -txindex`. If you get an error about indexing, add the `-reindex` flag for one run. You will only need to
reindex once.
### building lbry.go
clone the repository
```
go get -u github.com/lbryio/lbry.go
cd "$(go env GOPATH)/src/github.com/lbryio/lbry.go"
```
run `make` from the root directory to build the binary
## Contributing
Contributions to this project are welcome, encouraged, and compensated. For more details, see [lbry.io/faq/contributing](https://lbry.io/faq/contributing)
GO strictly enforces a correct syntax therefore you might need to run `go fmt` from inside the each working directory.
When using an IDE like `Goland` you should set up file watchers such as to automatically format your code and sort your imports.
![alt text](img/filewatchers.png "file watchers")
## License
See [LICENSE](LICENSE)
## Security
We take security seriously. Please contact security@lbry.io regarding any issues you may encounter.
Our PGP key is [here](https://keybase.io/lbry/key.asc) if you need it.
## Contact
The primary contact for this project is [@nikooo777](https://github.com/nikooo777) (niko@lbry.io)

765
blobex/blobex.pb.go Normal file
View file

@ -0,0 +1,765 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: blobex.proto
package blobex
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Error struct {
// should we enum the error codes?
Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Error) Reset() { *m = Error{} }
func (m *Error) String() string { return proto.CompactTextString(m) }
func (*Error) ProtoMessage() {}
func (*Error) Descriptor() ([]byte, []int) {
return fileDescriptor_183aee39e18f30c9, []int{0}
}
func (m *Error) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Error.Unmarshal(m, b)
}
func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Error.Marshal(b, m, deterministic)
}
func (m *Error) XXX_Merge(src proto.Message) {
xxx_messageInfo_Error.Merge(m, src)
}
func (m *Error) XXX_Size() int {
return xxx_messageInfo_Error.Size(m)
}
func (m *Error) XXX_DiscardUnknown() {
xxx_messageInfo_Error.DiscardUnknown(m)
}
var xxx_messageInfo_Error proto.InternalMessageInfo
func (m *Error) GetCode() uint32 {
if m != nil {
return m.Code
}
return 0
}
func (m *Error) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
// how much does the host charge per kb at the moment
type PriceCheckRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PriceCheckRequest) Reset() { *m = PriceCheckRequest{} }
func (m *PriceCheckRequest) String() string { return proto.CompactTextString(m) }
func (*PriceCheckRequest) ProtoMessage() {}
func (*PriceCheckRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_183aee39e18f30c9, []int{1}
}
func (m *PriceCheckRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PriceCheckRequest.Unmarshal(m, b)
}
func (m *PriceCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PriceCheckRequest.Marshal(b, m, deterministic)
}
func (m *PriceCheckRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_PriceCheckRequest.Merge(m, src)
}
func (m *PriceCheckRequest) XXX_Size() int {
return xxx_messageInfo_PriceCheckRequest.Size(m)
}
func (m *PriceCheckRequest) XXX_DiscardUnknown() {
xxx_messageInfo_PriceCheckRequest.DiscardUnknown(m)
}
var xxx_messageInfo_PriceCheckRequest proto.InternalMessageInfo
type PriceCheckResponse struct {
Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
DeweysPerKB uint64 `protobuf:"varint,2,opt,name=deweysPerKB,proto3" json:"deweysPerKB,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PriceCheckResponse) Reset() { *m = PriceCheckResponse{} }
func (m *PriceCheckResponse) String() string { return proto.CompactTextString(m) }
func (*PriceCheckResponse) ProtoMessage() {}
func (*PriceCheckResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_183aee39e18f30c9, []int{2}
}
func (m *PriceCheckResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PriceCheckResponse.Unmarshal(m, b)
}
func (m *PriceCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PriceCheckResponse.Marshal(b, m, deterministic)
}
func (m *PriceCheckResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_PriceCheckResponse.Merge(m, src)
}
func (m *PriceCheckResponse) XXX_Size() int {
return xxx_messageInfo_PriceCheckResponse.Size(m)
}
func (m *PriceCheckResponse) XXX_DiscardUnknown() {
xxx_messageInfo_PriceCheckResponse.DiscardUnknown(m)
}
var xxx_messageInfo_PriceCheckResponse proto.InternalMessageInfo
func (m *PriceCheckResponse) GetError() *Error {
if m != nil {
return m.Error
}
return nil
}
func (m *PriceCheckResponse) GetDeweysPerKB() uint64 {
if m != nil {
return m.DeweysPerKB
}
return 0
}
// are any of the hashs available for download, or are any of the hashes desired for upload
// NOTE: if any hashes are stream hashes, and the server has the manifest but not all the content
// blobs, the server may reply that it needs extra blobs that were not in the original request
type HashesRequest struct {
Hashes []string `protobuf:"bytes,1,rep,name=hashes,proto3" json:"hashes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HashesRequest) Reset() { *m = HashesRequest{} }
func (m *HashesRequest) String() string { return proto.CompactTextString(m) }
func (*HashesRequest) ProtoMessage() {}
func (*HashesRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_183aee39e18f30c9, []int{3}
}
func (m *HashesRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HashesRequest.Unmarshal(m, b)
}
func (m *HashesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HashesRequest.Marshal(b, m, deterministic)
}
func (m *HashesRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_HashesRequest.Merge(m, src)
}
func (m *HashesRequest) XXX_Size() int {
return xxx_messageInfo_HashesRequest.Size(m)
}
func (m *HashesRequest) XXX_DiscardUnknown() {
xxx_messageInfo_HashesRequest.DiscardUnknown(m)
}
var xxx_messageInfo_HashesRequest proto.InternalMessageInfo
func (m *HashesRequest) GetHashes() []string {
if m != nil {
return m.Hashes
}
return nil
}
type HashesResponse struct {
Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
Hashes map[string]bool `protobuf:"bytes,2,rep,name=hashes,proto3" json:"hashes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HashesResponse) Reset() { *m = HashesResponse{} }
func (m *HashesResponse) String() string { return proto.CompactTextString(m) }
func (*HashesResponse) ProtoMessage() {}
func (*HashesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_183aee39e18f30c9, []int{4}
}
func (m *HashesResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HashesResponse.Unmarshal(m, b)
}
func (m *HashesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HashesResponse.Marshal(b, m, deterministic)
}
func (m *HashesResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_HashesResponse.Merge(m, src)
}
func (m *HashesResponse) XXX_Size() int {
return xxx_messageInfo_HashesResponse.Size(m)
}
func (m *HashesResponse) XXX_DiscardUnknown() {
xxx_messageInfo_HashesResponse.DiscardUnknown(m)
}
var xxx_messageInfo_HashesResponse proto.InternalMessageInfo
func (m *HashesResponse) GetError() *Error {
if m != nil {
return m.Error
}
return nil
}
func (m *HashesResponse) GetHashes() map[string]bool {
if m != nil {
return m.Hashes
}
return nil
}
// download the hash
type DownloadRequest struct {
Hash string `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DownloadRequest) Reset() { *m = DownloadRequest{} }
func (m *DownloadRequest) String() string { return proto.CompactTextString(m) }
func (*DownloadRequest) ProtoMessage() {}
func (*DownloadRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_183aee39e18f30c9, []int{5}
}
func (m *DownloadRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DownloadRequest.Unmarshal(m, b)
}
func (m *DownloadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DownloadRequest.Marshal(b, m, deterministic)
}
func (m *DownloadRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_DownloadRequest.Merge(m, src)
}
func (m *DownloadRequest) XXX_Size() int {
return xxx_messageInfo_DownloadRequest.Size(m)
}
func (m *DownloadRequest) XXX_DiscardUnknown() {
xxx_messageInfo_DownloadRequest.DiscardUnknown(m)
}
var xxx_messageInfo_DownloadRequest proto.InternalMessageInfo
func (m *DownloadRequest) GetHash() string {
if m != nil {
return m.Hash
}
return ""
}
type DownloadResponse struct {
Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"`
Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"`
Address string `protobuf:"bytes,4,opt,name=address,proto3" json:"address,omitempty"`
Price uint64 `protobuf:"varint,5,opt,name=price,proto3" json:"price,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DownloadResponse) Reset() { *m = DownloadResponse{} }
func (m *DownloadResponse) String() string { return proto.CompactTextString(m) }
func (*DownloadResponse) ProtoMessage() {}
func (*DownloadResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_183aee39e18f30c9, []int{6}
}
func (m *DownloadResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DownloadResponse.Unmarshal(m, b)
}
func (m *DownloadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DownloadResponse.Marshal(b, m, deterministic)
}
func (m *DownloadResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_DownloadResponse.Merge(m, src)
}
func (m *DownloadResponse) XXX_Size() int {
return xxx_messageInfo_DownloadResponse.Size(m)
}
func (m *DownloadResponse) XXX_DiscardUnknown() {
xxx_messageInfo_DownloadResponse.DiscardUnknown(m)
}
var xxx_messageInfo_DownloadResponse proto.InternalMessageInfo
func (m *DownloadResponse) GetError() *Error {
if m != nil {
return m.Error
}
return nil
}
func (m *DownloadResponse) GetHash() string {
if m != nil {
return m.Hash
}
return ""
}
func (m *DownloadResponse) GetBlob() []byte {
if m != nil {
return m.Blob
}
return nil
}
func (m *DownloadResponse) GetAddress() string {
if m != nil {
return m.Address
}
return ""
}
func (m *DownloadResponse) GetPrice() uint64 {
if m != nil {
return m.Price
}
return 0
}
// upload the hash
type UploadRequest struct {
Hash string `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
Blob []byte `protobuf:"bytes,2,opt,name=blob,proto3" json:"blob,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *UploadRequest) Reset() { *m = UploadRequest{} }
func (m *UploadRequest) String() string { return proto.CompactTextString(m) }
func (*UploadRequest) ProtoMessage() {}
func (*UploadRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_183aee39e18f30c9, []int{7}
}
func (m *UploadRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UploadRequest.Unmarshal(m, b)
}
func (m *UploadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_UploadRequest.Marshal(b, m, deterministic)
}
func (m *UploadRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_UploadRequest.Merge(m, src)
}
func (m *UploadRequest) XXX_Size() int {
return xxx_messageInfo_UploadRequest.Size(m)
}
func (m *UploadRequest) XXX_DiscardUnknown() {
xxx_messageInfo_UploadRequest.DiscardUnknown(m)
}
var xxx_messageInfo_UploadRequest proto.InternalMessageInfo
func (m *UploadRequest) GetHash() string {
if m != nil {
return m.Hash
}
return ""
}
func (m *UploadRequest) GetBlob() []byte {
if m != nil {
return m.Blob
}
return nil
}
type UploadResponse struct {
Error *Error `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *UploadResponse) Reset() { *m = UploadResponse{} }
func (m *UploadResponse) String() string { return proto.CompactTextString(m) }
func (*UploadResponse) ProtoMessage() {}
func (*UploadResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_183aee39e18f30c9, []int{8}
}
func (m *UploadResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UploadResponse.Unmarshal(m, b)
}
func (m *UploadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_UploadResponse.Marshal(b, m, deterministic)
}
func (m *UploadResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_UploadResponse.Merge(m, src)
}
func (m *UploadResponse) XXX_Size() int {
return xxx_messageInfo_UploadResponse.Size(m)
}
func (m *UploadResponse) XXX_DiscardUnknown() {
xxx_messageInfo_UploadResponse.DiscardUnknown(m)
}
var xxx_messageInfo_UploadResponse proto.InternalMessageInfo
func (m *UploadResponse) GetError() *Error {
if m != nil {
return m.Error
}
return nil
}
func (m *UploadResponse) GetHash() string {
if m != nil {
return m.Hash
}
return ""
}
func init() {
proto.RegisterType((*Error)(nil), "blobex.Error")
proto.RegisterType((*PriceCheckRequest)(nil), "blobex.PriceCheckRequest")
proto.RegisterType((*PriceCheckResponse)(nil), "blobex.PriceCheckResponse")
proto.RegisterType((*HashesRequest)(nil), "blobex.HashesRequest")
proto.RegisterType((*HashesResponse)(nil), "blobex.HashesResponse")
proto.RegisterMapType((map[string]bool)(nil), "blobex.HashesResponse.HashesEntry")
proto.RegisterType((*DownloadRequest)(nil), "blobex.DownloadRequest")
proto.RegisterType((*DownloadResponse)(nil), "blobex.DownloadResponse")
proto.RegisterType((*UploadRequest)(nil), "blobex.UploadRequest")
proto.RegisterType((*UploadResponse)(nil), "blobex.UploadResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// BlobExchangeClient is the client API for BlobExchange service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type BlobExchangeClient interface {
PriceCheck(ctx context.Context, in *PriceCheckRequest, opts ...grpc.CallOption) (*PriceCheckResponse, error)
DownloadCheck(ctx context.Context, in *HashesRequest, opts ...grpc.CallOption) (*HashesResponse, error)
Download(ctx context.Context, opts ...grpc.CallOption) (BlobExchange_DownloadClient, error)
UploadCheck(ctx context.Context, in *HashesRequest, opts ...grpc.CallOption) (*HashesResponse, error)
Upload(ctx context.Context, opts ...grpc.CallOption) (BlobExchange_UploadClient, error)
}
type blobExchangeClient struct {
cc *grpc.ClientConn
}
func NewBlobExchangeClient(cc *grpc.ClientConn) BlobExchangeClient {
return &blobExchangeClient{cc}
}
// PriceCheck performs the unary PriceCheck RPC over the wrapped connection.
func (c *blobExchangeClient) PriceCheck(ctx context.Context, in *PriceCheckRequest, opts ...grpc.CallOption) (*PriceCheckResponse, error) {
    out := new(PriceCheckResponse)
    err := c.cc.Invoke(ctx, "/blobex.BlobExchange/PriceCheck", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

// DownloadCheck performs the unary DownloadCheck RPC over the wrapped connection.
func (c *blobExchangeClient) DownloadCheck(ctx context.Context, in *HashesRequest, opts ...grpc.CallOption) (*HashesResponse, error) {
    out := new(HashesResponse)
    err := c.cc.Invoke(ctx, "/blobex.BlobExchange/DownloadCheck", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

// Download opens the bidirectional Download stream (Streams[0] of the service descriptor).
func (c *blobExchangeClient) Download(ctx context.Context, opts ...grpc.CallOption) (BlobExchange_DownloadClient, error) {
    stream, err := c.cc.NewStream(ctx, &_BlobExchange_serviceDesc.Streams[0], "/blobex.BlobExchange/Download", opts...)
    if err != nil {
        return nil, err
    }
    x := &blobExchangeDownloadClient{stream}
    return x, nil
}

// BlobExchange_DownloadClient is the client-side handle for the Download stream.
type BlobExchange_DownloadClient interface {
    Send(*DownloadRequest) error
    Recv() (*DownloadResponse, error)
    grpc.ClientStream
}

type blobExchangeDownloadClient struct {
    grpc.ClientStream
}

// Send writes one DownloadRequest to the stream.
func (x *blobExchangeDownloadClient) Send(m *DownloadRequest) error {
    return x.ClientStream.SendMsg(m)
}

// Recv reads the next DownloadResponse from the stream.
func (x *blobExchangeDownloadClient) Recv() (*DownloadResponse, error) {
    m := new(DownloadResponse)
    if err := x.ClientStream.RecvMsg(m); err != nil {
        return nil, err
    }
    return m, nil
}
// UploadCheck performs the unary UploadCheck RPC over the wrapped connection.
func (c *blobExchangeClient) UploadCheck(ctx context.Context, in *HashesRequest, opts ...grpc.CallOption) (*HashesResponse, error) {
    out := new(HashesResponse)
    err := c.cc.Invoke(ctx, "/blobex.BlobExchange/UploadCheck", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

// Upload opens the bidirectional Upload stream (Streams[1] of the service descriptor).
func (c *blobExchangeClient) Upload(ctx context.Context, opts ...grpc.CallOption) (BlobExchange_UploadClient, error) {
    stream, err := c.cc.NewStream(ctx, &_BlobExchange_serviceDesc.Streams[1], "/blobex.BlobExchange/Upload", opts...)
    if err != nil {
        return nil, err
    }
    x := &blobExchangeUploadClient{stream}
    return x, nil
}

// BlobExchange_UploadClient is the client-side handle for the Upload stream.
type BlobExchange_UploadClient interface {
    Send(*UploadRequest) error
    Recv() (*UploadResponse, error)
    grpc.ClientStream
}

type blobExchangeUploadClient struct {
    grpc.ClientStream
}

// Send writes one UploadRequest to the stream.
func (x *blobExchangeUploadClient) Send(m *UploadRequest) error {
    return x.ClientStream.SendMsg(m)
}

// Recv reads the next UploadResponse from the stream.
func (x *blobExchangeUploadClient) Recv() (*UploadResponse, error) {
    m := new(UploadResponse)
    if err := x.ClientStream.RecvMsg(m); err != nil {
        return nil, err
    }
    return m, nil
}
// BlobExchangeServer is the server API for BlobExchange service.
type BlobExchangeServer interface {
    // PriceCheck reports the host's current price per KB (see blobex.proto).
    PriceCheck(context.Context, *PriceCheckRequest) (*PriceCheckResponse, error)
    // DownloadCheck reports which of the requested hashes are available for download.
    DownloadCheck(context.Context, *HashesRequest) (*HashesResponse, error)
    // Download serves blobs over a bidirectional stream.
    Download(BlobExchange_DownloadServer) error
    // UploadCheck reports which of the offered hashes the host wants.
    UploadCheck(context.Context, *HashesRequest) (*HashesResponse, error)
    // Upload receives blobs over a bidirectional stream.
    Upload(BlobExchange_UploadServer) error
}

// RegisterBlobExchangeServer registers srv's handlers with the gRPC server.
func RegisterBlobExchangeServer(s *grpc.Server, srv BlobExchangeServer) {
    s.RegisterService(&_BlobExchange_serviceDesc, srv)
}
// _BlobExchange_PriceCheck_Handler is the generated dispatch shim for the
// PriceCheck RPC: it decodes the request, then invokes the service method
// either directly or through the configured unary interceptor.
func _BlobExchange_PriceCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(PriceCheckRequest)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(BlobExchangeServer).PriceCheck(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/blobex.BlobExchange/PriceCheck",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(BlobExchangeServer).PriceCheck(ctx, req.(*PriceCheckRequest))
    }
    return interceptor(ctx, in, info, handler)
}

// _BlobExchange_DownloadCheck_Handler is the generated dispatch shim for the
// DownloadCheck RPC (same decode/interceptor pattern as PriceCheck).
func _BlobExchange_DownloadCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(HashesRequest)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(BlobExchangeServer).DownloadCheck(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/blobex.BlobExchange/DownloadCheck",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(BlobExchangeServer).DownloadCheck(ctx, req.(*HashesRequest))
    }
    return interceptor(ctx, in, info, handler)
}
// _BlobExchange_Download_Handler adapts the raw gRPC server stream to the
// typed Download service method.
func _BlobExchange_Download_Handler(srv interface{}, stream grpc.ServerStream) error {
    return srv.(BlobExchangeServer).Download(&blobExchangeDownloadServer{stream})
}

// BlobExchange_DownloadServer is the server-side handle for the Download stream.
type BlobExchange_DownloadServer interface {
    Send(*DownloadResponse) error
    Recv() (*DownloadRequest, error)
    grpc.ServerStream
}

type blobExchangeDownloadServer struct {
    grpc.ServerStream
}

// Send writes one DownloadResponse to the stream.
func (x *blobExchangeDownloadServer) Send(m *DownloadResponse) error {
    return x.ServerStream.SendMsg(m)
}

// Recv reads the next DownloadRequest from the stream.
func (x *blobExchangeDownloadServer) Recv() (*DownloadRequest, error) {
    m := new(DownloadRequest)
    if err := x.ServerStream.RecvMsg(m); err != nil {
        return nil, err
    }
    return m, nil
}
// _BlobExchange_UploadCheck_Handler is the generated dispatch shim for the
// UploadCheck RPC: it decodes the request, then invokes the service method
// either directly or through the configured unary interceptor.
func _BlobExchange_UploadCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(HashesRequest)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(BlobExchangeServer).UploadCheck(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/blobex.BlobExchange/UploadCheck",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(BlobExchangeServer).UploadCheck(ctx, req.(*HashesRequest))
    }
    return interceptor(ctx, in, info, handler)
}

// _BlobExchange_Upload_Handler adapts the raw gRPC server stream to the
// typed Upload service method.
func _BlobExchange_Upload_Handler(srv interface{}, stream grpc.ServerStream) error {
    return srv.(BlobExchangeServer).Upload(&blobExchangeUploadServer{stream})
}

// BlobExchange_UploadServer is the server-side handle for the Upload stream.
type BlobExchange_UploadServer interface {
    Send(*UploadResponse) error
    Recv() (*UploadRequest, error)
    grpc.ServerStream
}

type blobExchangeUploadServer struct {
    grpc.ServerStream
}

// Send writes one UploadResponse to the stream.
func (x *blobExchangeUploadServer) Send(m *UploadResponse) error {
    return x.ServerStream.SendMsg(m)
}

// Recv reads the next UploadRequest from the stream.
func (x *blobExchangeUploadServer) Recv() (*UploadRequest, error) {
    m := new(UploadRequest)
    if err := x.ServerStream.RecvMsg(m); err != nil {
        return nil, err
    }
    return m, nil
}
// _BlobExchange_serviceDesc maps RPC names to their generated handler shims.
// Both Download and Upload are bidirectional streams (ServerStreams and
// ClientStreams both set).
var _BlobExchange_serviceDesc = grpc.ServiceDesc{
    ServiceName: "blobex.BlobExchange",
    HandlerType: (*BlobExchangeServer)(nil),
    Methods: []grpc.MethodDesc{
        {
            MethodName: "PriceCheck",
            Handler:    _BlobExchange_PriceCheck_Handler,
        },
        {
            MethodName: "DownloadCheck",
            Handler:    _BlobExchange_DownloadCheck_Handler,
        },
        {
            MethodName: "UploadCheck",
            Handler:    _BlobExchange_UploadCheck_Handler,
        },
    },
    Streams: []grpc.StreamDesc{
        {
            StreamName:    "Download",
            Handler:       _BlobExchange_Download_Handler,
            ServerStreams: true,
            ClientStreams: true,
        },
        {
            StreamName:    "Upload",
            Handler:       _BlobExchange_Upload_Handler,
            ServerStreams: true,
            ClientStreams: true,
        },
    },
    Metadata: "blobex.proto",
}
// init registers the gzipped blobex.proto file descriptor with the protobuf registry.
func init() { proto.RegisterFile("blobex.proto", fileDescriptor_183aee39e18f30c9) }
var fileDescriptor_183aee39e18f30c9 = []byte{
// 449 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0x13, 0x31,
0x10, 0xae, 0x37, 0x3f, 0x34, 0xb3, 0xd9, 0x52, 0x06, 0x28, 0x26, 0xa7, 0x95, 0x11, 0x62, 0x4f,
0x15, 0x0a, 0x42, 0x40, 0x25, 0x10, 0x6a, 0x89, 0x04, 0xe2, 0x52, 0x59, 0xe2, 0xc4, 0x69, 0x93,
0x1d, 0x35, 0xa8, 0xcb, 0x7a, 0xb1, 0x53, 0xda, 0x3c, 0x05, 0x2f, 0xc2, 0x1b, 0xf1, 0x32, 0xc8,
0xf6, 0xba, 0x9b, 0xb4, 0x95, 0x50, 0xe8, 0x6d, 0xbe, 0xf1, 0xcc, 0xf7, 0x8d, 0xfd, 0x8d, 0x0c,
0xc3, 0x69, 0xa9, 0xa6, 0x74, 0xb1, 0x5f, 0x6b, 0xb5, 0x50, 0xd8, 0xf7, 0x48, 0xbc, 0x84, 0xde,
0x44, 0x6b, 0xa5, 0x11, 0xa1, 0x3b, 0x53, 0x05, 0x71, 0x96, 0xb2, 0x2c, 0x91, 0x2e, 0x46, 0x0e,
0x77, 0xbe, 0x93, 0x31, 0xf9, 0x09, 0xf1, 0x28, 0x65, 0xd9, 0x40, 0x06, 0x28, 0xee, 0xc3, 0xbd,
0x63, 0xfd, 0x6d, 0x46, 0x47, 0x73, 0x9a, 0x9d, 0x4a, 0xfa, 0x71, 0x46, 0x66, 0x21, 0xbe, 0x02,
0xae, 0x26, 0x4d, 0xad, 0x2a, 0x43, 0xf8, 0x04, 0x7a, 0x64, 0x15, 0x1c, 0x73, 0x3c, 0x4e, 0xf6,
0x9b, 0x39, 0x9c, 0xac, 0xf4, 0x67, 0x98, 0x42, 0x5c, 0xd0, 0x39, 0x2d, 0xcd, 0x31, 0xe9, 0xcf,
0x87, 0x4e, 0xad, 0x2b, 0x57, 0x53, 0xe2, 0x19, 0x24, 0x1f, 0x73, 0x33, 0x27, 0xd3, 0xa8, 0xe1,
0x1e, 0xf4, 0xe7, 0x2e, 0xc1, 0x59, 0xda, 0xc9, 0x06, 0xb2, 0x41, 0xe2, 0x37, 0x83, 0x9d, 0x50,
0xb9, 0xc9, 0x08, 0x07, 0x97, 0x7c, 0x51, 0xda, 0xc9, 0xe2, 0xb1, 0x08, 0x55, 0xeb, 0x64, 0x0d,
0x9c, 0x54, 0x0b, 0xbd, 0x0c, 0x9a, 0xa3, 0x37, 0x10, 0xaf, 0xa4, 0x71, 0x17, 0x3a, 0xa7, 0xb4,
0x74, 0x6a, 0x03, 0x69, 0x43, 0x7c, 0x00, 0xbd, 0x9f, 0x79, 0x79, 0xe6, 0xdf, 0x71, 0x5b, 0x7a,
0x70, 0x10, 0xbd, 0x66, 0xe2, 0x29, 0xdc, 0xfd, 0xa0, 0xce, 0xab, 0x52, 0xe5, 0x45, 0xb8, 0x19,
0x42, 0xd7, 0xf2, 0x36, 0xfd, 0x2e, 0x16, 0xbf, 0x18, 0xec, 0xb6, 0x75, 0x9b, 0xdc, 0x2b, 0xb0,
0x45, 0x2d, 0x9b, 0xcd, 0xd9, 0x52, 0xde, 0x49, 0x59, 0x36, 0x94, 0x2e, 0xb6, 0x66, 0xe7, 0x45,
0xa1, 0xc9, 0x18, 0xde, 0xf5, 0x66, 0x37, 0xd0, 0x0e, 0x5f, 0x5b, 0x5f, 0x79, 0xcf, 0xd9, 0xe2,
0x81, 0x78, 0x05, 0xc9, 0x97, 0xfa, 0x1f, 0x63, 0x5f, 0x0a, 0x45, 0xad, 0x90, 0xf8, 0x04, 0x3b,
0xa1, 0xf1, 0x96, 0xf7, 0x18, 0xff, 0x89, 0x60, 0x78, 0x58, 0xaa, 0xe9, 0xe4, 0x62, 0x36, 0xcf,
0xab, 0x13, 0xc2, 0x09, 0x40, 0xbb, 0x82, 0xf8, 0x38, 0x10, 0x5d, 0xdb, 0xd5, 0xd1, 0xe8, 0xa6,
0x23, 0x3f, 0x8e, 0xd8, 0xc2, 0xf7, 0x90, 0x84, 0xc7, 0xf6, 0x4c, 0x0f, 0xaf, 0x2e, 0x83, 0x67,
0xd9, 0xbb, 0x79, 0x47, 0xc4, 0x16, 0x1e, 0xc1, 0x76, 0x60, 0xc0, 0x47, 0xa1, 0xea, 0x8a, 0xd1,
0x23, 0x7e, 0xfd, 0x20, 0x10, 0x64, 0xec, 0x39, 0xc3, 0x77, 0x10, 0xfb, 0x97, 0xfa, 0xcf, 0x21,
0xde, 0x42, 0xdf, 0xf7, 0xb7, 0xad, 0x6b, 0x96, 0xb5, 0xad, 0xeb, 0x86, 0x78, 0xf9, 0x69, 0xdf,
0x7d, 0x15, 0x2f, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x4a, 0x7e, 0x89, 0x3a, 0x04, 0x00,
0x00,
}

87
blobex/blobex.proto Normal file
View file

@ -0,0 +1,87 @@
syntax = "proto3";

package blobex;

/*
## Errors
- invalid request
- banned for nonpayment
- banned for uploading unwanted blobs
- blob not wanted
- blob not available
- not accepting blobs

## Considerations
- there are two requests to upload a blob. how to reduce that to one?
  - UploadCheck checks for many hashes at once. if you're just uploading one or a few, just do it and handle the error
- how to avoid receiving the whole blob and then determining the blob is not wanted? may not ever be possible
- is avail check necessary? just request what you want for download
  - maybe you want to check multiple blobs at once?
  - how to check for wanted blobs from stream hash?
- prices should be set by hosts, since they are offering blobs for download and have the best information on prices. but request is initiated by client. how do we make sure clients are not overpaying without making them make two requests for each blob?
- should we have another request to submit proof of payment? or at least a txid?
*/

// BlobExchange is the blob price/availability/transfer service.
service BlobExchange {
  rpc PriceCheck(PriceCheckRequest) returns (PriceCheckResponse) {}
  rpc DownloadCheck(HashesRequest) returns (HashesResponse) {}
  rpc Download(stream DownloadRequest) returns (stream DownloadResponse) {}
  rpc UploadCheck(HashesRequest) returns (HashesResponse) {}
  rpc Upload(stream UploadRequest) returns (stream UploadResponse) {}
}

// Error is an application-level error carried inside responses.
message Error {
  // should we enum the error codes?
  uint32 code = 1;
  string message = 2;
}

// how much does the host charge per kb at the moment
message PriceCheckRequest {
}

message PriceCheckResponse{
  Error error = 1;
  uint64 deweysPerKB = 2;
}

// are any of the hashes available for download, or are any of the hashes desired for upload
// NOTE: if any hashes are stream hashes, and the server has the manifest but not all the content
// blobs, the server may reply that it needs extra blobs that were not in the original request
message HashesRequest {
  repeated string hashes = 1;
}

message HashesResponse {
  Error error = 1;
  map<string, bool> hashes = 2;
}

// download the hash
message DownloadRequest {
  string hash = 1;
}

message DownloadResponse {
  Error error = 1;
  string hash = 2;
  bytes blob = 3;
  string address = 4; // address where payment for data download should be sent
  uint64 price = 5; // price of the data in this blob
}

// upload the hash
message UploadRequest {
  string hash = 1;
  bytes blob = 2;
}

message UploadResponse {
  Error error = 1;
  string hash = 2;
}

29
blobex/proto.sh Executable file
View file

@ -0,0 +1,29 @@
#!/bin/bash
# Regenerates the Go gRPC bindings (blobex.pb.go) from blobex.proto.
set -euo pipefail
#set -x

# version_gte A B: succeeds when version string A >= B (compared with sort -V).
version_gte() {
    [ "$1" = "$(echo -e "$1\n$2" | sort -V | tail -n1)" ]
}

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Require a protoc binary of at least MIN_VERSION on PATH.
hash protoc 2>/dev/null || { echo >&2 -e 'error: protoc binary not found\nDownload it from https://github.com/google/protobuf/releases and put it in your path.\nMake sure you get the one starting with `protoc`, not `protobuf`.'; exit 1; }
PROTOC="$(which protoc)"
VERSION="$($PROTOC --version | cut -d' ' -f2)"
MIN_VERSION="3.0"
version_gte "$VERSION" "$MIN_VERSION" || { echo >&2 "error: protoc version must be >= $MIN_VERSION (your $PROTOC is $VERSION)"; exit 1; }

# Ensure the Go protobuf plugin is installed and reachable.
# NOTE(review): `go get -u` only installs tools on pre-module Go toolchains;
# modern Go requires `go install ...@latest` — confirm the targeted Go version.
hash protoc-gen-go 2>/dev/null || go get -u github.com/golang/protobuf/protoc-gen-go
hash protoc-gen-go 2>/dev/null || { echo >&2 'error: Make sure $GOPATH/bin is in your $PATH'; exit 1; }

# Delete stale generated files, then regenerate with the grpc plugin.
find . -type f -iname '*.pb.go' -delete
protoc --proto_path=. blobex.proto --go_out=plugins=grpc:.

49
blobex/server.go Normal file
View file

@ -0,0 +1,49 @@
package blobex
import (
"fmt"
"net"
"github.com/lbryio/lbry.go/v2/extras/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
// Server implements BlobExchangeServer for hosting blobs.
type Server struct {
    pricePerKB uint64 // deweys charged per KB, reported by PriceCheck
}
// ListenAndServe starts a gRPC BlobExchange server on the given TCP port.
// Note that grpcServer.Serve blocks, so this function only returns once the
// server stops (or the listen fails).
// NOTE(review): the registered Server is zero-valued, so pricePerKB is always
// 0 here — confirm whether the price should be configurable.
func ListenAndServe(port int) (*grpc.Server, error) {
    listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
    if err != nil {
        return nil, errors.Prefix("failed to listen", err)
    }
    grpcServer := grpc.NewServer()
    RegisterBlobExchangeServer(grpcServer, &Server{})
    // determine whether to use TLS
    err = grpcServer.Serve(listener)
    return grpcServer, err
}
// PriceCheck reports the server's configured price per KB.
func (s *Server) PriceCheck(ctx context.Context, r *PriceCheckRequest) (*PriceCheckResponse, error) {
    return &PriceCheckResponse{
        DeweysPerKB: s.pricePerKB,
    }, nil
}

// DownloadCheck is an unimplemented stub.
// NOTE(review): returns a nil response with a nil error, so callers must
// nil-check — confirm this is intentional.
func (s *Server) DownloadCheck(context.Context, *HashesRequest) (*HashesResponse, error) {
    return nil, nil
}

// Download is an unimplemented stub.
func (s *Server) Download(BlobExchange_DownloadServer) error {
    return nil
}

// UploadCheck is an unimplemented stub (same nil,nil caveat as DownloadCheck).
func (s *Server) UploadCheck(context.Context, *HashesRequest) (*HashesResponse, error) {
    return nil, nil
}

// Upload is an unimplemented stub.
func (s *Server) Upload(BlobExchange_UploadServer) error {
    return nil
}

24
claim/decode.go Normal file
View file

@ -0,0 +1,24 @@
package claim
import (
"bytes"
types "github.com/lbryio/types/v2/go"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
)
// ToJSON decodes a serialized Claim protobuf and renders it as indented JSON.
// Returns the (possibly partial) JSON string along with any unmarshal or
// marshal error.
func ToJSON(value []byte) (string, error) {
    claim := &types.Claim{}
    if err := proto.Unmarshal(value, claim); err != nil {
        return "", err
    }

    var buf bytes.Buffer
    marshaler := jsonpb.Marshaler{Indent: " "}
    err := marshaler.Marshal(&buf, claim)
    return buf.String(), err
}

View file

@ -1,38 +0,0 @@
package cmd
import (
sync "github.com/lbryio/lbry.go/ytsync"
"github.com/lbryio/lbry.go/ytsync/sdk"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// init registers the `ytcount` subcommand on the root command.
func init() {
    var ytCountCmd = &cobra.Command{
        Use:   "ytcount <youtube_api_key> <youtube_channel_id>",
        Args:  cobra.ExactArgs(2),
        Short: "Count videos in a youtube channel",
        Run:   ytcount,
    }
    RootCmd.AddCommand(ytCountCmd)
}
// ytcount counts the videos in the given youtube channel and logs the total.
// args: [0] youtube API key, [1] youtube channel ID (arity enforced by cobra).
func ytcount(cmd *cobra.Command, args []string) {
    apiKey, channel := args[0], args[1]

    syncer := sync.Sync{
        APIConfig: &sdk.APIConfig{
            YoutubeAPIKey: apiKey,
        },
        YoutubeChannelID: channel,
    }

    total, err := syncer.CountVideos()
    if err != nil {
        panic(err)
    }
    log.Printf("%d videos in channel %s\n", total, channel)
}

View file

@ -1,63 +0,0 @@
package cmd
import (
"strconv"
"time"
"github.com/lbryio/lbry.go/dht"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// init registers the `dht` command and its `ping` subcommand.
func init() {
    d := &cobra.Command{
        Use:   "dht <action>",
        Args:  cobra.ExactArgs(1),
        Short: "Do DHT things",
        Run:   dhtCmd,
    }
    RootCmd.AddCommand(d)

    ping := &cobra.Command{
        Use:   "ping <ip>",
        Args:  cobra.ExactArgs(1),
        Short: "Ping a node on the DHT",
        Run:   dhtPingCmd,
    }
    d.AddCommand(ping)
}
// dhtCmd is the bare `dht` command; it only prompts for a subcommand
// (e.g. `dht ping`).
func dhtCmd(cmd *cobra.Command, args []string) {
    // fix: message previously read "chose a command"
    log.Errorln("choose a command")
}
// dhtPingCmd starts a local DHT node bootstrapped off a hardcoded local peer
// and repeatedly calls FindNode for a fixed target until peers are found.
func dhtPingCmd(cmd *cobra.Command, args []string) {
    //ip := args[0]
    port := 49449 // + (rand.Int() % 10)

    cfg := dht.NewStandardConfig()
    cfg.Address = "127.0.0.1:" + strconv.Itoa(port)
    cfg.PrimeNodes = []string{
        "127.0.0.1:10001",
    }

    node := dht.New(cfg)
    log.Println("Starting...")
    go node.Run()

    // give the node a moment to come up before querying
    time.Sleep(2 * time.Second)

    for {
        peers, err := node.FindNode("012b66fc7052d9a0c8cb563b8ede7662003ba65f425c2661b5c6919d445deeb31469be8b842d6faeea3f2b3ebcaec845")
        if err == nil {
            log.Println("Found peers:", peers)
            break
        }
        // not found yet; retry after a short pause
        time.Sleep(time.Second * 1)
    }

    log.Println("done")
}

View file

@ -1,187 +0,0 @@
package cmd
import (
"strconv"
"sync"
"time"
"github.com/lbryio/lbry.go/errors"
"github.com/lbryio/lbry.go/jsonrpc"
"github.com/shopspring/decimal"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// init registers the `franklin` subcommand on the root command.
func init() {
    var franklinCmd = &cobra.Command{
        Use:   "franklin",
        Short: "Test availability of homepage content",
        Run: func(cmd *cobra.Command, args []string) {
            franklin()
        },
    }
    RootCmd.AddCommand(franklinCmd)
}
// Tuning knobs for the availability test run.
const (
    maxPrice         = float64(999)     // refuse to test streams priced above this
    waitForStart     = 5 * time.Second  // delay before checking that a download started
    waitForEnd       = 60 * time.Minute // maximum time to wait for a download to finish
    maxParallelTests = 5                // number of concurrent test workers
)

// Result records whether a single URL's download started and finished.
type Result struct {
    started  bool
    finished bool
}
// franklin tests availability of a hardcoded list of URLs by downloading them
// through the local daemon with maxParallelTests workers, then logs how many
// downloads started and how many finished.
func franklin() {
    conn := jsonrpc.NewClient("")

    var wg sync.WaitGroup
    queue := make(chan string)

    var mutex sync.Mutex // guards results
    results := map[string]Result{}

    for i := 0; i < maxParallelTests; i++ {
        // BUG FIX: Add must run before the goroutine starts. It was previously
        // called inside the goroutine, so wg.Wait() below could return before
        // any worker registered itself.
        wg.Add(1)
        go func() {
            defer wg.Done()
            for {
                url, more := <-queue
                if !more {
                    return
                }
                res, err := doURL(conn, url)
                mutex.Lock()
                results[url] = res
                mutex.Unlock()
                if err != nil {
                    log.Errorln(url + ": " + err.Error())
                }
            }
        }()
    }

    urls := []string{"one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten"}
    for _, url := range urls {
        queue <- url
    }
    close(queue)
    wg.Wait()

    countStarted := 0
    countFinished := 0
    for _, r := range results {
        if r.started {
            countStarted++
        }
        if r.finished {
            countFinished++
        }
    }
    log.Println("Started: " + strconv.Itoa(countStarted) + " of " + strconv.Itoa(len(results)))
    log.Println("Finished: " + strconv.Itoa(countFinished) + " of " + strconv.Itoa(len(results)))
}
// doURL runs one availability test: estimate the stream's cost, start a
// download with `get`, check after waitForStart that bytes are flowing, then
// poll every 15s (up to waitForEnd) for completion. The returned Result
// records whether the download started/finished; the error covers setup
// failures, not slow downloads.
func doURL(conn *jsonrpc.Client, url string) (Result, error) {
    log.Infoln(url + ": Starting")

    result := Result{}

    // refuse to fetch streams that cost more than maxPrice
    price, err := conn.StreamCostEstimate(url, nil)
    if err != nil {
        return result, err
    }
    if price == nil {
        return result, errors.Err("could not get price of " + url)
    }
    if decimal.Decimal(*price).Cmp(decimal.NewFromFloat(maxPrice)) == 1 {
        return result, errors.Err("the price of " + url + " is too damn high")
    }

    startTime := time.Now()
    get, err := conn.Get(url, nil, nil)
    if err != nil {
        return result, err
    } else if get == nil {
        return result, errors.Err("received no response for 'get' of " + url)
    }

    // an already-complete download tells us nothing about availability
    if get.Completed {
        log.Infoln(url + ": cannot test because we already have it")
        return result, nil
    }

    log.Infoln(url + ": get took " + time.Since(startTime).String())
    log.Infoln(url + ": waiting " + waitForStart.String() + " to see if it starts")
    time.Sleep(waitForStart)

    // first checkpoint: did the download start at all?
    fileStartedResult, err := conn.FileList(jsonrpc.FileListOptions{Outpoint: &get.Outpoint})
    if err != nil {
        return result, err
    }
    if fileStartedResult == nil || len(*fileStartedResult) < 1 {
        log.Errorln(url + ": failed to start in " + waitForStart.String())
    } else if (*fileStartedResult)[0].Completed {
        log.Infoln(url + ": already finished after " + waitForStart.String() + ". boom!")
        result.started = true
        result.finished = true
        return result, nil
    } else if (*fileStartedResult)[0].WrittenBytes == 0 {
        log.Errorln(url + ": says it started, but has 0 bytes downloaded after " + waitForStart.String())
    } else {
        log.Infoln(url + ": started, with " + strconv.FormatUint((*fileStartedResult)[0].WrittenBytes, 10) + " bytes downloaded")
        result.started = true
    }

    log.Infoln(url + ": waiting up to " + waitForEnd.String() + " for file to finish")
    var fileFinishedResult *jsonrpc.FileListResponse
    ticker := time.NewTicker(15 * time.Second)
    // todo: timeout should be based on file size

    timeout := time.After(waitForEnd)

    // second checkpoint: poll until completed or timed out
WaitForFinish:
    for {
        select {
        case <-ticker.C:
            fileFinishedResult, err = conn.FileList(jsonrpc.FileListOptions{Outpoint: &get.Outpoint})
            if err != nil {
                return result, err
            }
            if fileFinishedResult != nil && len(*fileFinishedResult) > 0 {
                if (*fileFinishedResult)[0].Completed {
                    ticker.Stop()
                    break WaitForFinish
                } else {
                    log.Infoln(url + ": " + strconv.FormatUint((*fileFinishedResult)[0].WrittenBytes, 10) + " bytes downloaded after " + time.Since(startTime).String())
                }
            }
        case <-timeout:
            ticker.Stop()
            break WaitForFinish
        }
    }

    if fileFinishedResult == nil || len(*fileFinishedResult) < 1 {
        log.Errorln(url + ": failed to start at all")
    } else if !(*fileFinishedResult)[0].Completed {
        log.Errorln(url + ": says it started, but has not finished after " + waitForEnd.String() + " (" + strconv.FormatUint((*fileFinishedResult)[0].WrittenBytes, 10) + " bytes written)")
    } else {
        log.Infoln(url + ": finished after " + time.Since(startTime).String() + " , with " + strconv.FormatUint((*fileFinishedResult)[0].WrittenBytes, 10) + " bytes downloaded")
        result.finished = true
    }

    return result, nil
}

View file

@ -1,26 +0,0 @@
package cmd
import (
"fmt"
"os"
"github.com/spf13/cobra"
)
// RootCmd represents the base command when called without any subcommands.
var RootCmd = &cobra.Command{
    Use:   "lbry",
    Short: "A command-line swiss army knife for LBRY",
    // Uncomment the following line if your bare application
    // has an action associated with it:
    // Run: func(cmd *cobra.Command, args []string) { },
}
// Execute runs the root command tree, which dispatches to the registered
// subcommands. On failure it prints the error and exits with status 1.
// Called once from main.main().
func Execute() {
    err := RootCmd.Execute()
    if err == nil {
        return
    }
    fmt.Println(err)
    os.Exit(1)
}

View file

@ -1,35 +0,0 @@
package cmd
import (
"os"
"os/signal"
"sync"
"syscall"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// init registers the `test` subcommand on the root command.
func init() {
    var testCmd = &cobra.Command{
        Use:   "test",
        Short: "For testing stuff",
        Run:   test,
    }
    RootCmd.AddCommand(testCmd)
}
// test blocks until SIGINT or SIGTERM arrives, logging before and after.
func test(cmd *cobra.Command, args []string) {
    sigs := make(chan os.Signal, 1)
    signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)

    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        <-sigs
        log.Println("got signal")
    }()

    log.Println("waiting for ctrl+c")
    wg.Wait()
    log.Println("done waiting")
}

View file

@ -1,184 +0,0 @@
package cmd
import (
"os"
"time"
"os/user"
"github.com/lbryio/lbry.go/util"
sync "github.com/lbryio/lbry.go/ytsync"
"github.com/lbryio/lbry.go/ytsync/sdk"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// defaultMaxTries is the default number of attempts for a failing publish.
const defaultMaxTries = 3

// Command-line flag values for the ytsync command (bound in init below).
var (
    stopOnError             bool
    maxTries                int
    takeOverExistingChannel bool
    refill                  int // NOTE(review): never bound to a flag in init, so always 0 — confirm intent
    limit                   int
    skipSpaceCheck          bool
    syncUpdate              bool
    singleRun               bool
    syncStatus              string
    channelID               string
    syncFrom                int64
    syncUntil               int64
    concurrentJobs          int
    videosLimit             int
    maxVideoSize            int
)
// init registers the `ytsync` command and binds its flags.
// NOTE(review): the package-level `refill` variable is used by ytSync but is
// not bound to any flag here — confirm whether a --refill flag was intended.
func init() {
    var ytSyncCmd = &cobra.Command{
        Use:   "ytsync",
        Args:  cobra.RangeArgs(0, 0),
        Short: "Publish youtube channels into LBRY network automatically.",
        Run:   ytSync,
    }
    ytSyncCmd.Flags().BoolVar(&stopOnError, "stop-on-error", false, "If a publish fails, stop all publishing and exit")
    ytSyncCmd.Flags().IntVar(&maxTries, "max-tries", defaultMaxTries, "Number of times to try a publish that fails")
    ytSyncCmd.Flags().BoolVar(&takeOverExistingChannel, "takeover-existing-channel", false, "If channel exists and we don't own it, take over the channel")
    ytSyncCmd.Flags().IntVar(&limit, "limit", 0, "limit the amount of channels to sync")
    ytSyncCmd.Flags().BoolVar(&skipSpaceCheck, "skip-space-check", false, "Do not perform free space check on startup")
    ytSyncCmd.Flags().BoolVar(&syncUpdate, "update", false, "Update previously synced channels instead of syncing new ones")
    ytSyncCmd.Flags().BoolVar(&singleRun, "run-once", false, "Whether the process should be stopped after one cycle or not")
    ytSyncCmd.Flags().StringVar(&syncStatus, "status", "", "Specify which queue to pull from. Overrides --update")
    ytSyncCmd.Flags().StringVar(&channelID, "channelID", "", "If specified, only this channel will be synced.")
    ytSyncCmd.Flags().Int64Var(&syncFrom, "after", time.Unix(0, 0).Unix(), "Specify from when to pull jobs [Unix time](Default: 0)")
    ytSyncCmd.Flags().Int64Var(&syncUntil, "before", time.Now().Unix(), "Specify until when to pull jobs [Unix time](Default: current Unix time)")
    ytSyncCmd.Flags().IntVar(&concurrentJobs, "concurrent-jobs", 1, "how many jobs to process concurrently")
    ytSyncCmd.Flags().IntVar(&videosLimit, "videos-limit", 1000, "how many videos to process per channel")
    ytSyncCmd.Flags().IntVar(&maxVideoSize, "max-size", 2048, "Maximum video size to process (in MB)")
    RootCmd.AddCommand(ytSyncCmd)
}
// ytSync validates flags and environment configuration (API, youtube, AWS S3,
// lbrycrd, slack), then builds a SyncManager and runs it until it stops,
// reporting errors and termination to slack if a token was configured.
func ytSync(cmd *cobra.Command, args []string) {
    var hostname string
    slackToken := os.Getenv("SLACK_TOKEN")
    if slackToken == "" {
        log.Error("A slack token was not present in env vars! Slack messages disabled!")
    } else {
        var err error
        hostname, err = os.Hostname()
        if err != nil {
            log.Error("could not detect system hostname")
            hostname = "ytsync-unknown"
        }
        util.InitSlack(os.Getenv("SLACK_TOKEN"), os.Getenv("SLACK_CHANNEL"), hostname)
    }

    // flag sanity checks
    if syncStatus != "" && !util.InSlice(syncStatus, sync.SyncStatuses) {
        log.Errorf("status must be one of the following: %v\n", sync.SyncStatuses)
        return
    }
    if stopOnError && maxTries != defaultMaxTries {
        log.Errorln("--stop-on-error and --max-tries are mutually exclusive")
        return
    }
    if maxTries < 1 {
        log.Errorln("setting --max-tries less than 1 doesn't make sense")
        return
    }
    if limit < 0 {
        log.Errorln("setting --limit less than 0 (unlimited) doesn't make sense")
        return
    }

    // required/optional environment configuration
    apiURL := os.Getenv("LBRY_API")
    apiToken := os.Getenv("LBRY_API_TOKEN")
    youtubeAPIKey := os.Getenv("YOUTUBE_API_KEY")
    blobsDir := os.Getenv("BLOBS_DIRECTORY")
    lbrycrdString := os.Getenv("LBRYCRD_STRING")
    awsS3ID := os.Getenv("AWS_S3_ID")
    awsS3Secret := os.Getenv("AWS_S3_SECRET")
    awsS3Region := os.Getenv("AWS_S3_REGION")
    awsS3Bucket := os.Getenv("AWS_S3_BUCKET")
    if apiURL == "" {
        log.Errorln("An API URL was not defined. Please set the environment variable LBRY_API")
        return
    }
    if apiToken == "" {
        log.Errorln("An API Token was not defined. Please set the environment variable LBRY_API_TOKEN")
        return
    }
    if youtubeAPIKey == "" {
        log.Errorln("A Youtube API key was not defined. Please set the environment variable YOUTUBE_API_KEY")
        return
    }
    if awsS3ID == "" {
        log.Errorln("AWS S3 ID credentials were not defined. Please set the environment variable AWS_S3_ID")
        return
    }
    if awsS3Secret == "" {
        log.Errorln("AWS S3 Secret credentials were not defined. Please set the environment variable AWS_S3_SECRET")
        return
    }
    if awsS3Region == "" {
        log.Errorln("AWS S3 Region was not defined. Please set the environment variable AWS_S3_REGION")
        return
    }
    if awsS3Bucket == "" {
        log.Errorln("AWS S3 Bucket was not defined. Please set the environment variable AWS_S3_BUCKET")
        return
    }
    if lbrycrdString == "" {
        log.Infoln("Using default (local) lbrycrd instance. Set LBRYCRD_STRING if you want to use something else")
    }
    if blobsDir == "" {
        // default to the current user's lbrynet blob directory
        usr, err := user.Current()
        if err != nil {
            log.Errorln(err.Error())
            return
        }
        blobsDir = usr.HomeDir + "/.lbrynet/blobfiles/"
    }

    syncProperties := &sdk.SyncProperties{
        SyncFrom:         syncFrom,
        SyncUntil:        syncUntil,
        YoutubeChannelID: channelID,
    }
    apiConfig := &sdk.APIConfig{
        YoutubeAPIKey: youtubeAPIKey,
        ApiURL:        apiURL,
        ApiToken:      apiToken,
        HostName:      hostname,
    }
    sm := sync.NewSyncManager(
        stopOnError,
        maxTries,
        takeOverExistingChannel,
        refill,
        limit,
        skipSpaceCheck,
        syncUpdate,
        concurrentJobs,
        concurrentJobs,
        blobsDir,
        videosLimit,
        maxVideoSize,
        lbrycrdString,
        awsS3ID,
        awsS3Secret,
        awsS3Region,
        awsS3Bucket,
        syncStatus,
        singleRun,
        syncProperties,
        apiConfig,
    )
    err := sm.Start()
    if err != nil {
        sync.SendErrorToSlack(err.Error())
    }

    sync.SendInfoToSlack("Syncing process terminated!")
}

1
dht/.gitignore vendored
View file

@ -1 +0,0 @@
.DS_Store

View file

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Dean Karn
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -1,87 +0,0 @@
![](https://raw.githubusercontent.com/shiyanhui/dht/master/doc/screen-shot.png)
See the video on the [Youtube](https://www.youtube.com/watch?v=AIpeQtw22kc).
[中文版README](https://github.com/shiyanhui/dht/blob/master/README_CN.md)
## Introduction
DHT implements the bittorrent DHT protocol in Go. Now it includes:
- [BEP-3 (part)](http://www.bittorrent.org/beps/bep_0003.html)
- [BEP-5](http://www.bittorrent.org/beps/bep_0005.html)
- [BEP-9](http://www.bittorrent.org/beps/bep_0009.html)
- [BEP-10](http://www.bittorrent.org/beps/bep_0010.html)
It contains two modes, the standard mode and the crawling mode. The standard
mode follows the BEPs, and you can use it as a standard dht server. The crawling
mode aims to crawl as much metadata info as possible. It doesn't follow the
standard BEPs protocol. With the crawling mode, you can build another [BTDigg](http://btdigg.org/).
[bthub.io](http://bthub.io) is a BT search engine based on the crawling mode.
## Installation
go get github.com/shiyanhui/dht
## Example
Below is a simple spider. You can move [here](https://github.com/shiyanhui/dht/blob/master/sample)
to see more samples.
```go
import (
"fmt"
"github.com/shiyanhui/dht"
)
func main() {
downloader := dht.NewWire(65535)
go func() {
// once we got the request result
for resp := range downloader.Response() {
fmt.Println(resp.InfoHash, resp.MetadataInfo)
}
}()
go downloader.Run()
config := dht.NewCrawlConfig()
config.OnAnnouncePeer = func(infoHash, ip string, port int) {
// request to download the metadata info
downloader.Request([]byte(infoHash), ip, port)
}
d := dht.New(config)
d.Run()
}
```
## Download
You can download the demo compiled binary file [here](https://github.com/shiyanhui/dht/files/407021/spider.zip).
## Note
- The default crawl mode configure costs about 300M RAM. Set **MaxNodes**
and **BlackListMaxSize** to fit yourself.
- Now it can't run in a LAN because of NAT.
## TODO
- [ ] NAT Traversal.
- [ ] Implements the full BEP-3.
- [ ] Optimization.
## FAQ
#### Why it is slow compared to other spiders ?
Well, maybe there are several reasons.
- DHT aims to implements the standard BitTorrent DHT protocol, not born for crawling the DHT network.
- NAT Traversal issue. You run the crawler in a local network.
- It will block IPs that look bad, and a good IP may be mis-judged.
## License
MIT, read more [here](https://github.com/shiyanhui/dht/blob/master/LICENSE)

View file

@ -1,163 +0,0 @@
package dht
import (
"fmt"
"strings"
)
// bitmap represents a bit array.
type bitmap struct {
Size int
data []byte
}
// newBitmap returns a size-length bitmap pointer.
func newBitmap(size int) *bitmap {
div, mod := size/8, size%8
if mod > 0 {
div++
}
return &bitmap{size, make([]byte, div)}
}
// newBitmapFrom returns a new copyed bitmap pointer which
// newBitmap.data = other.data[:size].
func newBitmapFrom(other *bitmap, size int) *bitmap {
bitmap := newBitmap(size)
if size > other.Size {
size = other.Size
}
div := size / 8
for i := 0; i < div; i++ {
bitmap.data[i] = other.data[i]
}
for i := div * 8; i < size; i++ {
if other.Bit(i) == 1 {
bitmap.Set(i)
}
}
return bitmap
}
// newBitmapFromBytes returns a bitmap pointer created from a byte array.
func newBitmapFromBytes(data []byte) *bitmap {
bitmap := newBitmap(len(data) * 8)
copy(bitmap.data, data)
return bitmap
}
// newBitmapFromString returns a bitmap pointer created from a string.
func newBitmapFromString(data string) *bitmap {
return newBitmapFromBytes([]byte(data))
}
// Bit returns the bit at index.
func (bitmap *bitmap) Bit(index int) int {
if index >= bitmap.Size {
panic("index out of range")
}
div, mod := index/8, index%8
return int((uint(bitmap.data[div]) & (1 << uint(7-mod))) >> uint(7-mod))
}
// set sets the bit at index `index`. If bit is true, set 1, otherwise set 0.
func (bitmap *bitmap) set(index int, bit int) {
if index >= bitmap.Size {
panic("index out of range")
}
div, mod := index/8, index%8
shift := byte(1 << uint(7-mod))
bitmap.data[div] &= ^shift
if bit > 0 {
bitmap.data[div] |= shift
}
}
// Set sets the bit at idnex to 1.
func (bitmap *bitmap) Set(index int) {
bitmap.set(index, 1)
}
// Unset sets the bit at idnex to 0.
func (bitmap *bitmap) Unset(index int) {
bitmap.set(index, 0)
}
// Compare compares the prefixLen-prefix of two bitmap.
// - If bitmap.data[:prefixLen] < other.data[:prefixLen], return -1.
// - If bitmap.data[:prefixLen] > other.data[:prefixLen], return 1.
// - Otherwise return 0.
// Panics if prefixLen exceeds either bitmap's Size.
func (bitmap *bitmap) Compare(other *bitmap, prefixLen int) int {
	if prefixLen > bitmap.Size || prefixLen > other.Size {
		panic("index out of range")
	}
	// Compare whole bytes first, then any trailing bits one at a time.
	div, mod := prefixLen/8, prefixLen%8
	for i := 0; i < div; i++ {
		if bitmap.data[i] > other.data[i] {
			return 1
		} else if bitmap.data[i] < other.data[i] {
			return -1
		}
	}
	for i := div * 8; i < div*8+mod; i++ {
		bit1, bit2 := bitmap.Bit(i), other.Bit(i)
		if bit1 > bit2 {
			return 1
		} else if bit1 < bit2 {
			return -1
		}
	}
	return 0
}
// Xor returns the xor value of two bitmap. Panics if sizes differ.
func (bitmap *bitmap) Xor(other *bitmap) *bitmap {
	if bitmap.Size != other.Size {
		panic("size not the same")
	}
	distance := newBitmap(bitmap.Size)
	// XOR whole bytes, then any trailing bits individually.
	div, mod := distance.Size/8, distance.Size%8
	for i := 0; i < div; i++ {
		distance.data[i] = bitmap.data[i] ^ other.data[i]
	}
	for i := div * 8; i < div*8+mod; i++ {
		distance.set(i, bitmap.Bit(i)^other.Bit(i))
	}
	return distance
}
// String returns the bit sequence string of the bitmap (one '0'/'1' per bit).
func (bitmap *bitmap) String() string {
	div, mod := bitmap.Size/8, bitmap.Size%8
	// One slot per whole byte plus one slot per trailing bit.
	buff := make([]string, div+mod)
	for i := 0; i < div; i++ {
		buff[i] = fmt.Sprintf("%08b", bitmap.data[i])
	}
	for i := div; i < div+mod; i++ {
		buff[i] = fmt.Sprintf("%1b", bitmap.Bit(div*8+(i-div)))
	}
	return strings.Join(buff, "")
}
// RawString returns the string value of bitmap.data (raw bytes, not a bit string).
func (bitmap *bitmap) RawString() string {
	return string(bitmap.data)
}

View file

@ -1,69 +0,0 @@
package dht
import (
"testing"
)
// TestBitmap exercises the legacy variable-size bitmap: construction, bit
// get/set/unset, comparison, string rendering, and XOR distance.
func TestBitmap(t *testing.T) {
	a := newBitmap(10)
	b := newBitmapFrom(a, 10)
	c := newBitmapFromBytes([]byte{48, 49, 50, 51, 52, 53, 54, 55, 56, 57})
	d := newBitmapFromString("0123456789")
	e := newBitmap(10)
	// Bit
	for i := 0; i < a.Size; i++ {
		if a.Bit(i) != 0 {
			t.Fail()
		}
	}
	// Compare
	if c.Compare(d, d.Size) != 0 {
		t.Fail()
	}
	// RawString
	if c.RawString() != d.RawString() || c.RawString() != "0123456789" {
		t.Fail()
	}
	// Set
	b.Set(5)
	if b.Bit(5) != 1 {
		t.Fail()
	}
	// Unset
	b.Unset(5)
	if b.Bit(5) == 1 {
		t.Fail()
	}
	// String
	if e.String() != "0000000000" {
		t.Fail()
	}
	e.Set(9)
	if e.String() != "0000000001" {
		t.Fail()
	}
	e.Set(2)
	if e.String() != "0010000001" {
		t.Fail()
	}
	a.Set(0)
	a.Set(5)
	a.Set(8)
	if a.String() != "1000010010" {
		t.Fail()
	}
	// Xor
	b.Set(5)
	b.Set(9)
	if a.Xor(b).String() != "1000000011" {
		t.Fail()
	}
}

399
dht/bits/bitmap.go Normal file
View file

@ -0,0 +1,399 @@
package bits
import (
"crypto/rand"
"encoding/hex"
"math/big"
"strconv"
"strings"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lyoshenka/bencode"
)
// TODO: http://roaringbitmap.org/
const (
	NumBytes = 48           // bytes per Bitmap (the DHT node ID width)
	NumBits  = NumBytes * 8 // 384 addressable bits
)
// Bitmap is a generalized representation of an identifier or data that can be sorted, compared fast. Used by the DHT
// package as a way to handle the unique identifiers of a DHT node. It is a fixed-size value type,
// so assignment copies it.
type Bitmap [NumBytes]byte
// RawString returns the bitmap's bytes as a raw (not hex-encoded) string.
func (b Bitmap) RawString() string {
	return string(b[:])
}
// String returns the hex representation of the bitmap (same as Hex).
func (b Bitmap) String() string {
	return b.Hex()
}
// BString returns the bitmap as a fixed-width string of 0s and 1s:
// exactly NumBits characters, one per bit, most significant bit first.
func (b Bitmap) BString() string {
	var sb strings.Builder
	sb.Grow(NumBits)
	for _, byt := range b {
		bits := strconv.FormatInt(int64(byt), 2)
		// FormatInt drops leading zeros, so a byte like 0x01 would render as
		// "1" instead of "00000001", shifting every later bit's position.
		// Pad each byte back to 8 columns to keep positions meaningful.
		sb.WriteString(strings.Repeat("0", 8-len(bits)))
		sb.WriteString(bits)
	}
	return sb.String()
}
// Hex returns a hexadecimal representation of the bitmap (NumBytes*2 characters).
func (b Bitmap) Hex() string {
	return hex.EncodeToString(b[:])
}
// HexShort returns a hexadecimal representation of the first 4 bytes,
// useful as a compact identifier in logs.
func (b Bitmap) HexShort() string {
	return hex.EncodeToString(b[:4])
}
// HexSimplified returns the hexadecimal representation with all leading 0's removed
// ("0" for an all-zero bitmap).
func (b Bitmap) HexSimplified() string {
	if trimmed := strings.TrimLeft(b.Hex(), "0"); trimmed != "" {
		return trimmed
	}
	return "0"
}
// Big returns the bitmap interpreted as a big-endian unsigned integer.
func (b Bitmap) Big() *big.Int {
	i := new(big.Int)
	i.SetString(b.Hex(), 16)
	return i
}
// Cmp compares b and other and returns:
//
//   -1 if b < other
//    0 if b == other
//   +1 if b > other
//
func (b Bitmap) Cmp(other Bitmap) int {
	// Bytes are big-endian, so the first differing byte decides.
	for k := range b {
		if b[k] < other[k] {
			return -1
		} else if b[k] > other[k] {
			return 1
		}
	}
	return 0
}
// Closer returns true if dist(b,x) < dist(b,y), using XOR as the distance metric.
func (b Bitmap) Closer(x, y Bitmap) bool {
	return x.Xor(b).Cmp(y.Xor(b)) < 0
}
// Equals returns true if every byte in bitmap are equal, false otherwise
func (b Bitmap) Equals(other Bitmap) bool {
	return b.Cmp(other) == 0
}
// Copy returns a duplicate value for the bitmap.
func (b Bitmap) Copy() Bitmap {
	// Bitmap is a fixed-size array, so the value receiver is already an
	// independent copy of the caller's bitmap; returning it duplicates it.
	return b
}
// Xor returns a diff bitmap. If they are equal, the returned bitmap will be all 0's. If 100% unique the returned
// bitmap will be all 1's.
func (b Bitmap) Xor(other Bitmap) Bitmap {
	var out Bitmap
	for i := 0; i < NumBytes; i++ {
		out[i] = b[i] ^ other[i]
	}
	return out
}
// And returns a comparison bitmap, that for each byte returns the AND true table result
func (b Bitmap) And(other Bitmap) Bitmap {
	var ret Bitmap
	for k := range b {
		ret[k] = b[k] & other[k]
	}
	return ret
}
// Or returns a comparison bitmap, that for each byte returns the OR true table result
func (b Bitmap) Or(other Bitmap) Bitmap {
	var ret Bitmap
	for k := range b {
		ret[k] = b[k] | other[k]
	}
	return ret
}
// Not returns a complimentary bitmap that is an inverse. So b.NOT.NOT = b
func (b Bitmap) Not() Bitmap {
	var ret Bitmap
	for k := range b {
		ret[k] = ^b[k]
	}
	return ret
}
// add treats both bitmaps as big-endian unsigned integers and sums them,
// returning the result and a carry flag that is true if the sum overflowed NumBits.
func (b Bitmap) add(other Bitmap) (Bitmap, bool) {
	var ret Bitmap
	carry := false
	// Ripple-carry addition, starting from the least significant bit.
	for i := NumBits - 1; i >= 0; i-- {
		bBit := getBit(b[:], i)
		oBit := getBit(other[:], i)
		// Sum bit = bBit XOR oBit XOR carry (for bools, `!=` is XOR).
		setBit(ret[:], i, bBit != oBit != carry)
		carry = (bBit && oBit) || (bBit && carry) || (oBit && carry)
	}
	return ret, carry
}
// Add returns a bitmap that treats both bitmaps as numbers and adding them together. Since the size of a bitmap is
// limited, an overflow is possible when adding bitmaps. Overflow panics.
func (b Bitmap) Add(other Bitmap) Bitmap {
	ret, carry := b.add(other)
	if carry {
		panic("overflow in bitmap addition. limited to " + strconv.Itoa(NumBits) + " bits.")
	}
	return ret
}
// Sub returns a bitmap that treats both bitmaps as numbers and subtracts then via the inverse of the other and adding
// then together a + (-b). Negative bitmaps are not supported so other must be greater than this.
func (b Bitmap) Sub(other Bitmap) Bitmap {
	if b.Cmp(other) < 0 {
		// ToDo: Why is this not supported? Should it say not implemented? BitMap might have a generic use case outside of dht.
		panic("negative bitmaps not supported")
	}
	// Two's complement: a - b == a + (^b + 1). Overflow carries are discarded.
	complement, _ := other.Not().add(FromShortHexP("1"))
	ret, _ := b.add(complement)
	return ret
}
// Get returns the binary bit at the position passed (0 = most significant).
func (b Bitmap) Get(n int) bool {
	return getBit(b[:], n)
}
// Set sets the binary bit at the position passed.
// It returns a modified copy; the receiver is unchanged.
func (b Bitmap) Set(n int, one bool) Bitmap {
	ret := b.Copy()
	setBit(ret[:], n, one)
	return ret
}
// PrefixLen returns the number of leading 0 bits (NumBits for an all-zero bitmap).
func (b Bitmap) PrefixLen() int {
	for i := range b {
		for j := 0; j < 8; j++ {
			if (b[i]>>uint8(7-j))&0x1 != 0 {
				return i*8 + j
			}
		}
	}
	return NumBits
}
// Prefix returns a copy of b with the first n bits set to 1 (if `one` is true) or 0 (if `one` is false)
// https://stackoverflow.com/a/23192263/182709
func (b Bitmap) Prefix(n int, one bool) Bitmap {
	ret := b.Copy()
	// Walk bits from the most significant end; once past position n there is
	// nothing left to change, so bail out of both loops.
Outer:
	for i := range ret {
		for j := 0; j < 8; j++ {
			if i*8+j < n {
				if one {
					ret[i] |= 1 << uint(7-j)
				} else {
					ret[i] &= ^(1 << uint(7-j))
				}
			} else {
				break Outer
			}
		}
	}
	return ret
}
// Suffix returns a copy of b with the last n bits set to 1 (if `one` is true) or 0 (if `one` is false)
// https://stackoverflow.com/a/23192263/182709
func (b Bitmap) Suffix(n int, one bool) Bitmap {
	ret := b.Copy()
	// Walk bits from the least significant end; once before position
	// NumBits-n there is nothing left to change, so bail out of both loops.
Outer:
	for i := len(ret) - 1; i >= 0; i-- {
		for j := 7; j >= 0; j-- {
			if i*8+j >= NumBits-n {
				if one {
					ret[i] |= 1 << uint(7-j)
				} else {
					ret[i] &= ^(1 << uint(7-j))
				}
			} else {
				break Outer
			}
		}
	}
	return ret
}
// MarshalBencode implements the Marshaller(bencode)/Message interface.
// The bitmap is encoded as a raw NumBytes-byte bencode string.
func (b Bitmap) MarshalBencode() ([]byte, error) {
	str := string(b[:])
	return bencode.EncodeBytes(str)
}
// UnmarshalBencode implements the Marshaller(bencode)/Message interface.
// It rejects payloads whose decoded string is not exactly NumBytes long.
func (b *Bitmap) UnmarshalBencode(encoded []byte) error {
	var str string
	err := bencode.DecodeBytes(encoded, &str)
	if err != nil {
		return err
	}
	if len(str) != NumBytes {
		return errors.Err("invalid bitmap length")
	}
	copy(b[:], str)
	return nil
}
// FromBytes returns a bitmap as long as the byte array is of a specific length specified in the parameters.
func FromBytes(data []byte) (Bitmap, error) {
	var bmp Bitmap
	if len(data) != len(bmp) {
		return bmp, errors.Err("invalid bitmap of length %d", len(data))
	}
	copy(bmp[:], data)
	return bmp, nil
}
// FromBytesP returns a bitmap as long as the byte array is of a specific length specified in the parameters
// otherwise it will panic.
func FromBytesP(data []byte) Bitmap {
	bmp, err := FromBytes(data)
	if err != nil {
		panic(err)
	}
	return bmp
}
// FromString returns a bitmap by converting the string to bytes and creating from bytes as long as the byte array
// is of a specific length specified in the parameters
func FromString(data string) (Bitmap, error) {
	return FromBytes([]byte(data))
}
// FromStringP returns a bitmap by converting the string to bytes and creating from bytes as long as the byte array
// is of a specific length specified in the parameters otherwise it will panic.
func FromStringP(data string) Bitmap {
	bmp, err := FromString(data)
	if err != nil {
		panic(err)
	}
	return bmp
}
// FromHex returns a bitmap by converting the hex string to bytes and creating from bytes as long as the byte array
// is of a specific length specified in the parameters
func FromHex(hexStr string) (Bitmap, error) {
	decoded, err := hex.DecodeString(hexStr)
	if err != nil {
		return Bitmap{}, errors.Err(err)
	}
	return FromBytes(decoded)
}
// FromHexP returns a bitmap by converting the hex string to bytes and creating from bytes as long as the byte array
// is of a specific length specified in the parameters otherwise it will panic.
func FromHexP(hexStr string) Bitmap {
	bmp, err := FromHex(hexStr)
	if err != nil {
		panic(err)
	}
	return bmp
}
// FromShortHex returns a bitmap by converting the hex string to bytes, adding the leading zeros prefix to the
// hex string and creating from bytes as long as the byte array is of a specific length specified in the parameters
func FromShortHex(hexStr string) (Bitmap, error) {
	// Guard the padding math: a hex string longer than NumBytes*2 would make
	// strings.Repeat panic on a negative count. Return an error instead,
	// consistent with the other From* constructors.
	if len(hexStr) > NumBytes*2 {
		return Bitmap{}, errors.Err("invalid bitmap: hex string longer than %d characters", NumBytes*2)
	}
	return FromHex(strings.Repeat("0", NumBytes*2-len(hexStr)) + hexStr)
}
// FromShortHexP returns a bitmap by converting the hex string to bytes, adding the leading zeros prefix to the
// hex string and creating from bytes as long as the byte array is of a specific length specified in the parameters
// otherwise it will panic.
func FromShortHexP(hexStr string) Bitmap {
	bmp, err := FromShortHex(hexStr)
	if err != nil {
		panic(err)
	}
	return bmp
}
// FromBigP converts a big.Int into a bitmap, panicking if it does not fit in NumBits bits.
func FromBigP(b *big.Int) Bitmap {
	return FromShortHexP(b.Text(16))
}
// MaxP returns a bitmap with all bits set to 1
func MaxP() Bitmap {
	return FromHexP(strings.Repeat("f", NumBytes*2))
}
// Rand generates a cryptographically random bitmap with the confines of the parameters specified.
// It panics only if the system's random source fails.
func Rand() Bitmap {
	var id Bitmap
	_, err := rand.Read(id[:])
	if err != nil {
		panic(err)
	}
	return id
}
// RandInRangeP generates a cryptographically random bitmap and while it is greater than the high threshold
// bitmap will subtract the diff between high and low until it is no longer greater that the high.
func RandInRangeP(low, high Bitmap) Bitmap {
	diff := high.Sub(low)
	r := Rand()
	// Fold r down until it fits within the diff window.
	for r.Cmp(diff) > 0 {
		r = r.Sub(diff)
	}
	//ToDo - Adding the low at this point doesn't gurantee it will be within the range. Consider bitmaps as numbers and
	// I have a range of 50-100. If get to say 60, and add 50, I would be at 110. Should protect against this?
	return r.Add(low)
}
// getBit reports whether bit n of b is set, where bit 0 is the most
// significant bit of b[0].
func getBit(b []byte, n int) bool {
	byteIdx, bitIdx := n/8, n%8
	mask := byte(1) << uint(7-bitIdx)
	return b[byteIdx]&mask != 0
}
// setBit sets (one=true) or clears (one=false) bit n of b in place, where
// bit 0 is the most significant bit of b[0].
func setBit(b []byte, n int, one bool) {
	byteIdx, bitIdx := n/8, n%8
	mask := byte(1) << uint(7-bitIdx)
	if one {
		b[byteIdx] |= mask
	} else {
		b[byteIdx] &^= mask
	}
}
// Closest returns the bitmap in bitmaps that is closest to target by XOR
// distance. If no bitmaps are provided, target itself is returned.
func Closest(target Bitmap, bitmaps ...Bitmap) Bitmap {
	if len(bitmaps) == 0 {
		return target
	}
	// NOTE: the previous implementation kept `closest = &b`, the address of
	// the range loop variable, which is reused across iterations (pre-Go 1.22).
	// That made *closest always alias the current element, so the comparison
	// never succeeded and the LAST bitmap was returned instead of the closest.
	// Tracking the value directly fixes that.
	closest := bitmaps[0]
	for _, b := range bitmaps[1:] {
		if target.Closer(b, closest) {
			closest = b
		}
	}
	return closest
}

386
dht/bits/bitmap_test.go Normal file
View file

@ -0,0 +1,386 @@
package bits
import (
"fmt"
"testing"
"github.com/lyoshenka/bencode"
)
// TestBitmap covers equality, XOR, prefix length, comparison, closeness,
// and hex round-tripping on hand-built 48-byte bitmaps.
func TestBitmap(t *testing.T) {
	a := Bitmap{
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
		12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
		36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
	}
	// b differs from a only in the last two bytes (46 and 47 swapped).
	b := Bitmap{
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
		12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
		36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 47, 46,
	}
	// c is the expected value of a XOR b.
	c := Bitmap{
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
	}
	if !a.Equals(a) {
		t.Error("bitmap does not equal itself")
	}
	if a.Equals(b) {
		t.Error("bitmap equals another bitmap with different id")
	}
	if !a.Xor(b).Equals(c) {
		t.Error(a.Xor(b))
	}
	if c.PrefixLen() != 375 {
		t.Error(c.PrefixLen())
	}
	if b.Cmp(a) < 0 {
		t.Error("bitmap fails Cmp test")
	}
	if a.Closer(c, b) || !a.Closer(b, c) || c.Closer(a, b) || c.Closer(b, c) {
		t.Error("bitmap fails Closer test")
	}
	id := "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	if FromHexP(id).Hex() != id {
		t.Error(FromHexP(id).Hex())
	}
}
// TestBitmap_GetBit checks getBit against the low nibble of a short-hex
// bitmap ("a" = ...1010 in the least significant bits).
func TestBitmap_GetBit(t *testing.T) {
	tt := []struct {
		bit      int
		expected bool
		panic    bool
	}{
		{bit: 383, expected: false, panic: false},
		{bit: 382, expected: true, panic: false},
		{bit: 381, expected: false, panic: false},
		{bit: 380, expected: true, panic: false},
	}
	b := FromShortHexP("a")
	for _, test := range tt {
		actual := getBit(b[:], test.bit)
		if test.expected != actual {
			t.Errorf("getting bit %d of %s: expected %t, got %t", test.bit, b.HexSimplified(), test.expected, actual)
		}
	}
}
// TestBitmap_SetBit checks setBit for in-range positions and verifies that
// out-of-range positions panic.
func TestBitmap_SetBit(t *testing.T) {
	tt := []struct {
		hex      string
		bit      int
		one      bool
		expected string
		panic    bool
	}{
		{hex: "0", bit: 383, one: true, expected: "1", panic: false},
		{hex: "0", bit: 382, one: true, expected: "2", panic: false},
		{hex: "0", bit: 381, one: true, expected: "4", panic: false},
		{hex: "0", bit: 385, one: true, expected: "1", panic: true},
		{hex: "0", bit: 384, one: true, expected: "1", panic: true},
	}
	for _, test := range tt {
		expected := FromShortHexP(test.expected)
		actual := FromShortHexP(test.hex)
		if test.panic {
			assertPanic(t, fmt.Sprintf("setting bit %d to %t", test.bit, test.one), func() { setBit(actual[:], test.bit, test.one) })
		} else {
			setBit(actual[:], test.bit, test.one)
			if !expected.Equals(actual) {
				t.Errorf("setting bit %d to %t: expected %s, got %s", test.bit, test.one, test.expected, actual.HexSimplified())
			}
		}
	}
}
// TestBitmap_FromHexShort verifies that short hex strings are zero-padded on
// the left to the full NumBytes*2-character representation.
func TestBitmap_FromHexShort(t *testing.T) {
	tt := []struct {
		short string
		long  string
	}{
		{short: "", long: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{short: "0", long: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{short: "00000", long: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{short: "9473745bc", long: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000009473745bc"},
		{short: "09473745bc", long: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000009473745bc"},
		{short: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
			long: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"},
	}
	for _, test := range tt {
		short := FromShortHexP(test.short)
		long := FromHexP(test.long)
		if !short.Equals(long) {
			t.Errorf("short hex %s: expected %s, got %s", test.short, long.Hex(), short.Hex())
		}
	}
}
// TestBitmapMarshal checks that a bitmap bencodes as a raw 48-byte string.
func TestBitmapMarshal(t *testing.T) {
	b := FromStringP("123456789012345678901234567890123456789012345678")
	encoded, err := bencode.EncodeBytes(b)
	if err != nil {
		t.Error(err)
	}
	if string(encoded) != "48:123456789012345678901234567890123456789012345678" {
		t.Error("encoding does not match expected")
	}
}
// TestBitmapMarshalEmbedded checks bencoding of a bitmap inside a struct.
func TestBitmapMarshalEmbedded(t *testing.T) {
	e := struct {
		A string
		B Bitmap
		C int
	}{
		A: "1",
		B: FromStringP("222222222222222222222222222222222222222222222222"),
		C: 3,
	}
	encoded, err := bencode.EncodeBytes(e)
	if err != nil {
		t.Error(err)
	}
	if string(encoded) != "d1:A1:11:B48:2222222222222222222222222222222222222222222222221:Ci3ee" {
		t.Error("encoding does not match expected")
	}
}
// TestBitmapMarshalEmbedded2 checks bencoding of a bitmap inside a list.
func TestBitmapMarshalEmbedded2(t *testing.T) {
	encoded, err := bencode.EncodeBytes([]interface{}{
		FromStringP("333333333333333333333333333333333333333333333333"),
	})
	if err != nil {
		t.Error(err)
	}
	if string(encoded) != "l48:333333333333333333333333333333333333333333333333e" {
		t.Error("encoding does not match expected")
	}
}
// TestBitmap_PrefixLen checks the leading-zero-bit count across boundary values.
func TestBitmap_PrefixLen(t *testing.T) {
	tt := []struct {
		hex string
		len int
	}{
		{len: 0, hex: "F00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{len: 0, hex: "800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{len: 1, hex: "700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{len: 1, hex: "400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{len: 384, hex: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{len: 383, hex: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"},
		{len: 382, hex: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002"},
		{len: 382, hex: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003"},
	}
	for _, test := range tt {
		len := FromHexP(test.hex).PrefixLen()
		if len != test.len {
			t.Errorf("got prefix len %d; expected %d for %s", len, test.len, test.hex)
		}
	}
}
// TestBitmap_Prefix checks clearing/setting the first n bits, including
// negative and past-the-end values of n, plus a PrefixLen round trip.
func TestBitmap_Prefix(t *testing.T) {
	allOne := FromHexP("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
	zerosTT := []struct {
		zeros    int
		expected string
	}{
		{zeros: -123, expected: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
		{zeros: 0, expected: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
		{zeros: 1, expected: "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
		{zeros: 69, expected: "000000000000000007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
		{zeros: 383, expected: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"},
		{zeros: 384, expected: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{zeros: 400, expected: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
	}
	for _, test := range zerosTT {
		expected := FromHexP(test.expected)
		actual := allOne.Prefix(test.zeros, false)
		if !actual.Equals(expected) {
			t.Errorf("%d zeros: got %s; expected %s", test.zeros, actual.Hex(), expected.Hex())
		}
	}
	for i := 0; i < NumBits; i++ {
		b := allOne.Prefix(i, false)
		if b.PrefixLen() != i {
			t.Errorf("got prefix len %d; expected %d for %s", b.PrefixLen(), i, b.Hex())
		}
	}
	allZero := FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
	onesTT := []struct {
		ones     int
		expected string
	}{
		{ones: -123, expected: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{ones: 0, expected: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{ones: 1, expected: "800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{ones: 69, expected: "fffffffffffffffff8000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{ones: 383, expected: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"},
		{ones: 384, expected: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
		{ones: 400, expected: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
	}
	for _, test := range onesTT {
		expected := FromHexP(test.expected)
		actual := allZero.Prefix(test.ones, true)
		if !actual.Equals(expected) {
			t.Errorf("%d ones: got %s; expected %s", test.ones, actual.Hex(), expected.Hex())
		}
	}
}
// TestBitmap_Suffix checks clearing/setting the last n bits, including
// negative and past-the-end values of n.
func TestBitmap_Suffix(t *testing.T) {
	allOne := FromHexP("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
	zerosTT := []struct {
		zeros    int
		expected string
	}{
		{zeros: -123, expected: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
		{zeros: 0, expected: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
		{zeros: 1, expected: "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"},
		{zeros: 69, expected: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe00000000000000000"},
		{zeros: 383, expected: "800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{zeros: 384, expected: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{zeros: 400, expected: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
	}
	for _, test := range zerosTT {
		expected := FromHexP(test.expected)
		actual := allOne.Suffix(test.zeros, false)
		if !actual.Equals(expected) {
			t.Errorf("%d zeros: got %s; expected %s", test.zeros, actual.Hex(), expected.Hex())
		}
	}
	// NOTE(review): this loop exercises Prefix, not Suffix — it duplicates the
	// round-trip check from TestBitmap_Prefix.
	for i := 0; i < NumBits; i++ {
		b := allOne.Prefix(i, false)
		if b.PrefixLen() != i {
			t.Errorf("got prefix len %d; expected %d for %s", b.PrefixLen(), i, b.Hex())
		}
	}
	allZero := FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
	onesTT := []struct {
		ones     int
		expected string
	}{
		{ones: -123, expected: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{ones: 0, expected: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},
		{ones: 1, expected: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"},
		{ones: 69, expected: "0000000000000000000000000000000000000000000000000000000000000000000000000000001fffffffffffffffff"},
		{ones: 383, expected: "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
		{ones: 384, expected: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
		{ones: 400, expected: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
	}
	for _, test := range onesTT {
		expected := FromHexP(test.expected)
		actual := allZero.Suffix(test.ones, true)
		if !actual.Equals(expected) {
			t.Errorf("%d ones: got %s; expected %s", test.ones, actual.Hex(), expected.Hex())
		}
	}
}
// TestBitmap_Add checks bitmap addition, including carry propagation and the
// overflow panic.
func TestBitmap_Add(t *testing.T) {
	tt := []struct {
		a, b, sum string
		panic     bool
	}{
		{"0", "0", "0", false},
		{"0", "1", "1", false},
		{"1", "0", "1", false},
		{"1", "1", "2", false},
		{"8", "4", "c", false},
		{"1000", "0010", "1010", false},
		{"1111", "1111", "2222", false},
		{"ffff", "1", "10000", false},
		{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", false},
		{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "1", "", true},
	}
	for _, test := range tt {
		a := FromShortHexP(test.a)
		b := FromShortHexP(test.b)
		expected := FromShortHexP(test.sum)
		if test.panic {
			assertPanic(t, fmt.Sprintf("adding %s and %s", test.a, test.b), func() { a.Add(b) })
		} else {
			actual := a.Add(b)
			if !expected.Equals(actual) {
				t.Errorf("adding %s and %s; expected %s, got %s", test.a, test.b, test.sum, actual.HexSimplified())
			}
		}
	}
}
// TestBitmap_Sub checks bitmap subtraction, including borrow propagation and
// the negative-result panic.
func TestBitmap_Sub(t *testing.T) {
	tt := []struct {
		a, b, sum string
		panic     bool
	}{
		{"0", "0", "0", false},
		{"1", "0", "1", false},
		{"1", "1", "0", false},
		{"8", "4", "4", false},
		{"f", "9", "6", false},
		{"f", "e", "1", false},
		{"10", "f", "1", false},
		{"2222", "1111", "1111", false},
		{"ffff", "1", "fffe", false},
		{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", false},
		{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0", false},
		{"0", "1", "", true},
	}
	for _, test := range tt {
		a := FromShortHexP(test.a)
		b := FromShortHexP(test.b)
		expected := FromShortHexP(test.sum)
		if test.panic {
			assertPanic(t, fmt.Sprintf("subtracting %s - %s", test.a, test.b), func() { a.Sub(b) })
		} else {
			actual := a.Sub(b)
			if !expected.Equals(actual) {
				t.Errorf("subtracting %s - %s; expected %s, got %s", test.a, test.b, test.sum, actual.HexSimplified())
			}
		}
	}
}
// assertPanic runs f and reports a test error (labeled with text) if f
// completes without panicking.
func assertPanic(t *testing.T, text string, f func()) {
	defer func() {
		if recover() == nil {
			t.Errorf("%s: did not panic as expected", text)
		}
	}()
	f()
}

65
dht/bits/range.go Normal file
View file

@ -0,0 +1,65 @@
package bits
import (
"math/big"
"github.com/lbryio/lbry.go/v2/extras/errors"
)
// Range has a start and end, both inclusive.
type Range struct {
	Start Bitmap
	End   Bitmap
}
// MaxRange returns the full bitmap space, from all-zeros to all-ones.
func MaxRange() Range {
	return Range{
		Start: Bitmap{},
		End:   MaxP(),
	}
}
// IntervalP divides the range into `num` intervals and returns the `n`th one (1-based)
// intervals are approximately the same size, but may not be exact because of rounding issues
// the first interval always starts at the beginning of the range, and the last interval always ends at the end
func (r Range) IntervalP(n, num int) Range {
	if num < 1 || n < 1 || n > num {
		panic(errors.Err("invalid interval %d of %d", n, num))
	}
	start := r.intervalStart(n, num)
	// End just before the next interval begins, except for the last interval,
	// which must end exactly at r.End.
	end := r.End.Big()
	if n < num {
		end = r.intervalStart(n+1, num)
		end.Sub(end, big.NewInt(1))
	}
	return Range{FromBigP(start), FromBigP(end)}
}
// intervalStart returns the starting value of the nth of num intervals,
// distributing the division remainder across the intervals.
func (r Range) intervalStart(n, num int) *big.Int {
	// formula:
	// size = (end - start) / num
	// rem = (end - start) % num
	// intervalStart = rangeStart + (size * n-1) + ((rem * n-1) % num)
	size := new(big.Int)
	rem := new(big.Int)
	size.Sub(r.End.Big(), r.Start.Big()).DivMod(size, big.NewInt(int64(num)), rem)
	size.Mul(size, big.NewInt(int64(n-1)))
	rem.Mul(rem, big.NewInt(int64(n-1))).Mod(rem, big.NewInt(int64(num)))
	start := r.Start.Big()
	start.Add(start, size).Add(start, rem)
	return start
}
// IntervalSize returns the size of the range (End - Start) as a big.Int.
func (r Range) IntervalSize() *big.Int {
	return (&big.Int{}).Sub(r.End.Big(), r.Start.Big())
}
// Contains reports whether b falls within the range (inclusive on both ends).
func (r Range) Contains(b Bitmap) bool {
	return r.Start.Cmp(b) <= 0 && r.End.Cmp(b) >= 0
}

48
dht/bits/range_test.go Normal file
View file

@ -0,0 +1,48 @@
package bits
import (
"math/big"
"testing"
)
// TestMaxRange checks that MaxRange spans from all-zeros to all-ones.
func TestMaxRange(t *testing.T) {
	start := FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
	end := FromHexP("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
	r := MaxRange()
	if !r.Start.Equals(start) {
		t.Error("max range does not start at the beginning")
	}
	if !r.End.Equals(end) {
		t.Error("max range does not end at the end")
	}
}
// TestRange_IntervalP splits the full range into 97 intervals and checks that
// they tile the space exactly (contiguous, covering start and end) and that
// each interval's size stays near the average.
func TestRange_IntervalP(t *testing.T) {
	max := MaxRange()
	numIntervals := 97
	expectedAvg := (&big.Int{}).Div(max.IntervalSize(), big.NewInt(int64(numIntervals)))
	maxDiff := big.NewInt(int64(numIntervals))
	var lastEnd Bitmap
	for i := 1; i <= numIntervals; i++ {
		ival := max.IntervalP(i, numIntervals)
		if i == 1 && !ival.Start.Equals(max.Start) {
			t.Error("first interval does not start at 0")
		}
		if i == numIntervals && !ival.End.Equals(max.End) {
			t.Error("last interval does not end at max")
		}
		// Each interval must start exactly one past the previous interval's end.
		if i > 1 && !ival.Start.Equals(lastEnd.Add(FromShortHexP("1"))) {
			t.Errorf("interval %d of %d: last end was %s, this start is %s", i, numIntervals, lastEnd.Hex(), ival.Start.Hex())
		}
		if ival.IntervalSize().Cmp((&big.Int{}).Add(expectedAvg, maxDiff)) > 0 || ival.IntervalSize().Cmp((&big.Int{}).Sub(expectedAvg, maxDiff)) < 0 {
			t.Errorf("interval %d of %d: interval size is outside the normal range", i, numIntervals)
		}
		lastEnd = ival.End
	}
}

212
dht/bootstrap.go Normal file
View file

@ -0,0 +1,212 @@
package dht
import (
"math/rand"
"net"
"sync"
"time"
"github.com/lbryio/lbry.go/v2/dht/bits"
)
const (
	// bootstrapDefaultRefreshDuration is how long a peer may go without
	// contact before the bootstrap node re-pings it.
	bootstrapDefaultRefreshDuration = 15 * time.Minute
)
// BootstrapNode is a configured node setup for testing.
type BootstrapNode struct {
	Node

	initialPingInterval time.Duration // delay before pinging a newly seen peer
	checkInterval       time.Duration // how stale a peer may get before a re-ping

	nlock   *sync.RWMutex
	peers   map[bits.Bitmap]*peer
	nodeIDs []bits.Bitmap // necessary for efficient random ID selection; kept in sync with peers
}
// NewBootstrapNode returns a BootstrapNode pointer wrapping a new Node with
// the given id and ping intervals. Call Connect to start it.
func NewBootstrapNode(id bits.Bitmap, initialPingInterval, rePingInterval time.Duration) *BootstrapNode {
	b := &BootstrapNode{
		Node: *NewNode(id),

		initialPingInterval: initialPingInterval,
		checkInterval:       rePingInterval,

		nlock:   &sync.RWMutex{},
		peers:   make(map[bits.Bitmap]*peer),
		nodeIDs: make([]bits.Bitmap, 0),
	}
	// Route incoming DHT requests through the bootstrap-specific handler.
	b.requestHandler = b.handleRequest
	return b
}
// Add manually adds a contact
func (b *BootstrapNode) Add(c Contact) {
	b.upsert(c)
}
// Connect connects to the given connection and starts any background threads necessary
func (b *BootstrapNode) Connect(conn UDPConn) error {
	err := b.Node.Connect(conn)
	if err != nil {
		return err
	}

	log.Infof("[%s] bootstrap: node connected", b.id.HexShort())

	// Re-check peer liveness periodically until the stop group fires.
	// Ticking at 1/5 of checkInterval bounds staleness well below the interval.
	go func() {
		t := time.NewTicker(b.checkInterval / 5)
		defer t.Stop() // previously leaked: an unstopped ticker keeps running after the goroutine exits
		for {
			select {
			case <-t.C:
				b.check()
			case <-b.grp.Ch():
				return
			}
		}
	}()

	return nil
}
// upsert adds the contact to the list, or updates the lastPinged time
func (b *BootstrapNode) upsert(c Contact) {
	b.nlock.Lock()
	defer b.nlock.Unlock()

	if peer, exists := b.peers[c.ID]; exists {
		log.Debugf("[%s] bootstrap: touching contact %s", b.id.HexShort(), peer.Contact.ID.HexShort())
		peer.Touch()
		return
	}

	log.Debugf("[%s] bootstrap: adding new contact %s", b.id.HexShort(), c.ID.HexShort())
	// Keep peers and nodeIDs in sync; nodeIDs enables random selection in get().
	b.peers[c.ID] = &peer{c, b.id.Xor(c.ID), time.Now(), 0}
	b.nodeIDs = append(b.nodeIDs, c.ID)
}
// remove removes the contact from the list
func (b *BootstrapNode) remove(c Contact) {
	b.nlock.Lock()
	defer b.nlock.Unlock()

	_, exists := b.peers[c.ID]
	if !exists {
		return
	}

	log.Debugf("[%s] bootstrap: removing contact %s", b.id.HexShort(), c.ID.HexShort())
	// Remove from both structures to keep peers and nodeIDs in sync.
	delete(b.peers, c.ID)
	for i := range b.nodeIDs {
		if b.nodeIDs[i].Equals(c.ID) {
			b.nodeIDs = append(b.nodeIDs[:i], b.nodeIDs[i+1:]...)
			break
		}
	}
}
// get returns up to `limit` random contacts from the list
func (b *BootstrapNode) get(limit int) []Contact {
	b.nlock.RLock()
	defer b.nlock.RUnlock()

	if len(b.peers) < limit {
		limit = len(b.peers)
	}

	// nodeIDs mirrors peers, so a random sample of nodeIDs indexes is a
	// random sample of known contacts.
	ret := make([]Contact, limit)
	for i, k := range randKeys(len(b.nodeIDs))[:limit] {
		ret[i] = b.peers[b.nodeIDs[k]].Contact
	}

	return ret
}
// ping pings a node. if the node responds, it is added to the list. otherwise, it is removed
func (b *BootstrapNode) ping(c Contact) {
	log.Debugf("[%s] bootstrap: pinging %s", b.id.HexShort(), c.ID.HexShort())
	b.grp.Add(1)
	defer b.grp.Done()

	resCh := b.SendAsync(c, Request{Method: pingMethod})

	// Wait for either the response or node shutdown.
	var res *Response

	select {
	case res = <-resCh:
	case <-b.grp.Ch():
		return
	}

	if res != nil && res.Data == pingSuccessResponse {
		b.upsert(c)
	} else {
		b.remove(c)
	}
}
// check pings every peer that has not been active within the check interval.
func (b *BootstrapNode) check() {
	b.nlock.RLock()
	defer b.nlock.RUnlock()

	for _, p := range b.peers {
		if p.ActiveInLast(b.checkInterval) {
			continue
		}
		// ping asynchronously: it needs the write lock we must not hold here
		go b.ping(p.Contact)
	}
}
// handleRequest handles the requests received from udp.
// Supported methods: ping (answered with pingSuccessResponse) and findNode
// (answered with a random sample of known contacts). Any requester not yet in
// the peer list is scheduled for a delayed ping so it can be added once it
// proves reachable.
func (b *BootstrapNode) handleRequest(addr *net.UDPAddr, request Request) {
	switch request.Method {
	case pingMethod:
		err := b.sendMessage(addr, Response{ID: request.ID, NodeID: b.id, Data: pingSuccessResponse})
		if err != nil {
			log.Error("error sending response message - ", err)
		}
	case findNodeMethod:
		if request.Arg == nil {
			log.Errorln("request is missing arg")
			return
		}
		// NOTE(review): the target arg is validated but never used — the reply
		// is a random sample of contacts, not the closest ones. Presumably
		// intentional for a bootstrap node; confirm against the spec.
		err := b.sendMessage(addr, Response{
			ID:       request.ID,
			NodeID:   b.id,
			Contacts: b.get(bucketSize),
		})
		if err != nil {
			log.Error("error sending 'findnodemethod' response message - ", err)
		}
	}

	go func() {
		b.nlock.RLock()
		_, exists := b.peers[request.NodeID]
		b.nlock.RUnlock()
		if !exists {
			log.Debugf("[%s] bootstrap: queuing %s to ping", b.id.HexShort(), request.NodeID.HexShort())
			<-time.After(b.initialPingInterval)
			// re-check after the delay: the peer may have been added meanwhile
			b.nlock.RLock()
			_, exists = b.peers[request.NodeID]
			b.nlock.RUnlock()
			if !exists {
				b.ping(Contact{ID: request.NodeID, IP: addr.IP, Port: addr.Port})
			}
		}
	}()
}
// randKeys returns the integers [0, max) in a uniformly random order.
func randKeys(max int) []int {
	perm := make([]int, max)
	for i := range perm {
		perm[i] = i
	}
	rand.Shuffle(max, func(a, b int) {
		perm[a], perm[b] = perm[b], perm[a]
	})
	return perm
}

24
dht/bootstrap_test.go Normal file
View file

@ -0,0 +1,24 @@
package dht
import (
"net"
"testing"
"github.com/lbryio/lbry.go/v2/dht/bits"
)
// TestBootstrapPing verifies that a bootstrap node can bind a UDP socket and
// shut down cleanly. It does not exercise an actual ping round-trip.
func TestBootstrapPing(t *testing.T) {
	b := NewBootstrapNode(bits.Rand(), 10, bootstrapDefaultRefreshDuration)

	// NOTE(review): a fixed port can collide with other tests or local
	// services — consider binding port 0 and reading the assigned port.
	listener, err := net.ListenPacket(Network, "127.0.0.1:54320")
	if err != nil {
		panic(err)
	}

	err = b.Connect(listener.(*net.UDPConn))
	if err != nil {
		t.Error(err)
	}

	b.Shutdown()
}

77
dht/config.go Normal file
View file

@ -0,0 +1,77 @@
package dht
import (
"strconv"
"time"
"github.com/lbryio/lbry.go/v2/dht/bits"
)
const (
	Network         = "udp4"
	DefaultPort     = 4444
	DefaultPeerPort = 3333

	DefaultAnnounceRate   = 10               // send at most this many announces per second
	DefaultReannounceTime = 50 * time.Minute // should be a bit less than hash expiration time

	// TODO: all these constants should be defaults, and should be used to set values in the standard Config. then the code should use values in the config
	// TODO: alternatively, have a global Config for constants. at least that way tests can modify the values

	alpha           = 5             // this is the constant alpha in the spec
	bucketSize      = 8             // this is the constant k in the spec
	nodeIDLength    = bits.NumBytes // bytes. this is the constant B in the spec
	messageIDLength = 20            // bytes.

	udpRetry            = 1
	udpTimeout          = 5 * time.Second
	udpMaxMessageLength = 4096 // bytes. a findValue response can exceed a single request (~676 bytes) when many nodes store the same value, so leave generous headroom

	maxPeerFails = 3 // after this many failures, a peer is considered bad and will be removed from the routing table

	//tExpire = 60 * time.Minute // the time after which a key/value pair expires; this is a time-to-live (TTL) from the original publication date
	tRefresh = 1 * time.Hour // the time after which an otherwise unaccessed bucket must be refreshed
	//tReplicate = 1 * time.Hour // the interval between Kademlia replication events, when a node is required to publish its entire database
	//tNodeRefresh = 15 * time.Minute // the time after which a good node becomes questionable if it has not messaged us

	compactNodeInfoLength = nodeIDLength + 6 // nodeID + 4 for IP + 2 for port

	tokenSecretRotationInterval = 5 * time.Minute // how often the token-generating secret is rotated
)
// Config contains the settings for a DHT node.
type Config struct {
	// this node's address. format is `ip:port`
	Address string
	// the seed nodes through which we can join in dht network
	SeedNodes []string
	// the hex-encoded node id for this node. if string is empty, a random id will be generated
	NodeID string
	// print the state of the dht every X time
	PrintState time.Duration
	// the port that clients can use to download blobs using the LBRY peer protocol
	PeerProtocolPort int
	// if nonzero, an RPC server will listen to requests on this port and respond to them
	RPCPort int
	// the time after which the original publisher must reannounce a key/value pair
	ReannounceTime time.Duration
	// send at most this many announces per second
	AnnounceRate int
	// channel that will receive notifications about announcements
	AnnounceNotificationCh chan announceNotification
}
// NewStandardConfig returns a Config pointer with default values.
func NewStandardConfig() *Config {
	cfg := &Config{}
	cfg.Address = "0.0.0.0:" + strconv.Itoa(DefaultPort)
	cfg.SeedNodes = []string{
		"lbrynet1.lbry.com:4444",
		"lbrynet2.lbry.com:4444",
		"lbrynet3.lbry.com:4444",
		"lbrynet4.lbry.com:4444",
	}
	cfg.PeerProtocolPort = DefaultPeerPort
	cfg.ReannounceTime = DefaultReannounceTime
	cfg.AnnounceRate = DefaultAnnounceRate
	return cfg
}

133
dht/contact.go Normal file
View file

@ -0,0 +1,133 @@
package dht
import (
"bytes"
"encoding/json"
"net"
"sort"
"strconv"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lyoshenka/bencode"
)
// TODO: if routing table is ever empty (aka the node is isolated), it should re-bootstrap

// Contact contains information for contacting another node on the network.
type Contact struct {
	ID       bits.Bitmap // the node's DHT id
	IP       net.IP
	Port     int // the udp port used for the dht
	PeerPort int // the tcp port a peer can be contacted on for blob requests
}
// Equals returns true if two contacts are the same.
// The ID comparison is performed only when checkID is true.
func (c Contact) Equals(other Contact, checkID bool) bool {
	if !c.IP.Equal(other.IP) || c.Port != other.Port {
		return false
	}
	return !checkID || c.ID == other.ID
}
// Addr returns the address of the contact.
func (c Contact) Addr() *net.UDPAddr {
	addr := net.UDPAddr{IP: c.IP, Port: c.Port}
	return &addr
}
// String returns a short string representation of the contact
func (c Contact) String() string {
	s := c.ID.HexShort() + "@" + c.Addr().String()
	if c.PeerPort == 0 {
		return s
	}
	return s + "(" + strconv.Itoa(c.PeerPort) + ")"
}
// MarshalJSON encodes the contact with a hex ID and textual IP.
func (c Contact) MarshalJSON() ([]byte, error) {
	type contactJSON struct {
		ID       string
		IP       string
		Port     int
		PeerPort int
	}
	out := contactJSON{
		ID:       c.ID.Hex(),
		IP:       c.IP.String(),
		Port:     c.Port,
		PeerPort: c.PeerPort,
	}
	return json.Marshal(&out)
}
// MarshalCompact returns a compact byteslice representation of the contact:
// 4 bytes IPv4 + 2 bytes big-endian peer port + node ID.
// NOTE: The compact representation always uses the tcp PeerPort, not the udp Port. This is dumb, but that's how the python daemon does it
func (c Contact) MarshalCompact() ([]byte, error) {
	ip4 := c.IP.To4()
	if ip4 == nil {
		return nil, errors.Err("ip not set")
	}
	if c.PeerPort < 0 || c.PeerPort > 65535 {
		return nil, errors.Err("invalid port")
	}

	var out bytes.Buffer
	out.Write(ip4)
	out.WriteByte(byte(c.PeerPort >> 8))
	out.WriteByte(byte(c.PeerPort))
	out.Write(c.ID[:])

	if out.Len() != compactNodeInfoLength {
		return nil, errors.Err("i dont know how this happened")
	}
	return out.Bytes(), nil
}
// UnmarshalCompact unmarshals the compact byteslice representation of a contact.
// NOTE: The compact representation always uses the tcp PeerPort, not the udp Port. This is dumb, but that's how the python daemon does it
func (c *Contact) UnmarshalCompact(b []byte) error {
	if len(b) != compactNodeInfoLength {
		return errors.Err("invalid compact length")
	}
	// layout: 4 bytes IPv4, 2 bytes big-endian port, then the node ID
	c.IP = net.IPv4(b[0], b[1], b[2], b[3]).To4()
	c.PeerPort = int(uint16(b[4])<<8 | uint16(b[5]))
	c.ID = bits.FromBytesP(b[6:])
	return nil
}
// MarshalBencode returns the serialized byte slice representation of a contact.
func (c Contact) MarshalBencode() ([]byte, error) {
	// a contact is bencoded as a 3-element list: [ID, IP string, port]
	fields := []interface{}{c.ID, c.IP.String(), c.Port}
	return bencode.EncodeBytes(fields)
}
// UnmarshalBencode unmarshals the serialized byte slice into the appropriate fields of the contact.
// The expected encoding is a 3-element list: [ID, IP string, port].
func (c *Contact) UnmarshalBencode(b []byte) error {
	var raw []bencode.RawMessage
	err := bencode.DecodeBytes(b, &raw)
	if err != nil {
		return err
	}

	if len(raw) != 3 {
		return errors.Err("contact must have 3 elements; got %d", len(raw))
	}

	err = bencode.DecodeBytes(raw[0], &c.ID)
	if err != nil {
		return err
	}

	var ipStr string
	err = bencode.DecodeBytes(raw[1], &ipStr)
	if err != nil {
		return err
	}
	// To4 normalizes to the 4-byte form; non-IPv4 (or unparseable) addresses
	// become nil and are rejected
	c.IP = net.ParseIP(ipStr).To4()
	if c.IP == nil {
		return errors.Err("invalid IP")
	}

	return bencode.DecodeBytes(raw[2], &c.Port)
}
// sortByDistance sorts contacts in place by the xor distance of their IDs to target.
func sortByDistance(contacts []Contact, target bits.Bitmap) {
	less := func(i, j int) bool {
		di := contacts[i].ID.Xor(target)
		dj := contacts[j].ID.Xor(target)
		return di.Cmp(dj) < 0
	}
	sort.Slice(contacts, less)
}

31
dht/contact_test.go Normal file
View file

@ -0,0 +1,31 @@
package dht
import (
"net"
"reflect"
"testing"
"github.com/lbryio/lbry.go/v2/dht/bits"
)
// TestCompactEncoding checks that MarshalCompact produces the expected
// 4-byte IP + 2-byte big-endian port + node ID layout.
func TestCompactEncoding(t *testing.T) {
	c := Contact{
		ID:       bits.FromHexP("1c8aff71b99462464d9eeac639595ab99664be3482cb91a29d87467515c7d9158fe72aa1f1582dab07d8f8b5db277f41"),
		IP:       net.ParseIP("1.2.3.4"),
		PeerPort: int(55<<8 + 66), // encodes to the two bytes {55, 66}
	}

	var compact []byte
	compact, err := c.MarshalCompact()
	if err != nil {
		t.Fatal(err)
	}

	if len(compact) != compactNodeInfoLength {
		t.Fatalf("got length of %d; expected %d", len(compact), compactNodeInfoLength)
	}

	if !reflect.DeepEqual(compact, append([]byte{1, 2, 3, 4, 55, 66}, c.ID[:]...)) {
		t.Errorf("compact bytes not encoded correctly")
	}
}

View file

@ -1,289 +0,0 @@
package dht
import (
"container/list"
"sync"
)
type mapItem struct {
key interface{}
val interface{}
}
// syncedMap represents a goroutine-safe map.
type syncedMap struct {
*sync.RWMutex
data map[interface{}]interface{}
}
// newSyncedMap returns a syncedMap pointer.
func newSyncedMap() *syncedMap {
return &syncedMap{
RWMutex: &sync.RWMutex{},
data: make(map[interface{}]interface{}),
}
}
// Get returns the value mapped to key.
func (smap *syncedMap) Get(key interface{}) (val interface{}, ok bool) {
smap.RLock()
defer smap.RUnlock()
val, ok = smap.data[key]
return
}
// Has returns whether the syncedMap contains the key.
func (smap *syncedMap) Has(key interface{}) bool {
_, ok := smap.Get(key)
return ok
}
// Set sets pair {key: val}.
func (smap *syncedMap) Set(key interface{}, val interface{}) {
smap.Lock()
defer smap.Unlock()
smap.data[key] = val
}
// Delete deletes the key in the map.
func (smap *syncedMap) Delete(key interface{}) {
smap.Lock()
defer smap.Unlock()
delete(smap.data, key)
}
// DeleteMulti deletes keys in batch.
func (smap *syncedMap) DeleteMulti(keys []interface{}) {
smap.Lock()
defer smap.Unlock()
for _, key := range keys {
delete(smap.data, key)
}
}
// Clear resets the data.
func (smap *syncedMap) Clear() {
smap.Lock()
defer smap.Unlock()
smap.data = make(map[interface{}]interface{})
}
// Iter returns a chan which output all items.
func (smap *syncedMap) Iter() <-chan mapItem {
ch := make(chan mapItem)
go func() {
smap.RLock()
for key, val := range smap.data {
ch <- mapItem{
key: key,
val: val,
}
}
smap.RUnlock()
close(ch)
}()
return ch
}
// Len returns the length of syncedMap.
func (smap *syncedMap) Len() int {
smap.RLock()
defer smap.RUnlock()
return len(smap.data)
}
// syncedList represents a goroutine-safe list.
type syncedList struct {
*sync.RWMutex
queue *list.List
}
// newSyncedList returns a syncedList pointer.
func newSyncedList() *syncedList {
return &syncedList{
RWMutex: &sync.RWMutex{},
queue: list.New(),
}
}
// Front returns the first element of slist.
func (slist *syncedList) Front() *list.Element {
slist.RLock()
defer slist.RUnlock()
return slist.queue.Front()
}
// Back returns the last element of slist.
func (slist *syncedList) Back() *list.Element {
slist.RLock()
defer slist.RUnlock()
return slist.queue.Back()
}
// PushFront pushs an element to the head of slist.
func (slist *syncedList) PushFront(v interface{}) *list.Element {
slist.Lock()
defer slist.Unlock()
return slist.queue.PushFront(v)
}
// PushBack pushs an element to the tail of slist.
func (slist *syncedList) PushBack(v interface{}) *list.Element {
slist.Lock()
defer slist.Unlock()
return slist.queue.PushBack(v)
}
// InsertBefore inserts v before mark.
func (slist *syncedList) InsertBefore(
v interface{}, mark *list.Element) *list.Element {
slist.Lock()
defer slist.Unlock()
return slist.queue.InsertBefore(v, mark)
}
// InsertAfter inserts v after mark.
func (slist *syncedList) InsertAfter(
v interface{}, mark *list.Element) *list.Element {
slist.Lock()
defer slist.Unlock()
return slist.queue.InsertAfter(v, mark)
}
// Remove removes e from the slist.
func (slist *syncedList) Remove(e *list.Element) interface{} {
slist.Lock()
defer slist.Unlock()
return slist.queue.Remove(e)
}
// Clear resets the list queue.
func (slist *syncedList) Clear() {
slist.Lock()
defer slist.Unlock()
slist.queue.Init()
}
// Len returns length of the slist.
func (slist *syncedList) Len() int {
slist.RLock()
defer slist.RUnlock()
return slist.queue.Len()
}
// Iter returns a chan which output all elements.
func (slist *syncedList) Iter() <-chan *list.Element {
ch := make(chan *list.Element)
go func() {
slist.RLock()
for e := slist.queue.Front(); e != nil; e = e.Next() {
ch <- e
}
slist.RUnlock()
close(ch)
}()
return ch
}
// KeyedDeque represents a keyed deque.
type keyedDeque struct {
*sync.RWMutex
*syncedList
index map[interface{}]*list.Element
invertedIndex map[*list.Element]interface{}
}
// newKeyedDeque returns a newKeyedDeque pointer.
func newKeyedDeque() *keyedDeque {
return &keyedDeque{
RWMutex: &sync.RWMutex{},
syncedList: newSyncedList(),
index: make(map[interface{}]*list.Element),
invertedIndex: make(map[*list.Element]interface{}),
}
}
// Push pushs a keyed-value to the end of deque.
func (deque *keyedDeque) Push(key interface{}, val interface{}) {
deque.Lock()
defer deque.Unlock()
if e, ok := deque.index[key]; ok {
deque.syncedList.Remove(e)
}
deque.index[key] = deque.syncedList.PushBack(val)
deque.invertedIndex[deque.index[key]] = key
}
// Get returns the keyed value.
func (deque *keyedDeque) Get(key interface{}) (*list.Element, bool) {
deque.RLock()
defer deque.RUnlock()
v, ok := deque.index[key]
return v, ok
}
// Has returns whether key already exists.
func (deque *keyedDeque) HasKey(key interface{}) bool {
_, ok := deque.Get(key)
return ok
}
// Delete deletes a value named key.
func (deque *keyedDeque) Delete(key interface{}) (v interface{}) {
deque.RLock()
e, ok := deque.index[key]
deque.RUnlock()
deque.Lock()
defer deque.Unlock()
if ok {
v = deque.syncedList.Remove(e)
delete(deque.index, key)
delete(deque.invertedIndex, e)
}
return
}
// Removes overwrites list.List.Remove.
func (deque *keyedDeque) Remove(e *list.Element) (v interface{}) {
deque.RLock()
key, ok := deque.invertedIndex[e]
deque.RUnlock()
if ok {
v = deque.Delete(key)
}
return
}
// Clear resets the deque.
func (deque *keyedDeque) Clear() {
deque.Lock()
defer deque.Unlock()
deque.syncedList.Clear()
deque.index = make(map[interface{}]*list.Element)
deque.invertedIndex = make(map[*list.Element]interface{})
}

View file

@ -1,196 +0,0 @@
package dht
import (
"sync"
"testing"
)
func TestSyncedMap(t *testing.T) {
cases := []mapItem{
{"a", 0},
{"b", 1},
{"c", 2},
}
sm := newSyncedMap()
set := func() {
group := sync.WaitGroup{}
for _, item := range cases {
group.Add(1)
go func(item mapItem) {
sm.Set(item.key, item.val)
group.Done()
}(item)
}
group.Wait()
}
isEmpty := func() {
if sm.Len() != 0 {
t.Fail()
}
}
// Set
set()
if sm.Len() != len(cases) {
t.Fail()
}
Loop:
// Iter
for item := range sm.Iter() {
for _, c := range cases {
if item.key == c.key && item.val == c.val {
continue Loop
}
}
t.Fail()
}
// Get, Delete, Has
for _, item := range cases {
val, ok := sm.Get(item.key)
if !ok || val != item.val {
t.Fail()
}
sm.Delete(item.key)
if sm.Has(item.key) {
t.Fail()
}
}
isEmpty()
// DeleteMulti
set()
sm.DeleteMulti([]interface{}{"a", "b", "c"})
isEmpty()
// Clear
set()
sm.Clear()
isEmpty()
}
func TestSyncedList(t *testing.T) {
sl := newSyncedList()
insert := func() {
for i := 0; i < 10; i++ {
sl.PushBack(i)
}
}
isEmpty := func() {
if sl.Len() != 0 {
t.Fail()
}
}
// PushBack
insert()
// Len
if sl.Len() != 10 {
t.Fail()
}
// Iter
i := 0
for item := range sl.Iter() {
if item.Value.(int) != i {
t.Fail()
}
i++
}
// Front
if sl.Front().Value.(int) != 0 {
t.Fail()
}
// Back
if sl.Back().Value.(int) != 9 {
t.Fail()
}
// Remove
for i := 0; i < 10; i++ {
if sl.Remove(sl.Front()).(int) != i {
t.Fail()
}
}
isEmpty()
// Clear
insert()
sl.Clear()
isEmpty()
}
func TestKeyedDeque(t *testing.T) {
cases := []mapItem{
{"a", 0},
{"b", 1},
{"c", 2},
}
deque := newKeyedDeque()
insert := func() {
for _, item := range cases {
deque.Push(item.key, item.val)
}
}
isEmpty := func() {
if deque.Len() != 0 {
t.Fail()
}
}
// Push
insert()
// Len
if deque.Len() != 3 {
t.Fail()
}
// Iter
i := 0
for e := range deque.Iter() {
if e.Value.(int) != i {
t.Fail()
}
i++
}
// HasKey, Get, Delete
for _, item := range cases {
if !deque.HasKey(item.key) {
t.Fail()
}
e, ok := deque.Get(item.key)
if !ok || e.Value.(int) != item.val {
t.Fail()
}
if deque.Delete(item.key) != item.val {
t.Fail()
}
if deque.HasKey(item.key) {
t.Fail()
}
}
isEmpty()
// Clear
insert()
deque.Clear()
isEmpty()
}

View file

@ -1,228 +1,231 @@
// Package dht implements the bittorrent dht protocol. For more information
// see http://www.bittorrent.org/beps/bep_0005.html.
package dht
import (
"encoding/hex"
"errors"
log "github.com/sirupsen/logrus"
"math"
"fmt"
"net"
"strings"
"time"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/sirupsen/logrus"
"github.com/spf13/cast"
)
// Config represents the configure of dht.
type Config struct {
// in mainline dht, k = 8
K int
// candidates are udp, udp4, udp6
Network string
// format is `ip:port`
Address string
// the prime nodes through which we can join in dht network
PrimeNodes []string
// the kbucket expired duration
KBucketExpiredAfter time.Duration
// the node expired duration
NodeExpriedAfter time.Duration
// how long it checks whether the bucket is expired
CheckKBucketPeriod time.Duration
// peer token expired duration
TokenExpiredAfter time.Duration
// the max transaction id
MaxTransactionCursor uint64
// how many nodes routing table can hold
MaxNodes int
// callback when got get_peers request
OnGetPeers func(string, string, int)
// callback when got announce_peer request
OnAnnouncePeer func(string, string, int)
// the times it tries when send fails
Try int
// the size of packet need to be dealt with
PacketJobLimit int
// the size of packet handler
PacketWorkerLimit int
// the nodes num to be fresh in a kbucket
RefreshNodeNum int
var log *logrus.Logger
func UseLogger(l *logrus.Logger) {
log = l
}
// NewStandardConfig returns a Config pointer with default values.
func NewStandardConfig() *Config {
return &Config{
K: 8,
Network: "udp4",
Address: ":4444",
PrimeNodes: []string{
"lbrynet1.lbry.io:4444",
"lbrynet2.lbry.io:4444",
"lbrynet3.lbry.io:4444",
},
NodeExpriedAfter: time.Duration(time.Minute * 15),
KBucketExpiredAfter: time.Duration(time.Minute * 15),
CheckKBucketPeriod: time.Duration(time.Second * 30),
TokenExpiredAfter: time.Duration(time.Minute * 10),
MaxTransactionCursor: math.MaxUint32,
MaxNodes: 5000,
Try: 2,
PacketJobLimit: 1024,
PacketWorkerLimit: 256,
RefreshNodeNum: 8,
}
func init() {
log = logrus.StandardLogger()
//log.SetFormatter(&log.TextFormatter{ForceColors: true})
//log.SetLevel(log.DebugLevel)
}
// DHT represents a DHT node.
type DHT struct {
*Config
node *node
conn *net.UDPConn
routingTable *routingTable
transactionManager *transactionManager
peersManager *peersManager
tokenManager *tokenManager
Ready bool
packets chan packet
workerTokens chan struct{}
// config
conf *Config
// local contact
contact Contact
// node
node *Node
// stopGroup to shut down DHT
grp *stop.Group
// channel is closed when DHT joins network
joined chan struct{}
// cache for store tokens
tokenCache *tokenCache
// hashes that need to be put into the announce queue or removed from the queue
announceAddRemove chan queueEdit
}
// New returns a DHT pointer. If config is nil, then config will be set to
// the default config.
// New returns a DHT pointer. If config is nil, then config will be set to the default config.
func New(config *Config) *DHT {
if config == nil {
config = NewStandardConfig()
}
node, err := newNode(randomString(nodeIDLength), config.Network, config.Address)
if err != nil {
panic(err)
}
d := &DHT{
Config: config,
node: node,
packets: make(chan packet, config.PacketJobLimit),
workerTokens: make(chan struct{}, config.PacketWorkerLimit),
conf: config,
grp: stop.New(),
joined: make(chan struct{}),
announceAddRemove: make(chan queueEdit),
}
return d
}
// init initializes global variables.
func (dht *DHT) init() {
log.Info("Initializing DHT on " + dht.Address)
log.Infof("Node ID is %s", dht.node.HexID())
listener, err := net.ListenPacket(dht.Network, dht.Address)
func (dht *DHT) connect(conn UDPConn) error {
contact, err := getContact(dht.conf.NodeID, dht.conf.Address)
if err != nil {
panic(err)
return err
}
dht.conn = listener.(*net.UDPConn)
dht.routingTable = newRoutingTable(dht.K, dht)
dht.peersManager = newPeersManager(dht)
dht.tokenManager = newTokenManager(dht.TokenExpiredAfter, dht)
dht.transactionManager = newTransactionManager(dht.MaxTransactionCursor, dht)
dht.contact = contact
dht.node = NewNode(contact.ID)
dht.tokenCache = newTokenCache(dht.node, tokenSecretRotationInterval)
go dht.transactionManager.run()
go dht.tokenManager.clear()
return dht.node.Connect(conn)
}
// Start starts the dht
func (dht *DHT) Start() error {
listener, err := net.ListenPacket(Network, dht.conf.Address)
if err != nil {
return errors.Err(err)
}
conn := listener.(*net.UDPConn)
err = dht.connect(conn)
if err != nil {
return err
}
dht.join()
log.Infof("[%s] DHT ready on %s (%d nodes found during join)",
dht.node.id.HexShort(), dht.contact.Addr().String(), dht.node.rt.Count())
dht.grp.Add(1)
go func() {
dht.runAnnouncer()
dht.grp.Done()
}()
if dht.conf.RPCPort > 0 {
dht.grp.Add(1)
go func() {
dht.runRPCServer(dht.conf.RPCPort)
dht.grp.Done()
}()
}
return nil
}
// join makes current node join the dht network.
func (dht *DHT) join() {
for _, addr := range dht.PrimeNodes {
raddr, err := net.ResolveUDPAddr(dht.Network, addr)
defer close(dht.joined) // if anyone's waiting for join to finish, they'll know its done
log.Infof("[%s] joining DHT network", dht.node.id.HexShort())
// ping nodes, which gets their real node IDs and adds them to the routing table
atLeastOneNodeResponded := false
for _, addr := range dht.conf.SeedNodes {
err := dht.Ping(addr)
if err != nil {
continue
}
// NOTE: Temporary node has NO node id.
dht.transactionManager.findNode(
&node{addr: raddr},
dht.node.id.RawString(),
)
log.Error(errors.Prefix(fmt.Sprintf("[%s] join", dht.node.id.HexShort()), err))
} else {
atLeastOneNodeResponded = true
}
}
// listen receives message from udp.
func (dht *DHT) listen() {
go func() {
buff := make([]byte, 8192)
for {
n, raddr, err := dht.conn.ReadFromUDP(buff)
if !atLeastOneNodeResponded {
log.Errorf("[%s] join: no nodes responded to initial ping", dht.node.id.HexShort())
return
}
// now call iterativeFind on yourself
_, _, err := FindContacts(dht.node, dht.node.id, false, dht.grp.Child())
if err != nil {
continue
log.Errorf("[%s] join: %s", dht.node.id.HexShort(), err.Error())
}
dht.packets <- packet{buff[:n], raddr}
}
}()
// TODO: after joining, refresh all buckets further away than our closest neighbor
// http://xlattice.sourceforge.net/components/protocol/kademlia/specs.html#join
}
// FindNode returns peers who have announced having key.
func (dht *DHT) FindNode(key string) ([]*Peer, error) {
if !dht.Ready {
return nil, errors.New("dht not ready")
// WaitUntilJoined blocks until the node joins the network.
func (dht *DHT) WaitUntilJoined() {
if dht.joined == nil {
panic("dht not initialized")
}
<-dht.joined
}
if len(key) == nodeIDLength*2 {
data, err := hex.DecodeString(key)
// Shutdown shuts down the dht
func (dht *DHT) Shutdown() {
log.Debugf("[%s] DHT shutting down", dht.node.id.HexShort())
dht.grp.StopAndWait()
dht.node.Shutdown()
log.Debugf("[%s] DHT stopped", dht.node.id.HexShort())
}
// Ping pings a given address, creates a temporary contact for sending a message, and returns an error if communication
// fails.
func (dht *DHT) Ping(addr string) error {
raddr, err := net.ResolveUDPAddr(Network, addr)
if err != nil {
return err
}
tmpNode := Contact{ID: bits.Rand(), IP: raddr.IP, Port: raddr.Port}
res := dht.node.Send(tmpNode, Request{Method: pingMethod}, SendOptions{skipIDCheck: true})
if res == nil {
return errors.Err("no response from node %s", addr)
}
return nil
}
// Get returns the list of nodes that have the blob for the given hash
func (dht *DHT) Get(hash bits.Bitmap) ([]Contact, error) {
contacts, found, err := FindContacts(dht.node, hash, true, dht.grp.Child())
if err != nil {
return nil, err
}
key = string(data)
if found {
return contacts, nil
}
return nil, nil
}
peers := dht.peersManager.GetPeers(key, dht.K)
if len(peers) != 0 {
return peers, nil
}
ch := make(chan struct{})
go func() {
neighbors := dht.routingTable.GetNeighbors(newBitmapFromString(key), dht.K)
for _, no := range neighbors {
dht.transactionManager.findNode(no, key)
}
i := 0
for range time.Tick(time.Second * 1) {
i++
peers = dht.peersManager.GetPeers(key, dht.K)
if len(peers) != 0 || i >= 30 {
break
// PrintState prints the current state of the DHT including address, nr outstanding transactions, stored hashes as well
// as current bucket information.
func (dht *DHT) PrintState() {
log.Printf("DHT node %s at %s", dht.contact.String(), time.Now().Format(time.RFC822Z))
log.Printf("Outstanding transactions: %d", dht.node.CountActiveTransactions())
log.Printf("Stored hashes: %d", dht.node.store.CountStoredHashes())
log.Printf("Buckets:")
for _, line := range strings.Split(dht.node.rt.BucketInfo(), "\n") {
log.Println(line)
}
}
ch <- struct{}{}
}()
<-ch
return peers, nil
func (dht DHT) ID() bits.Bitmap {
return dht.contact.ID
}
// Run starts the dht.
func (dht *DHT) Run() {
dht.init()
dht.listen()
dht.join()
func getContact(nodeID, addr string) (Contact, error) {
var c Contact
if nodeID == "" {
c.ID = bits.Rand()
} else {
c.ID = bits.FromHexP(nodeID)
}
dht.Ready = true
log.Info("DHT ready")
ip, port, err := net.SplitHostPort(addr)
if err != nil {
return c, errors.Err(err)
} else if ip == "" {
return c, errors.Err("address does not contain an IP")
} else if port == "" {
return c, errors.Err("address does not contain a port")
}
var pkt packet
tick := time.Tick(dht.CheckKBucketPeriod)
c.IP = net.ParseIP(ip)
if c.IP == nil {
return c, errors.Err("invalid ip")
}
for {
select {
case pkt = <-dht.packets:
handle(dht, pkt)
case <-tick:
if dht.routingTable.Len() == 0 {
dht.join()
} else if dht.transactionManager.len() == 0 {
go dht.routingTable.Fresh()
}
}
c.Port, err = cast.ToIntE(port)
if err != nil {
return c, errors.Err(err)
}
return c, nil
}

214
dht/dht_announce.go Normal file
View file

@ -0,0 +1,214 @@
package dht
import (
"container/ring"
"context"
"math"
"sync"
"time"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lbryio/lbry.go/v2/extras/errors"
"golang.org/x/time/rate"
)
// queueEdit is a request to add or remove a hash from the announce queue.
type queueEdit struct {
	hash bits.Bitmap
	add  bool
}

const (
	announceStarted = "started"
	// NOTE(review): identifier is misspelled ("Finishd"); renaming would
	// touch every user, so it is left as-is here.
	announceFinishd = "finished"
)

// announceNotification reports the start or completion of announcing a hash.
// err is set only on the completion notification, and only on failure.
type announceNotification struct {
	hash   bits.Bitmap
	action string
	err    error
}
// Add adds the hash to the list of hashes this node is announcing.
// It blocks until the announcer goroutine consumes the request.
func (dht *DHT) Add(hash bits.Bitmap) {
	dht.announceAddRemove <- queueEdit{hash: hash, add: true}
}
// Remove removes the hash from the list of hashes this node is announcing.
// It blocks until the announcer goroutine consumes the request.
func (dht *DHT) Remove(hash bits.Bitmap) {
	dht.announceAddRemove <- queueEdit{hash: hash, add: false}
}
// runAnnouncer maintains the queue of hashes this node announces to the DHT.
// Hashes are announced round-robin (kept in a ring), rate-limited to
// conf.AnnounceRate per second, and re-announced after conf.ReannounceTime.
// Runs until the DHT's stop group is closed.
func (dht *DHT) runAnnouncer() {
	type hashAndTime struct {
		hash         bits.Bitmap
		lastAnnounce time.Time
	}

	var queue *ring.Ring                       // ring of hashAndTime, rotated as hashes are announced
	hashes := make(map[bits.Bitmap]*ring.Ring) // index into the ring by hash

	var announceNextHash <-chan time.Time
	timer := time.NewTimer(math.MaxInt64)
	timer.Stop()

	// limitCh emits at most AnnounceRate values per second; receiving from it
	// is permission to announce the next hash.
	limitCh := make(chan time.Time)
	dht.grp.Add(1)
	go func() {
		defer dht.grp.Done()
		limiter := rate.NewLimiter(rate.Limit(dht.conf.AnnounceRate), dht.conf.AnnounceRate)
		for {
			err := limiter.Wait(context.Background()) // TODO: should use grp.ctx somehow? so when grp is closed, wait returns
			if err != nil {
				log.Error(errors.Prefix("rate limiter", err))
				continue
			}

			select {
			case limitCh <- time.Now():
			case <-dht.grp.Ch():
				return
			}
		}
	}()

	maintenance := time.NewTicker(1 * time.Minute)

	// TODO: work to space hash announces out so they aren't bunched up around the reannounce time. track time since last announce. if its been more than the ideal time (reannounce time / numhashes), start announcing hashes early
	for {
		select {
		case <-dht.grp.Ch():
			return

		case <-maintenance.C:
			maxAnnounce := dht.conf.AnnounceRate * int(dht.conf.ReannounceTime.Seconds())
			if len(hashes) > maxAnnounce {
				// TODO: send this to slack
				log.Warnf("DHT has %d hashes, but can only announce %d hashes in the %s reannounce window. Raise the announce rate or spawn more nodes.",
					len(hashes), maxAnnounce, dht.conf.ReannounceTime.String())
			}

		case change := <-dht.announceAddRemove:
			if change.add {
				if _, exists := hashes[change.hash]; exists {
					continue
				}
				r := ring.New(1)
				r.Value = hashAndTime{hash: change.hash}
				if queue != nil {
					queue.Prev().Link(r)
				}
				queue = r
				hashes[change.hash] = r
				announceNextHash = limitCh // announce next hash ASAP
			} else {
				r, exists := hashes[change.hash]
				if !exists {
					continue
				}
				delete(hashes, change.hash)
				if len(hashes) == 0 {
					queue = ring.New(0)
					announceNextHash = nil // no hashes to announce, wait indefinitely
				} else {
					if r == queue {
						queue = queue.Next() // don't lose our pointer
					}
					r.Prev().Link(r.Next())
				}
			}

		case <-announceNextHash:
			ht := queue.Value.(hashAndTime)

			if !ht.lastAnnounce.IsZero() {
				nextAnnounce := ht.lastAnnounce.Add(dht.conf.ReannounceTime)
				if nextAnnounce.After(time.Now()) {
					timer.Reset(time.Until(nextAnnounce))
					announceNextHash = timer.C // wait until next hash should be announced
					continue
				}
			}

			if dht.conf.AnnounceNotificationCh != nil {
				dht.conf.AnnounceNotificationCh <- announceNotification{
					hash:   ht.hash,
					action: announceStarted,
				}
			}

			// FIX: grp.Add(1) previously ran at the top of this case, before
			// the reannounce-time check; the `continue` path above then leaked
			// a counter increment with no matching Done(), which would hang
			// grp.StopAndWait() on shutdown. Add only when the announcing
			// goroutine is actually spawned.
			dht.grp.Add(1)
			go func(hash bits.Bitmap) {
				defer dht.grp.Done()
				err := dht.announce(hash)
				if err != nil {
					log.Error(errors.Prefix("announce", err))
				}

				if dht.conf.AnnounceNotificationCh != nil {
					dht.conf.AnnounceNotificationCh <- announceNotification{
						hash:   ht.hash,
						action: announceFinishd,
						err:    err,
					}
				}
			}(ht.hash)

			queue.Value = hashAndTime{hash: ht.hash, lastAnnounce: time.Now()}
			queue = queue.Next()
			announceNextHash = limitCh // announce next hash ASAP
		}
	}
}
// announce announces to the DHT that this node has the blob for the given hash.
// It finds the closest contacts to the hash and sends each a store request in
// parallel, waiting for all of them to complete.
func (dht *DHT) announce(hash bits.Bitmap) error {
	contacts, _, err := FindContacts(dht.node, hash, false, dht.grp.Child())
	if err != nil {
		return err
	}

	// self-store if we found less than K contacts, or we're closer than the farthest contact
	if len(contacts) < bucketSize {
		contacts = append(contacts, dht.contact)
	} else if hash.Closer(dht.node.id, contacts[bucketSize-1].ID) {
		contacts[bucketSize-1] = dht.contact
	}

	wg := &sync.WaitGroup{}
	for _, c := range contacts {
		wg.Add(1)
		go func(c Contact) {
			dht.store(hash, c)
			wg.Done()
		}(c)
	}
	wg.Wait()

	return nil
}
// store tells contact c that this node has the blob for hash. If c is this
// node itself, the entry is stored locally instead of sent over the network.
func (dht *DHT) store(hash bits.Bitmap, c Contact) {
	if dht.contact.ID == c.ID {
		// self-store
		c.PeerPort = dht.conf.PeerProtocolPort
		dht.node.Store(hash, c)
		return
	}

	// fire-and-forget: the response channel from SendAsync is not awaited here
	dht.node.SendAsync(c, Request{
		Method: storeMethod,
		StoreArgs: &storeArgs{
			BlobHash: hash,
			Value: storeArgsValue{
				Token:  dht.tokenCache.Get(c, hash, dht.grp.Ch()),
				LbryID: dht.contact.ID,
				Port:   dht.conf.PeerProtocolPort,
			},
		},
	})
}

181
dht/dht_test.go Normal file
View file

@ -0,0 +1,181 @@
package dht
import (
"net"
"sync"
"testing"
"time"
"github.com/lbryio/lbry.go/v2/dht/bits"
)
// TestNodeFinder_FindNodes verifies that an iterative contact search from one
// node discovers the bootstrap node and every other node in a 3-node network.
func TestNodeFinder_FindNodes(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow nodeFinder test")
	}

	bs, dhts := TestingCreateNetwork(t, 3, true, false)
	defer func() {
		for i := range dhts {
			dhts[i].Shutdown()
		}
		bs.Shutdown()
	}()

	contacts, found, err := FindContacts(dhts[2].node, bits.Rand(), false, nil)
	if err != nil {
		t.Fatal(err)
	}
	if found {
		t.Fatal("something was found, but it should not have been")
	}
	// fix: message previously read "expected 3 node" for a plural count
	if len(contacts) != 3 {
		t.Errorf("expected 3 nodes, found %d", len(contacts))
	}

	// every known node must appear among the returned contacts
	expected := []struct {
		label string
		id    bits.Bitmap
	}{
		{"bootstrap node", bs.id},
		{"first node", dhts[0].node.id},
		{"second node", dhts[1].node.id},
	}
	for _, e := range expected {
		seen := false
		for _, c := range contacts {
			if c.ID.Equals(e.id) {
				seen = true
				break
			}
		}
		if !seen {
			t.Errorf("did not find %s %s", e.label, e.id.Hex())
		}
	}
}
// TestNodeFinder_FindNodes_NoBootstrap verifies that a contact search fails
// when the network has no bootstrap node to seed the routing tables.
func TestNodeFinder_FindNodes_NoBootstrap(t *testing.T) {
	_, dhts := TestingCreateNetwork(t, 3, false, false)
	defer func() {
		for _, d := range dhts {
			d.Shutdown()
		}
	}()

	if _, _, err := FindContacts(dhts[2].node, bits.Rand(), false, nil); err == nil {
		t.Fatal("contact finder should have errored saying that there are no contacts in the routing table")
	}
}
// TestNodeFinder_FindValue verifies that a contact stored on one node can be
// located by a value search started from a different node.
func TestNodeFinder_FindValue(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow nodeFinder test")
	}

	bs, dhts := TestingCreateNetwork(t, 3, true, false)
	defer func() {
		for _, d := range dhts {
			d.Shutdown()
		}
		bs.Shutdown()
	}()

	// seed node 0 with a value, then search for it from node 2
	targetHash := bits.Rand()
	wanted := Contact{ID: bits.Rand(), IP: net.IPv4(1, 2, 3, 4), Port: 5678}
	dhts[0].node.store.Upsert(targetHash, wanted)

	results, found, err := FindContacts(dhts[2].node, targetHash, true, nil)
	if err != nil {
		t.Fatal(err)
	}
	if !found {
		t.Fatal("node was not found")
	}
	if len(results) != 1 {
		t.Fatalf("expected one node, found %d", len(results))
	}
	if !results[0].ID.Equals(wanted.ID) {
		t.Fatalf("found node id %s, expected %s", results[0].ID.Hex(), wanted.ID.Hex())
	}
}
// TestDHT_LargeDHT spins up a 100-node network, has every node announce a
// random hash concurrently, then checks routing-table membership and that
// each announced hash ended up stored somewhere.
func TestDHT_LargeDHT(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping large DHT test")
	}
	nodes := 100
	bs, dhts := TestingCreateNetwork(t, nodes, true, true)
	defer func() {
		for _, d := range dhts {
			go d.Shutdown()
		}
		bs.Shutdown()
		// give the concurrent shutdowns a moment to finish
		time.Sleep(1 * time.Second)
	}()
	// every node announces one random hash, all in parallel
	wg := &sync.WaitGroup{}
	ids := make([]bits.Bitmap, nodes)
	for i := range ids {
		ids[i] = bits.Rand()
		wg.Add(1)
		go func(index int) {
			defer wg.Done()
			err := dhts[index].announce(ids[index])
			if err != nil {
				t.Error("error announcing random bitmap - ", err)
			}
		}(i)
	}
	wg.Wait()
	// check that each node is in at least 1 other routing table
	rtCounts := make(map[bits.Bitmap]int)
	for _, d := range dhts {
		for _, d2 := range dhts {
			if d.node.id.Equals(d2.node.id) {
				continue
			}
			c := d2.node.rt.GetClosest(d.node.id, 1)
			if len(c) > 1 {
				t.Error("rt returned more than one node when only one requested")
			} else if len(c) == 1 && c[0].ID.Equals(d.node.id) {
				rtCounts[d.node.id]++
			}
		}
	}
	for k, v := range rtCounts {
		if v == 0 {
			t.Errorf("%s was not in any routing tables", k.HexShort())
		}
	}
	// check that each ID is stored by at least 3 nodes
	storeCounts := make(map[bits.Bitmap]int)
	for _, d := range dhts {
		for _, id := range ids {
			if len(d.node.store.Get(id)) > 0 {
				storeCounts[id]++
			}
		}
	}
	for k, v := range storeCounts {
		if v == 0 {
			t.Errorf("%s was not stored by any nodes", k.HexShort())
		}
	}
}

File diff suppressed because it is too large Load diff

View file

@ -1,655 +0,0 @@
package dht
import (
"fmt"
"github.com/davecgh/go-spew/spew"
log "github.com/sirupsen/logrus"
"github.com/spf13/cast"
"github.com/zeebo/bencode"
"net"
"reflect"
"strings"
"sync"
"time"
)
const (
pingMethod = "ping"
storeMethod = "store"
findNodeMethod = "findNode"
findValueMethod = "findValue"
)
const (
generalError = 201 + iota
serverError
protocolError
unknownError
)
const (
requestType = 0
responseType = 1
errorType = 2
)
const (
// these are strings because bencode requires bytestring keys
headerTypeField = "0"
headerMessageIDField = "1"
headerNodeIDField = "2"
headerPayloadField = "3"
headerArgsField = "4"
)
type Message interface {
GetID() string
Encode() ([]byte, error)
}
type Request struct {
ID string
NodeID string
Method string
Args []string
}
func (r Request) GetID() string { return r.ID }
func (r Request) Encode() ([]byte, error) {
return bencode.EncodeBytes(map[string]interface{}{
headerTypeField: requestType,
headerMessageIDField: r.ID,
headerNodeIDField: r.NodeID,
headerPayloadField: r.Method,
headerArgsField: r.Args,
})
}
type findNodeDatum struct {
ID string
IP string
Port int
}
type Response struct {
ID string
NodeID string
Data string
FindNodeData []findNodeDatum
}
func (r Response) GetID() string { return r.ID }
func (r Response) Encode() ([]byte, error) {
data := map[string]interface{}{
headerTypeField: responseType,
headerMessageIDField: r.ID,
headerNodeIDField: r.NodeID,
}
if r.Data != "" {
data[headerPayloadField] = r.Data
} else {
var nodes []interface{}
for _, n := range r.FindNodeData {
nodes = append(nodes, []interface{}{n.ID, n.IP, n.Port})
}
data[headerPayloadField] = nodes
}
log.Info("Response data is ")
spew.Dump(data)
return bencode.EncodeBytes(data)
}
type Error struct {
ID string
NodeID string
Response []string
ExceptionType string
}
func (e Error) GetID() string { return e.ID }
func (e Error) Encode() ([]byte, error) {
return bencode.EncodeBytes(map[string]interface{}{
headerTypeField: errorType,
headerMessageIDField: e.ID,
headerNodeIDField: e.NodeID,
headerPayloadField: e.ExceptionType,
headerArgsField: e.Response,
})
}
// packet represents the information receive from udp.
type packet struct {
data []byte
raddr *net.UDPAddr
}
// token represents the token when response getPeers request.
type token struct {
data string
createTime time.Time
}
// tokenManager managers the tokens.
type tokenManager struct {
*syncedMap
expiredAfter time.Duration
dht *DHT
}
// newTokenManager returns a new tokenManager.
func newTokenManager(expiredAfter time.Duration, dht *DHT) *tokenManager {
return &tokenManager{
syncedMap: newSyncedMap(),
expiredAfter: expiredAfter,
dht: dht,
}
}
// token returns the current token for addr's IP. If no token exists, or the
// existing one has expired, a fresh token is minted and stored first.
func (tm *tokenManager) token(addr *net.UDPAddr) string {
	v, ok := tm.Get(addr.IP.String())
	tk, _ := v.(token)
	// time.Since replaces the deprecated time.Now().Sub(...) form
	if !ok || time.Since(tk.createTime) > tm.expiredAfter {
		tk = token{
			data:       randomString(5),
			createTime: time.Now(),
		}
		tm.Set(addr.IP.String(), tk)
	}
	return tk.data
}
// clear removes expired tokens every 3 minutes. It blocks forever, so it is
// meant to run in its own goroutine.
func (tm *tokenManager) clear() {
	for range time.Tick(time.Minute * 3) {
		// collect expired keys first, then delete in one batch
		keys := make([]interface{}, 0, 100)
		for item := range tm.Iter() {
			// time.Since replaces the deprecated time.Now().Sub(...) form
			if time.Since(item.val.(token).createTime) > tm.expiredAfter {
				keys = append(keys, item.key)
			}
		}
		tm.DeleteMulti(keys)
	}
}
// check reports whether tokenString matches the token previously issued for
// addr's IP. A stored token is single-use: it is deleted once looked up.
func (tm *tokenManager) check(addr *net.UDPAddr, tokenString string) bool {
	ip := addr.IP.String()
	stored, found := tm.Get(ip)
	if !found {
		return false
	}
	tm.Delete(ip) // single use
	tk, _ := stored.(token)
	return tk.data == tokenString
}
// send bencodes data and writes it to addr over the DHT's UDP connection.
// Returns any encoding, deadline, or write error.
func send(dht *DHT, addr *net.UDPAddr, data Message) error {
	log.Infof("Sending %s", spew.Sdump(data))
	encoded, err := data.Encode()
	if err != nil {
		return err
	}
	log.Infof("Encoded: %s", string(encoded))
	// fix: the deadline error was silently discarded; a failed deadline
	// means the write below could block indefinitely
	if err := dht.conn.SetWriteDeadline(time.Now().Add(time.Second * 15)); err != nil {
		return err
	}
	_, err = dht.conn.WriteToUDP(encoded, addr)
	return err
}
// query represents the query data included queried node and query-formed data.
type query struct {
node *node
request Request
}
// transaction implements transaction.
type transaction struct {
*query
id string
response chan struct{}
}
// transactionManager represents the manager of transactions.
type transactionManager struct {
*sync.RWMutex
transactions *syncedMap
index *syncedMap
cursor uint64
maxCursor uint64
queryChan chan *query
dht *DHT
}
// newTransactionManager returns new transactionManager pointer.
func newTransactionManager(maxCursor uint64, dht *DHT) *transactionManager {
return &transactionManager{
RWMutex: &sync.RWMutex{},
transactions: newSyncedMap(),
index: newSyncedMap(),
maxCursor: maxCursor,
queryChan: make(chan *query, 1024),
dht: dht,
}
}
// genTransID generates a transaction id and returns it.
func (tm *transactionManager) genTransID() string {
tm.Lock()
defer tm.Unlock()
tm.cursor = (tm.cursor + 1) % tm.maxCursor
return string(int2bytes(tm.cursor))
}
// newTransaction creates a new transaction.
func (tm *transactionManager) newTransaction(id string, q *query) *transaction {
return &transaction{
id: id,
query: q,
response: make(chan struct{}, tm.dht.Try+1),
}
}
// genIndexKey generates an indexed key which consists of queryType and
// address.
func (tm *transactionManager) genIndexKey(queryType, address string) string {
return strings.Join([]string{queryType, address}, ":")
}
// genIndexKeyByTrans generates an indexed key by a transaction.
func (tm *transactionManager) genIndexKeyByTrans(trans *transaction) string {
return tm.genIndexKey(trans.request.Method, trans.node.addr.String())
}
// insert adds a transaction to transactionManager.
func (tm *transactionManager) insert(trans *transaction) {
tm.Lock()
defer tm.Unlock()
tm.transactions.Set(trans.id, trans)
tm.index.Set(tm.genIndexKeyByTrans(trans), trans)
}
// delete removes a transaction from transactionManager.
func (tm *transactionManager) delete(transID string) {
v, ok := tm.transactions.Get(transID)
if !ok {
return
}
tm.Lock()
defer tm.Unlock()
trans := v.(*transaction)
tm.transactions.Delete(trans.id)
tm.index.Delete(tm.genIndexKeyByTrans(trans))
}
// len returns how many transactions are requesting now.
func (tm *transactionManager) len() int {
return tm.transactions.Len()
}
// transaction returns a transaction. keyType should be one of 0, 1 which
// represents transId and index each.
func (tm *transactionManager) transaction(key string, keyType int) *transaction {
sm := tm.transactions
if keyType == 1 {
sm = tm.index
}
v, ok := sm.Get(key)
if !ok {
return nil
}
return v.(*transaction)
}
// getByTransID returns a transaction by transID.
func (tm *transactionManager) getByTransID(transID string) *transaction {
return tm.transaction(transID, 0)
}
// getByIndex returns a transaction by indexed key.
func (tm *transactionManager) getByIndex(index string) *transaction {
return tm.transaction(index, 1)
}
// transaction gets the proper transaction with whose id is transId and
// address is addr.
func (tm *transactionManager) filterOne(transID string, addr *net.UDPAddr) *transaction {
trans := tm.getByTransID(transID)
if trans == nil || trans.node.addr.String() != addr.String() {
return nil
}
return trans
}
// query sends the query-formed data to udp and waits for the response.
// On timeout it retries, making at most `try` attempts in total. If no
// attempt succeeds and the node was known, it is evicted from the routing
// table.
func (tm *transactionManager) query(q *query, try int) {
	trans := tm.newTransaction(q.request.ID, q)
	tm.insert(trans)
	defer tm.delete(trans.id)

	success := false
	// BUG FIX: the original `break` after receiving on trans.response only
	// exited the select, not the loop, so the query was re-sent even after a
	// successful response. The loop condition now checks success instead.
	for i := 0; i < try && !success; i++ {
		if err := send(tm.dht, q.node.addr, q.request); err != nil {
			break
		}
		select {
		case <-trans.response:
			success = true
		case <-time.After(time.Second * 15):
			// timed out; loop around and retry
		}
	}

	if !success && q.node.id != nil {
		tm.dht.routingTable.RemoveByAddr(q.node.addr.String())
	}
}
// run consumes the query chan, dispatching each query in its own goroutine.
// It blocks until the channel is closed, so it should run in a goroutine.
func (tm *transactionManager) run() {
	// ranging over the channel replaces the original single-case for/select
	// loop, and terminates cleanly if queryChan is ever closed
	for q := range tm.queryChan {
		go tm.query(q, tm.dht.Try)
	}
}
// sendQuery stamps the request with a fresh transaction id and our node id,
// then hands it to the query chan. Queries addressed to ourselves, or
// duplicating an in-flight query of the same method to the same address,
// are dropped.
func (tm *transactionManager) sendQuery(no *node, request Request) {
	// If the target is self, then stop.
	if no.id != nil && no.id.RawString() == tm.dht.node.id.RawString() ||
		tm.getByIndex(tm.genIndexKey(request.Method, no.addr.String())) != nil {
		return
	}
	request.ID = tm.genTransID()
	request.NodeID = tm.dht.node.id.RawString()
	tm.queryChan <- &query{node: no, request: request}
}
// ping sends ping query to the chan.
func (tm *transactionManager) ping(no *node) {
tm.sendQuery(no, Request{Method: pingMethod})
}
// findNode sends find_node query to the chan.
func (tm *transactionManager) findNode(no *node, target string) {
tm.sendQuery(no, Request{Method: findNodeMethod, Args: []string{target}})
}
// handle handles packets received from udp.
func handle(dht *DHT, pkt packet) {
log.Infof("Received message from %s: %s", pkt.raddr.IP.String(), string(pkt.data))
if len(dht.workerTokens) == dht.PacketWorkerLimit {
return
}
dht.workerTokens <- struct{}{}
go func() {
defer func() {
<-dht.workerTokens
}()
var data map[string]interface{}
err := bencode.DecodeBytes(pkt.data, &data)
if err != nil {
log.Errorf("Error decoding data: %s\n%s", err, pkt.data)
return
}
msgType, ok := data[headerTypeField]
if !ok {
log.Errorf("Decoded data has no message type: %s", data)
return
}
switch msgType.(int64) {
case requestType:
request := Request{
ID: data[headerMessageIDField].(string),
NodeID: data[headerNodeIDField].(string),
Method: data[headerPayloadField].(string),
Args: getArgs(data[headerArgsField]),
}
spew.Dump(request)
handleRequest(dht, pkt.raddr, request)
case responseType:
response := Response{
ID: data[headerMessageIDField].(string),
NodeID: data[headerNodeIDField].(string),
}
if reflect.TypeOf(data[headerPayloadField]).Kind() == reflect.String {
response.Data = data[headerPayloadField].(string)
} else {
response.FindNodeData = getFindNodeResponse(data[headerPayloadField])
}
spew.Dump(response)
handleResponse(dht, pkt.raddr, response)
case errorType:
e := Error{
ID: data[headerMessageIDField].(string),
NodeID: data[headerNodeIDField].(string),
ExceptionType: data[headerPayloadField].(string),
Response: getArgs(data[headerArgsField]),
}
handleError(dht, pkt.raddr, e)
default:
log.Errorf("Invalid message type: %s", msgType)
return
}
}()
}
// getFindNodeResponse extracts contact data from a decoded findNode response
// payload. Entries that are not [id string, ip string, port int] triples are
// silently skipped; a non-slice payload yields nil.
func getFindNodeResponse(i interface{}) (data []findNodeDatum) {
	if reflect.TypeOf(i).Kind() != reflect.Slice {
		return
	}
	v := reflect.ValueOf(i)
	for i := 0; i < v.Len(); i++ {
		// each element should be an interface value wrapping a 3-element slice
		if v.Index(i).Kind() != reflect.Interface {
			continue
		}
		contact := v.Index(i).Elem()
		if contact.Type().Kind() != reflect.Slice || contact.Len() != 3 {
			continue
		}
		// id and ip must be strings; the port may decode as int or int64
		if contact.Index(0).Elem().Kind() != reflect.String ||
			contact.Index(1).Elem().Kind() != reflect.String ||
			!(contact.Index(2).Elem().Kind() == reflect.Int64 ||
				contact.Index(2).Elem().Kind() == reflect.Int) {
			continue
		}
		data = append(data, findNodeDatum{
			ID:   contact.Index(0).Elem().String(),
			IP:   contact.Index(1).Elem().String(),
			Port: int(contact.Index(2).Elem().Int()),
		})
	}
	return
}
// getArgs converts a decoded bencode args value into a slice of strings.
// Non-slice input yields a nil slice.
func getArgs(argsInt interface{}) []string {
	var args []string
	if reflect.TypeOf(argsInt).Kind() != reflect.Slice {
		return args
	}
	v := reflect.ValueOf(argsInt)
	for i := 0; i < v.Len(); i++ {
		args = append(args, cast.ToString(v.Index(i).Interface()))
	}
	return args
}
// handleRequest handles the requests received from udp.
func handleRequest(dht *DHT, addr *net.UDPAddr, request Request) (success bool) {
if request.NodeID == dht.node.id.RawString() {
return
}
if len(request.NodeID) != nodeIDLength {
send(dht, addr, Error{ID: request.ID, NodeID: dht.node.id.RawString(), Response: []string{"Invalid ID"}})
return
}
if no, ok := dht.routingTable.GetNodeByAddress(addr.String()); ok && no.id.RawString() != request.NodeID {
dht.routingTable.RemoveByAddr(addr.String())
send(dht, addr, Error{ID: request.ID, NodeID: dht.node.id.RawString(), Response: []string{"Invalid ID"}})
return
}
switch request.Method {
case pingMethod:
send(dht, addr, Response{ID: request.ID, NodeID: dht.node.id.RawString(), Data: "pong"})
case findNodeMethod:
if len(request.Args) < 1 {
send(dht, addr, Error{ID: request.ID, NodeID: dht.node.id.RawString(), Response: []string{"No target"}})
return
}
target := request.Args[0]
if len(target) != nodeIDLength {
send(dht, addr, Error{ID: request.ID, NodeID: dht.node.id.RawString(), Response: []string{"Invalid target"}})
return
}
nodes := []findNodeDatum{}
targetID := newBitmapFromString(target)
no, _ := dht.routingTable.GetNodeKBucktByID(targetID)
if no != nil {
nodes = []findNodeDatum{{ID: no.id.RawString(), IP: no.addr.IP.String(), Port: no.addr.Port}}
} else {
neighbors := dht.routingTable.GetNeighbors(targetID, dht.K)
for _, n := range neighbors {
nodes = append(nodes, findNodeDatum{ID: n.id.RawString(), IP: n.addr.IP.String(), Port: n.addr.Port})
}
}
send(dht, addr, Response{ID: request.ID, NodeID: dht.node.id.RawString(), FindNodeData: nodes})
default:
// send(dht, addr, makeError(t, protocolError, "invalid q"))
return
}
no, _ := newNode(request.NodeID, addr.Network(), addr.String())
dht.routingTable.Insert(no)
return true
}
// findOn puts nodes in the response to the routingTable, then if target is in
// the nodes or all nodes are in the routingTable, it stops. Otherwise it
// continues the search by querying the closest known neighbors.
func findOn(dht *DHT, nodes []findNodeDatum, target *bitmap, queryType string) error {
	hasNew, found := false, false
	for _, n := range nodes {
		no, err := newNode(n.ID, dht.Network, fmt.Sprintf("%s:%d", n.IP, n.Port))
		if err != nil {
			return err
		}
		if no.id.RawString() == target.RawString() {
			found = true
		}
		// Insert reports whether the node was new to the routing table
		if dht.routingTable.Insert(no) {
			hasNew = true
		}
	}
	// stop once the target is found, or when the response taught us nothing new
	if found || !hasNew {
		return nil
	}
	targetID := target.RawString()
	for _, no := range dht.routingTable.GetNeighbors(target, dht.K) {
		switch queryType {
		case findNodeMethod:
			dht.transactionManager.findNode(no, targetID)
		default:
			// only findNode is wired up here; any other type is a programmer error
			panic("invalid find type")
		}
	}
	return nil
}
// handleResponse handles responses received from udp. It matches the response
// to its pending transaction, validates the sender, continues any in-progress
// findNode search, signals the transaction as answered, and inserts the
// responding node into the routing table.
func handleResponse(dht *DHT, addr *net.UDPAddr, response Response) (success bool) {
	// drop responses with no matching (id, addr) transaction
	trans := dht.transactionManager.filterOne(response.ID, addr)
	if trans == nil {
		return
	}
	// If response's node id is not the same with the node id in the
	// transaction, raise error.
	// TODO: is this necessary??? why??
	if trans.node.id != nil && trans.node.id.RawString() != response.NodeID {
		dht.routingTable.RemoveByAddr(addr.String())
		return
	}
	node, err := newNode(response.NodeID, addr.Network(), addr.String())
	if err != nil {
		return
	}
	switch trans.request.Method {
	case pingMethod:
		// nothing extra to do for a pong
	case findNodeMethod:
		target := trans.request.Args[0]
		if findOn(dht, response.FindNodeData, newBitmapFromString(target), findNodeMethod) != nil {
			return
		}
	default:
		return
	}
	// inform transManager to delete transaction.
	trans.response <- struct{}{}
	dht.routingTable.Insert(node)
	return true
}
// handleError handles error messages received from udp. The error content is
// not acted upon; it only signals the matching transaction so the sender
// stops waiting for a response.
func handleError(dht *DHT, addr *net.UDPAddr, e Error) (success bool) {
	if trans := dht.transactionManager.filterOne(e.ID, addr); trans != nil {
		trans.response <- struct{}{}
	}
	return true
}

View file

@ -1,39 +0,0 @@
package dht
import (
"math/rand"
"strconv"
"testing"
"time"
)
func TestDHT(t *testing.T) {
rand.Seed(time.Now().UnixNano())
port := 49449 // + (rand.Int() % 10)
config := NewStandardConfig()
config.Address = "127.0.0.1:" + strconv.Itoa(port)
config.PrimeNodes = []string{
"127.0.0.1:10001",
}
d := New(config)
t.Log("Starting...")
go d.Run()
time.Sleep(2 * time.Second)
for {
peers, err := d.FindNode("012b66fc7052d9a0c8cb563b8ede7662003ba65f425c2661b5c6919d445deeb31469be8b842d6faeea3f2b3ebcaec845")
if err != nil {
time.Sleep(time.Second * 1)
continue
}
t.Log("Found peers:", peers)
break
}
t.Error("failed")
}

472
dht/message.go Normal file
View file

@ -0,0 +1,472 @@
package dht
import (
"crypto/rand"
"encoding/hex"
"reflect"
"strconv"
"strings"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lyoshenka/bencode"
"github.com/spf13/cast"
)
const (
pingMethod = "ping"
storeMethod = "store"
findNodeMethod = "findNode"
findValueMethod = "findValue"
)
const (
pingSuccessResponse = "pong"
storeSuccessResponse = "OK"
)
const (
requestType = 0
responseType = 1
errorType = 2
)
const (
// these are strings because bencode requires bytestring keys
headerTypeField = "0"
headerMessageIDField = "1" // message id is 20 bytes long
headerNodeIDField = "2" // node id is 48 bytes long
headerPayloadField = "3"
headerArgsField = "4"
contactsField = "contacts"
pageField = "p"
tokenField = "token"
protocolVersionField = "protocolVersion"
)
// Message is a DHT message
type Message interface {
bencode.Marshaler
}
// messageID is the fixed-length wire identifier that correlates a request
// with its response.
type messageID [messageIDLength]byte

// HexShort returns the first 8 hex characters of the hex encoded message id.
func (m messageID) HexShort() string {
	return hex.EncodeToString(m[:])[:8]
}

// UnmarshalBencode takes a byte slice and unmarshals the message id.
func (m *messageID) UnmarshalBencode(encoded []byte) error {
	var str string
	err := bencode.DecodeBytes(encoded, &str)
	if err != nil {
		return err
	}
	// copy truncates silently if str is longer than the id; shorter input
	// leaves trailing zero bytes
	copy(m[:], str)
	return nil
}

// MarshalBencode returns the encoded byte slice of the message id.
func (m messageID) MarshalBencode() ([]byte, error) {
	str := string(m[:])
	return bencode.EncodeBytes(str)
}

// newMessageID returns a cryptographically random message id. It panics only
// if the system's random source fails.
func newMessageID() messageID {
	var m messageID
	_, err := rand.Read(m[:])
	if err != nil {
		panic(err)
	}
	return m
}
// Request represents a DHT request message
type Request struct {
ID messageID
NodeID bits.Bitmap
Method string
Arg *bits.Bitmap
StoreArgs *storeArgs
ProtocolVersion int
}
// MarshalBencode returns the serialized byte slice representation of the request
func (r Request) MarshalBencode() ([]byte, error) {
var args interface{}
if r.StoreArgs != nil {
args = r.StoreArgs
} else if r.Arg != nil {
args = []bits.Bitmap{*r.Arg}
} else {
args = []string{} // request must always have keys 0-4, so we use an empty list for PING
}
return bencode.EncodeBytes(map[string]interface{}{
headerTypeField: requestType,
headerMessageIDField: r.ID,
headerNodeIDField: r.NodeID,
headerPayloadField: r.Method,
headerArgsField: args,
})
}
// UnmarshalBencode unmarshals the serialized byte slice into the appropriate fields of the request.
func (r *Request) UnmarshalBencode(b []byte) error {
var raw struct {
ID messageID `bencode:"1"`
NodeID bits.Bitmap `bencode:"2"`
Method string `bencode:"3"`
Args bencode.RawMessage `bencode:"4"`
}
err := bencode.DecodeBytes(b, &raw)
if err != nil {
return errors.Prefix("request unmarshal", err)
}
r.ID = raw.ID
r.NodeID = raw.NodeID
r.Method = raw.Method
if r.Method == storeMethod {
r.StoreArgs = &storeArgs{} // bencode wont find the unmarshaler on a null pointer. need to fix it.
err = bencode.DecodeBytes(raw.Args, &r.StoreArgs)
if err != nil {
return errors.Prefix("request unmarshal", err)
}
} else if len(raw.Args) > 2 { // 2 because an empty list is `le`
r.Arg, r.ProtocolVersion, err = processArgsAndProtoVersion(raw.Args)
if err != nil {
return errors.Prefix("request unmarshal", err)
}
}
return nil
}
func processArgsAndProtoVersion(raw bencode.RawMessage) (arg *bits.Bitmap, version int, err error) {
var args []bencode.RawMessage
err = bencode.DecodeBytes(raw, &args)
if err != nil {
return nil, 0, err
}
if len(args) == 0 {
return nil, 0, nil
}
var extras map[string]int
err = bencode.DecodeBytes(args[len(args)-1], &extras)
if err == nil {
if v, exists := extras[protocolVersionField]; exists {
version = v
args = args[:len(args)-1]
}
}
if len(args) > 0 {
var b bits.Bitmap
err = bencode.DecodeBytes(args[0], &b)
if err != nil {
return nil, 0, err
}
arg = &b
}
return arg, version, nil
}
// argsDebug renders the request arguments as a short human-readable string
// for logging. A request with no arguments yields "".
func (r Request) argsDebug() string {
	switch {
	case r.StoreArgs != nil:
		return r.StoreArgs.BlobHash.HexShort() + ", " + r.StoreArgs.Value.LbryID.HexShort() + ":" + strconv.Itoa(r.StoreArgs.Value.Port)
	case r.Arg != nil:
		return r.Arg.HexShort()
	default:
		return ""
	}
}
type storeArgsValue struct {
Token string `bencode:"token"`
LbryID bits.Bitmap `bencode:"lbryid"`
Port int `bencode:"port"`
}
type storeArgs struct {
BlobHash bits.Bitmap
Value storeArgsValue
NodeID bits.Bitmap // original publisher id? I think this is getting fixed in the new dht stuff
SelfStore bool // this is an int on the wire
}
// MarshalBencode returns the serialized byte slice representation of the
// storage arguments, encoded on the wire as a 4-element list.
func (s storeArgs) MarshalBencode() ([]byte, error) {
	encodedValue, err := bencode.EncodeString(s.Value)
	if err != nil {
		return nil, err
	}

	// SelfStore travels as an int (0 or 1) on the wire
	selfStore := 0
	if s.SelfStore {
		selfStore = 1
	}

	return bencode.EncodeBytes([]interface{}{
		s.BlobHash,
		bencode.RawMessage(encodedValue),
		s.NodeID,
		selfStore,
	})
}
// UnmarshalBencode unmarshals the serialized byte slice into the appropriate fields of the store arguments.
func (s *storeArgs) UnmarshalBencode(b []byte) error {
var argsInt []bencode.RawMessage
err := bencode.DecodeBytes(b, &argsInt)
if err != nil {
return errors.Prefix("storeArgs unmarshal", err)
}
if len(argsInt) != 4 {
return errors.Err("unexpected number of fields for store args. got " + cast.ToString(len(argsInt)))
}
err = bencode.DecodeBytes(argsInt[0], &s.BlobHash)
if err != nil {
return errors.Prefix("storeArgs unmarshal", err)
}
err = bencode.DecodeBytes(argsInt[1], &s.Value)
if err != nil {
return errors.Prefix("storeArgs unmarshal", err)
}
err = bencode.DecodeBytes(argsInt[2], &s.NodeID)
if err != nil {
return errors.Prefix("storeArgs unmarshal", err)
}
var selfStore int
err = bencode.DecodeBytes(argsInt[3], &selfStore)
if err != nil {
return errors.Prefix("storeArgs unmarshal", err)
}
if selfStore == 0 {
s.SelfStore = false
} else if selfStore == 1 {
s.SelfStore = true
} else {
return errors.Err("selfstore must be 1 or 0")
}
return nil
}
// Response represents a DHT response message
type Response struct {
ID messageID
NodeID bits.Bitmap
Data string
Contacts []Contact
FindValueKey string
Token string
ProtocolVersion int
Page uint8
}
// argsDebug renders the response payload as a short human-readable string
// for logging: either the raw data, or a "|"-delimited contact list prefixed
// with what kind of result it is, plus the token if present.
func (r Response) argsDebug() string {
	if r.Data != "" {
		return r.Data
	}

	var b strings.Builder
	if r.FindValueKey != "" {
		b.WriteString("value for " + hex.EncodeToString([]byte(r.FindValueKey))[:8] + " ")
	} else {
		b.WriteString("contacts ")
	}

	b.WriteString("|")
	parts := make([]string, 0, len(r.Contacts))
	for _, c := range r.Contacts {
		parts = append(parts, c.String())
	}
	b.WriteString(strings.Join(parts, ","))
	b.WriteString("|")

	if r.Token != "" {
		b.WriteString(" token: " + hex.EncodeToString([]byte(r.Token))[:8])
	}
	return b.String()
}
// MarshalBencode returns the serialized byte slice representation of the response.
func (r Response) MarshalBencode() ([]byte, error) {
data := map[string]interface{}{
headerTypeField: responseType,
headerMessageIDField: r.ID,
headerNodeIDField: r.NodeID,
}
if r.Data != "" {
// ping or store
data[headerPayloadField] = r.Data
} else if r.FindValueKey != "" {
// findValue success
if r.Token == "" {
return nil, errors.Err("response to findValue must have a token")
}
var contacts [][]byte
for _, c := range r.Contacts {
compact, err := c.MarshalCompact()
if err != nil {
return nil, err
}
contacts = append(contacts, compact)
}
data[headerPayloadField] = map[string]interface{}{
r.FindValueKey: contacts,
tokenField: r.Token,
}
} else if r.Token != "" {
// findValue failure falling back to findNode
data[headerPayloadField] = map[string]interface{}{
contactsField: r.Contacts,
tokenField: r.Token,
}
} else {
// straight up findNode
data[headerPayloadField] = r.Contacts
}
return bencode.EncodeBytes(data)
}
// UnmarshalBencode unmarshals the serialized byte slice into the appropriate fields of the store arguments.
func (r *Response) UnmarshalBencode(b []byte) error {
var raw struct {
ID messageID `bencode:"1"`
NodeID bits.Bitmap `bencode:"2"`
Data bencode.RawMessage `bencode:"3"`
}
err := bencode.DecodeBytes(b, &raw)
if err != nil {
return err
}
r.ID = raw.ID
r.NodeID = raw.NodeID
// maybe data is a string (response to ping or store)?
err = bencode.DecodeBytes(raw.Data, &r.Data)
if err == nil {
return nil
}
// maybe data is a list of contacts (response to findNode)?
err = bencode.DecodeBytes(raw.Data, &r.Contacts)
if err == nil {
return nil
}
// it must be a response to findValue
var rawData map[string]bencode.RawMessage
err = bencode.DecodeBytes(raw.Data, &rawData)
if err != nil {
return err
}
if token, ok := rawData[tokenField]; ok {
err = bencode.DecodeBytes(token, &r.Token)
if err != nil {
return err
}
delete(rawData, tokenField) // so it doesnt mess up findValue key finding below
}
if protocolVersion, ok := rawData[protocolVersionField]; ok {
err = bencode.DecodeBytes(protocolVersion, &r.ProtocolVersion)
if err != nil {
return err
}
delete(rawData, protocolVersionField) // so it doesnt mess up findValue key finding below
}
if contacts, ok := rawData[contactsField]; ok {
err = bencode.DecodeBytes(contacts, &r.Contacts)
delete(rawData, contactsField) // so it doesnt mess up findValue key finding below
if err != nil {
return err
}
}
if page, ok := rawData[pageField]; ok {
err = bencode.DecodeBytes(page, &r.Page)
delete(rawData, pageField) // so it doesnt mess up findValue key finding below
if err != nil {
return err
}
}
for k, v := range rawData {
r.FindValueKey = k
var compactContacts [][]byte
err = bencode.DecodeBytes(v, &compactContacts)
if err != nil {
return err
}
for _, compact := range compactContacts {
var c Contact
err = c.UnmarshalCompact(compact)
if err != nil {
return err
}
r.Contacts = append(r.Contacts, c)
}
break
}
return nil
}
// Error represents a DHT error response
type Error struct {
ID messageID
NodeID bits.Bitmap
ExceptionType string
Response []string
}
// MarshalBencode returns the serialized byte slice representation of an error message.
func (e Error) MarshalBencode() ([]byte, error) {
return bencode.EncodeBytes(map[string]interface{}{
headerTypeField: errorType,
headerMessageIDField: e.ID,
headerNodeIDField: e.NodeID,
headerPayloadField: e.ExceptionType,
headerArgsField: e.Response,
})
}
// UnmarshalBencode unmarshals the serialized byte slice into the appropriate fields of the error message.
func (e *Error) UnmarshalBencode(b []byte) error {
var raw struct {
ID messageID `bencode:"1"`
NodeID bits.Bitmap `bencode:"2"`
ExceptionType string `bencode:"3"`
Args interface{} `bencode:"4"`
}
err := bencode.DecodeBytes(b, &raw)
if err != nil {
return err
}
e.ID = raw.ID
e.NodeID = raw.NodeID
e.ExceptionType = raw.ExceptionType
if reflect.TypeOf(raw.Args).Kind() == reflect.Slice {
v := reflect.ValueOf(raw.Args)
for i := 0; i < v.Len(); i++ {
e.Response = append(e.Response, cast.ToString(v.Index(i).Interface()))
}
}
return nil
}

224
dht/message_test.go Normal file

File diff suppressed because one or more lines are too long

474
dht/node.go Normal file
View file

@ -0,0 +1,474 @@
package dht
import (
"encoding/hex"
"net"
"strings"
"sync"
"time"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/lbry.go/v2/extras/util"
"github.com/davecgh/go-spew/spew"
"github.com/lyoshenka/bencode"
)
// packet represents the information receive from udp.
type packet struct {
data []byte
raddr *net.UDPAddr
}
// UDPConn allows using a mocked connection to test sending/receiving data
// TODO: stop mocking this and use the real thing
type UDPConn interface {
ReadFromUDP([]byte) (int, *net.UDPAddr, error)
WriteToUDP([]byte, *net.UDPAddr) (int, error)
SetReadDeadline(time.Time) error
SetWriteDeadline(time.Time) error
Close() error
}
// RequestHandlerFunc is exported handler for requests.
type RequestHandlerFunc func(addr *net.UDPAddr, request Request)
// Node is a type representation of a node on the network.
type Node struct {
// the node's id
id bits.Bitmap
// UDP connection for sending and receiving data
conn UDPConn
// true if we've closed the connection on purpose
connClosed bool
// token manager
tokens *tokenManager
// map of outstanding transactions + mutex
txLock *sync.RWMutex
transactions map[messageID]*transaction
// routing table
rt *routingTable
// data store
store *contactStore
// overrides for request handlers
requestHandler RequestHandlerFunc
// stop the node neatly and clean up after itself
grp *stop.Group
}
// NewNode returns an initialized Node's pointer.
func NewNode(id bits.Bitmap) *Node {
return &Node{
id: id,
rt: newRoutingTable(id),
store: newStore(),
txLock: &sync.RWMutex{},
transactions: make(map[messageID]*transaction),
grp: stop.New(),
tokens: &tokenManager{},
}
}
// Connect attaches the node to the given connection and starts its background
// goroutines: a shutdown watcher, a socket reader, a packet handler, and the
// routing-table groomer. It does not block.
func (n *Node) Connect(conn UDPConn) error {
	n.conn = conn

	n.tokens.Start(tokenSecretRotationInterval)

	go func() {
		// stop tokens and close the connection when we're shutting down
		<-n.grp.Ch()
		n.tokens.Stop()
		// mark the close as intentional so the read loop below exits quietly
		n.connClosed = true
		err := n.conn.Close()
		if err != nil {
			log.Error("error closing node connection on shutdown - ", err)
		}
	}()

	packets := make(chan packet)

	// reader goroutine: pulls datagrams off the socket and forwards them
	n.grp.Add(1)
	go func() {
		defer n.grp.Done()

		buf := make([]byte, udpMaxMessageLength)

		for {
			bytesRead, raddr, err := n.conn.ReadFromUDP(buf)
			if err != nil {
				if n.connClosed {
					return
				}
				log.Errorf("udp read error: %v", err)
				continue
			} else if raddr == nil {
				log.Errorf("udp read with no raddr")
				continue
			}

			data := make([]byte, bytesRead)
			copy(data, buf[:bytesRead]) // slices use the same underlying array, so we need a new one for each packet

			select { // needs select here because packet consumer can quit and the packets channel gets filled up and blocks
			case packets <- packet{data: data, raddr: raddr}:
			case <-n.grp.Ch():
				return
			}
		}
	}()

	// handler goroutine: processes packets one at a time until shutdown
	n.grp.Add(1)
	go func() {
		defer n.grp.Done()

		var pkt packet

		for {
			select {
			case pkt = <-packets:
				n.handlePacket(pkt)
			case <-n.grp.Ch():
				return
			}
		}
	}()

	// TODO: turn this back on when you're sure it works right
	n.grp.Add(1)
	go func() {
		defer n.grp.Done()
		n.startRoutingTableGrooming()
	}()

	return nil
}
// Shutdown shuts down the node: it fires the stop group (which closes the
// connection and stops the token manager, see Connect) and blocks until all
// background goroutines have exited.
func (n *Node) Shutdown() {
	log.Debugf("[%s] node shutting down", n.id.HexShort())
	n.grp.StopAndWait()
	log.Debugf("[%s] node stopped", n.id.HexShort())
}
// handlePacket decodes a raw UDP packet and dispatches it to the request,
// response, or error handler based on the message-type byte.
func (n *Node) handlePacket(pkt packet) {
	//log.Debugf("[%s] Received message from %s (%d bytes) %s", n.id.HexShort(), pkt.raddr.String(), len(pkt.data), hex.EncodeToString(pkt.data))

	// a well-formed message is a bencoded dict whose first key is 0, so the
	// wire data must start with "d1:0i" (or "di0ei" for an integer key)
	if !util.InSlice(string(pkt.data[0:5]), []string{"d1:0i", "di0ei"}) {
		log.Errorf("[%s] data is not a well-formatted dict: (%d bytes) %s", n.id.HexShort(), len(pkt.data), hex.EncodeToString(pkt.data))
		return
	}

	// the following is a bit of a hack, but it lets us avoid decoding every message twice
	// it depends on the data being a dict with 0 as the first key (so it starts with "d1:0i") and the message type as the first value
	// TODO: test this more thoroughly
	switch pkt.data[5] {
	case '0' + requestType:
		request := Request{}
		err := bencode.DecodeBytes(pkt.data, &request)
		if err != nil {
			log.Errorf("[%s] error decoding request from %s: %s: (%d bytes) %s", n.id.HexShort(), pkt.raddr.String(), err.Error(), len(pkt.data), hex.EncodeToString(pkt.data))
			return
		}
		log.Debugf("[%s] query %s: received request from %s: %s(%s)", n.id.HexShort(), request.ID.HexShort(), request.NodeID.HexShort(), request.Method, request.argsDebug())
		n.handleRequest(pkt.raddr, request)

	case '0' + responseType:
		response := Response{}
		err := bencode.DecodeBytes(pkt.data, &response)
		if err != nil {
			log.Errorf("[%s] error decoding response from %s: %s: (%d bytes) %s", n.id.HexShort(), pkt.raddr.String(), err.Error(), len(pkt.data), hex.EncodeToString(pkt.data))
			return
		}
		log.Debugf("[%s] query %s: received response from %s: %s", n.id.HexShort(), response.ID.HexShort(), response.NodeID.HexShort(), response.argsDebug())
		n.handleResponse(pkt.raddr, response)

	case '0' + errorType:
		e := Error{}
		err := bencode.DecodeBytes(pkt.data, &e)
		if err != nil {
			log.Errorf("[%s] error decoding error from %s: %s: (%d bytes) %s", n.id.HexShort(), pkt.raddr.String(), err.Error(), len(pkt.data), hex.EncodeToString(pkt.data))
			return
		}
		log.Debugf("[%s] query %s: received error from %s: %s", n.id.HexShort(), e.ID.HexShort(), e.NodeID.HexShort(), e.ExceptionType)
		n.handleError(pkt.raddr, e)

	default:
		log.Errorf("[%s] invalid message type: %s", n.id.HexShort(), string(pkt.data[5]))
		return
	}
}
// handleRequest handles requests received from udp: ping, store, findNode,
// and findValue. On completion the sender is refreshed (never inserted) in
// the routing table.
func (n *Node) handleRequest(addr *net.UDPAddr, request Request) {
	if request.NodeID.Equals(n.id) {
		log.Warn("ignoring self-request")
		return
	}

	// if a handler is overridden, call it instead
	if n.requestHandler != nil {
		n.requestHandler(addr, request)
		return
	}

	switch request.Method {
	default:
		//n.sendMessage(addr, Error{ID: request.ID, NodeID: n.id, ExceptionType: "invalid-request-method"})
		log.Errorln("invalid request method")
		return

	case pingMethod:
		err := n.sendMessage(addr, Response{ID: request.ID, NodeID: n.id, Data: pingSuccessResponse})
		if err != nil {
			log.Error("error sending 'pingmethod' response message - ", err)
		}

	case storeMethod:
		// TODO: we should be sending the IP in the request, not just using the sender's IP
		// TODO: should we be using StoreArgs.NodeID or StoreArgs.Value.LbryID ???
		// a store is only honored if its token matches one we previously issued
		if n.tokens.Verify(request.StoreArgs.Value.Token, request.NodeID, addr) {
			n.Store(request.StoreArgs.BlobHash, Contact{ID: request.StoreArgs.NodeID, IP: addr.IP, Port: addr.Port, PeerPort: request.StoreArgs.Value.Port})

			err := n.sendMessage(addr, Response{ID: request.ID, NodeID: n.id, Data: storeSuccessResponse})
			if err != nil {
				log.Error("error sending 'storemethod' response message - ", err)
			}
		} else {
			err := n.sendMessage(addr, Error{ID: request.ID, NodeID: n.id, ExceptionType: "invalid-token"})
			if err != nil {
				log.Error("error sending 'storemethod'response message for invalid-token - ", err)
			}
		}

	case findNodeMethod:
		if request.Arg == nil {
			log.Errorln("request is missing arg")
			return
		}
		err := n.sendMessage(addr, Response{
			ID:       request.ID,
			NodeID:   n.id,
			Contacts: n.rt.GetClosest(*request.Arg, bucketSize),
		})
		if err != nil {
			log.Error("error sending 'findnodemethod' response message - ", err)
		}

	case findValueMethod:
		if request.Arg == nil {
			log.Errorln("request is missing arg")
			return
		}

		res := Response{
			ID:     request.ID,
			NodeID: n.id,
			// include a token the requester can later use for a store
			Token: n.tokens.Get(request.NodeID, addr),
		}

		// if we know peers for the hash, return them; otherwise fall back to
		// findNode behavior and return our closest known contacts
		if contacts := n.store.Get(*request.Arg); len(contacts) > 0 {
			res.FindValueKey = request.Arg.RawString()
			res.Contacts = contacts
		} else {
			res.Contacts = n.rt.GetClosest(*request.Arg, bucketSize)
		}

		err := n.sendMessage(addr, res)
		if err != nil {
			log.Error("error sending 'findvaluemethod' response message - ", err)
		}
	}

	// nodes that send us requests should not be inserted, only refreshed.
	// the routing table must only contain "good" nodes, which are nodes that reply to our requests
	// if a node is already good (aka in the table), its fine to refresh it
	// http://www.bittorrent.org/beps/bep_0005.html#routing-table
	n.rt.Fresh(Contact{ID: request.NodeID, IP: addr.IP, Port: addr.Port})
}
// handleResponse handles responses received from udp: it routes the response
// to the matching outstanding transaction (if any) and updates the routing
// table with the responding contact.
func (n *Node) handleResponse(addr *net.UDPAddr, response Response) {
	tx := n.txFind(response.ID, Contact{ID: response.NodeID, IP: addr.IP, Port: addr.Port})
	if tx != nil {
		// non-blocking send: the transaction may already have a response or
		// its listener may have gone away
		select {
		case tx.res <- response:
		default:
			//log.Errorf("[%s] query %s: response received, but tx has no listener or multiple responses to the same tx", n.id.HexShort(), response.ID.HexShort())
		}
	}

	// responding to a request makes a node "good", so a full update is allowed
	n.rt.Update(Contact{ID: response.NodeID, IP: addr.IP, Port: addr.Port})
}
// handleError handles error messages received from udp. The remote node is
// refreshed (not inserted) in the routing table, since replying at all shows
// it is alive.
func (n *Node) handleError(addr *net.UDPAddr, e Error) {
	// route the dump through the package logger instead of spew.Dump, which
	// writes straight to stdout and bypasses log configuration
	log.Errorf("[%s] error from %s: %s", n.id.HexShort(), addr.String(), spew.Sdump(e))
	n.rt.Fresh(Contact{ID: e.NodeID, IP: addr.IP, Port: addr.Port})
}
// sendMessage bencodes a message (Request, Response, or Error) and writes it
// to the given UDP address with a 5-second write deadline.
func (n *Node) sendMessage(addr *net.UDPAddr, data Message) error {
	encoded, err := bencode.EncodeBytes(data)
	if err != nil {
		return errors.Err(err)
	}

	// log at debug level, with a format specific to the concrete message type
	if req, ok := data.(Request); ok {
		log.Debugf("[%s] query %s: sending request to %s (%d bytes) %s(%s)",
			n.id.HexShort(), req.ID.HexShort(), addr.String(), len(encoded), req.Method, req.argsDebug())
	} else if res, ok := data.(Response); ok {
		log.Debugf("[%s] query %s: sending response to %s (%d bytes) %s",
			n.id.HexShort(), res.ID.HexShort(), addr.String(), len(encoded), res.argsDebug())
	} else {
		log.Debugf("[%s] (%d bytes) %s", n.id.HexShort(), len(encoded), spew.Sdump(data))
	}

	err = n.conn.SetWriteDeadline(time.Now().Add(5 * time.Second))
	if err != nil {
		// a failed deadline on a deliberately-closed connection is expected
		if n.connClosed {
			return nil
		}
		log.Error("error setting write deadline - ", err)
	}

	_, err = n.conn.WriteToUDP(encoded, addr)
	return errors.Err(err)
}
// transaction represents a single query to the dht. it stores the queried
// contact, the request, and the response channel.
type transaction struct {
	contact Contact
	req     Request
	res     chan Response
	// when set, txFind matches the responding contact by address only,
	// without requiring the node ID to match
	skipIDCheck bool
}
// txInsert registers an outstanding transaction, keyed by its message ID.
func (n *Node) txInsert(t *transaction) {
	n.txLock.Lock()
	n.transactions[t.req.ID] = t
	n.txLock.Unlock()
}
// txDelete removes the transaction with the given message ID, if present.
func (n *Node) txDelete(id messageID) {
	n.txLock.Lock()
	delete(n.transactions, id)
	n.txLock.Unlock()
}
// txFind returns the transaction for the given message ID, or nil if no such
// transaction exists or the responding contact does not match the one the
// request was sent to. ID matching is skipped when the transaction was created
// with skipIDCheck (see SendOptions).
func (n *Node) txFind(id messageID, c Contact) *transaction {
	n.txLock.RLock()
	defer n.txLock.RUnlock()

	t, ok := n.transactions[id]
	if !ok || !t.contact.Equals(c, !t.skipIDCheck) {
		return nil
	}

	return t
}
// SendOptions controls the behavior of send calls.
type SendOptions struct {
	// skipIDCheck accepts a response from the queried address even if the
	// responder's node ID differs from the contact's (used e.g. for bootstrap)
	skipIDCheck bool
}
// SendAsync sends a transaction and returns a channel that will eventually contain the transaction response
// The response channel is closed when the transaction is completed or times out.
// A closed channel yields nil, so a nil receive means "no response".
func (n *Node) SendAsync(contact Contact, req Request, options ...SendOptions) <-chan *Response {
	ch := make(chan *Response, 1)

	if contact.ID.Equals(n.id) {
		log.Error("sending query to self")
		close(ch)
		return ch
	}

	go func() {
		defer close(ch)

		// assign a fresh message ID and our node ID to the outgoing request
		req.ID = newMessageID()
		req.NodeID = n.id
		tx := &transaction{
			contact: contact,
			req:     req,
			res:     make(chan Response),
		}
		if len(options) > 0 && options[0].skipIDCheck {
			tx.skipIDCheck = true
		}

		n.txInsert(tx)
		defer n.txDelete(tx.req.ID)

		// resend up to udpRetry times, waiting udpTimeout per attempt
		for i := 0; i < udpRetry; i++ {
			err := n.sendMessage(contact.Addr(), tx.req)
			if err != nil {
				if !strings.Contains(err.Error(), "use of closed network connection") { // this only happens on localhost. real UDP has no connections
					log.Error("send error: ", err)
				}
				continue
			}

			select {
			case res := <-tx.res:
				ch <- &res
				return
			case <-n.grp.Ch():
				return
			case <-time.After(udpTimeout):
				// no response yet; loop and resend
			}
		}

		// notify routing table about a failure to respond
		n.rt.Fail(tx.contact)
	}()

	return ch
}
// Send sends a transaction and blocks until the response is available. It returns a response, or nil
// if the transaction timed out (SendAsync closes its channel, which yields nil here).
func (n *Node) Send(contact Contact, req Request, options ...SendOptions) *Response {
	return <-n.SendAsync(contact, req, options...)
}
// CountActiveTransactions returns the number of transactions in the manager.
// A read lock suffices here — the map is only read, so concurrent writers need
// not be fully blocked.
func (n *Node) CountActiveTransactions() int {
	n.txLock.RLock()
	defer n.txLock.RUnlock()
	return len(n.transactions)
}
// startRoutingTableGrooming periodically refreshes stale routing-table buckets
// until the node's stop group fires. It blocks, so callers run it in its own
// goroutine (see Connect).
func (n *Node) startRoutingTableGrooming() {
	refreshTicker := time.NewTicker(tRefresh / 5) // how often to check for buckets that need to be refreshed
	defer refreshTicker.Stop()                    // release ticker resources on shutdown (was leaked before)
	for {
		select {
		case <-refreshTicker.C:
			RoutingTableRefresh(n, tRefresh, n.grp.Child())
		case <-n.grp.Ch():
			return
		}
	}
}
// Store records, in the node's contact store, that contact c has the blob
// identified by hash.
func (n *Node) Store(hash bits.Bitmap, c Contact) {
	n.store.Upsert(hash, c)
}

// AddKnownNode adds a known-good node to the routing table.
func (n *Node) AddKnownNode(c Contact) {
	n.rt.Update(c)
}

338
dht/node_finder.go Normal file
View file

@ -0,0 +1,338 @@
package dht
import (
"sync"
"time"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lbryio/lbry.go/v2/extras/crypto"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/sirupsen/logrus"
"go.uber.org/atomic"
)
// TODO: iterativeFindValue may be stopping early. if it gets a response with one peer, it should keep going because other nodes may know about more peers that have that blob
// TODO: or, it should try a tcp handshake with peers as it finds them, to make sure they are still online and have the blob

// cfLog is the logger used by the contact finder. It defaults to the standard
// logrus logger and can be replaced via NodeFinderUseLogger.
var cfLog *logrus.Logger

func init() {
	cfLog = logrus.StandardLogger()
}

// NodeFinderUseLogger sets the logger used by the contact finder.
func NodeFinderUseLogger(l *logrus.Logger) {
	cfLog = l
}
// contactFinder performs a single iterative findNode or findValue search
// against the DHT, starting from the closest contacts in the local routing
// table and probing outward.
type contactFinder struct {
	findValue bool // true if we're using findValue
	target    bits.Bitmap
	node      *Node

	grp *stop.Group

	// result of a successful findValue; guarded by findValueMutex
	findValueMutex  *sync.Mutex
	findValueResult []Contact

	// contacts that responded, kept sorted by distance; guarded by activeContactsMutex
	activeContactsMutex *sync.Mutex
	activeContacts      []Contact

	// candidates not yet probed; shortlistAdded prevents re-adding a contact
	// that was ever on the shortlist. guarded by shortlistMutex
	shortlistMutex *sync.Mutex
	shortlist      []Contact
	shortlistAdded map[bits.Bitmap]bool

	// best (closest to target) contact seen so far
	closestContactMutex *sync.RWMutex
	closestContact      *Contact

	// set when a big cycle produced no improvement; ends the search
	notGettingCloser *atomic.Bool
}
// FindContacts runs an iterative search for target using node. If findValue is
// true it performs a findValue lookup (the bool result reports whether the
// value was located); otherwise it performs a findNode lookup. The search can
// be canceled through parentGrp.
func FindContacts(node *Node, target bits.Bitmap, findValue bool, parentGrp *stop.Group) ([]Contact, bool, error) {
	cf := &contactFinder{
		node:                node,
		target:              target,
		findValue:           findValue,
		findValueMutex:      &sync.Mutex{},
		activeContactsMutex: &sync.Mutex{},
		shortlistMutex:      &sync.Mutex{},
		shortlistAdded:      make(map[bits.Bitmap]bool),
		grp:                 stop.New(parentGrp),
		closestContactMutex: &sync.RWMutex{},
		notGettingCloser:    atomic.NewBool(false),
	}
	return cf.Find()
}
// Stop halts the search and waits for all in-flight probes to finish.
func (cf *contactFinder) Stop() {
	cf.grp.StopAndWait()
}
// Find drives the search: it seeds the shortlist from the routing table, kicks
// off probe cycles, restarts a cycle every few seconds in case running ones
// stall, and waits for the finder's stop group to fire. It returns the
// resulting contacts and, for findValue searches, whether the value was found.
func (cf *contactFinder) Find() ([]Contact, bool, error) {
	if cf.findValue {
		cf.debug("starting iterativeFindValue")
	} else {
		cf.debug("starting iterativeFindNode")
	}

	cf.appendNewToShortlist(cf.node.rt.GetClosest(cf.target, alpha))
	if len(cf.shortlist) == 0 {
		return nil, false, errors.Err("[%s] find %s: no contacts in routing table", cf.node.id.HexShort(), cf.target.HexShort())
	}

	go cf.cycle(false)

	// watchdog: if no cycle finishes within the timeout, launch another
	timeout := 5 * time.Second
CycleLoop:
	for {
		select {
		case <-time.After(timeout):
			go cf.cycle(false)
		case <-cf.grp.Ch():
			break CycleLoop
		}
	}

	// TODO: what to do if we have less than K active contacts, shortlist is empty, but we have other contacts in our routing table whom we have not contacted. prolly contact them

	// NOTE(review): findValueResult and activeContacts are read here without
	// their mutexes — presumably safe because the stop group has fired, but
	// Stop (which waits for probes) is only called afterwards; confirm.
	var contacts []Contact
	var found bool
	if cf.findValue && len(cf.findValueResult) > 0 {
		contacts = cf.findValueResult
		found = true
	} else {
		contacts = cf.activeContacts
		if len(contacts) > bucketSize {
			contacts = contacts[:bucketSize]
		}
	}

	cf.Stop()
	return contacts, found, nil
}
// cycle does a single cycle of sending alpha probes and checking results
// against closestContact. A "big" cycle probes bucketSize contacts instead of
// alpha. If a probe improved on the best-known contact, another normal cycle
// is launched; if a normal cycle made no progress, a big cycle is tried; if a
// big cycle also made no progress, the search is marked as converged.
func (cf *contactFinder) cycle(bigCycle bool) {
	cycleID := crypto.RandString(6)
	if bigCycle {
		cf.debug("LAUNCHING CYCLE %s, AND ITS A BIG CYCLE", cycleID)
	} else {
		cf.debug("LAUNCHING CYCLE %s", cycleID)
	}
	defer cf.debug("CYCLE %s DONE", cycleID)

	// snapshot the current best contact to compare probe results against
	cf.closestContactMutex.RLock()
	closestContact := cf.closestContact
	cf.closestContactMutex.RUnlock()

	var wg sync.WaitGroup
	ch := make(chan *Contact)

	limit := alpha
	if bigCycle {
		limit = bucketSize
	}

	for i := 0; i < limit; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			ch <- cf.probe(cycleID)
		}()
	}

	go func() {
		wg.Wait()
		close(ch)
	}()

	// collect probe results; track whether any beat the snapshot
	foundCloser := false
	for {
		c, more := <-ch
		if !more {
			break
		}
		if c != nil && (closestContact == nil || cf.target.Closer(c.ID, closestContact.ID)) {
			if closestContact != nil {
				cf.debug("|%s| best contact improved: %s -> %s", cycleID, closestContact.ID.HexShort(), c.ID.HexShort())
			} else {
				cf.debug("|%s| best contact starting at %s", cycleID, c.ID.HexShort())
			}
			foundCloser = true
			closestContact = c
		}
	}

	if cf.isSearchFinished() {
		cf.grp.Stop()
		return
	}

	if foundCloser {
		cf.closestContactMutex.Lock()
		// have to check again after locking in case other probes found a closer one in the meantime
		if cf.closestContact == nil || cf.target.Closer(closestContact.ID, cf.closestContact.ID) {
			cf.closestContact = closestContact
		}
		cf.closestContactMutex.Unlock()
		go cf.cycle(false)
	} else if !bigCycle {
		cf.debug("|%s| no improvement, running big cycle", cycleID)
		go cf.cycle(true)
	} else {
		// big cycle ran and there was no improvement, so we're done
		cf.debug("|%s| big cycle ran, still no improvement", cycleID)
		cf.notGettingCloser.Store(true)
	}
}
// probe pops one contact off the shortlist, sends it a findNode/findValue
// request, and records the result. It returns the closest contact among those
// in the response, or nil if the probe was skipped, canceled, timed out, or
// found the value (which also stops the whole search).
func (cf *contactFinder) probe(cycleID string) *Contact {
	maybeContact := cf.popFromShortlist()
	if maybeContact == nil {
		cf.debug("|%s| no contacts in shortlist, returning", cycleID)
		return nil
	}
	c := *maybeContact

	// never probe ourselves
	if c.ID.Equals(cf.node.id) {
		return nil
	}

	cf.debug("|%s| probe %s: launching", cycleID, c.ID.HexShort())

	req := Request{Arg: &cf.target}
	if cf.findValue {
		req.Method = findValueMethod
	} else {
		req.Method = findNodeMethod
	}

	var res *Response
	resCh := cf.node.SendAsync(c, req)
	select {
	case res = <-resCh:
	case <-cf.grp.Ch():
		cf.debug("|%s| probe %s: canceled", cycleID, c.ID.HexShort())
		return nil
	}

	if res == nil {
		cf.debug("|%s| probe %s: req canceled or timed out", cycleID, c.ID.HexShort())
		return nil
	}

	if cf.findValue && res.FindValueKey != "" {
		// value found: record the peers and end the search
		cf.debug("|%s| probe %s: got value", cycleID, c.ID.HexShort())
		cf.findValueMutex.Lock()
		cf.findValueResult = res.Contacts
		cf.findValueMutex.Unlock()
		cf.grp.Stop()
		return nil
	}

	cf.debug("|%s| probe %s: got %s", cycleID, c.ID.HexShort(), res.argsDebug())
	cf.insertIntoActiveList(c)
	cf.appendNewToShortlist(res.Contacts)

	// (removed: a leftover debug section that locked activeContactsMutex only
	// to build a comma-separated contact string that was never used)

	return cf.closest(res.Contacts...)
}
// appendNewToShortlist appends any new contacts to the shortlist and sorts it by distance
// contacts that have already been added to the shortlist in the past are ignored
// (tracked via shortlistAdded, so popped contacts are never re-queued).
func (cf *contactFinder) appendNewToShortlist(contacts []Contact) {
	cf.shortlistMutex.Lock()
	defer cf.shortlistMutex.Unlock()

	for _, c := range contacts {
		if _, ok := cf.shortlistAdded[c.ID]; !ok {
			cf.shortlist = append(cf.shortlist, c)
			cf.shortlistAdded[c.ID] = true
		}
	}

	sortByDistance(cf.shortlist, cf.target)
}
// popFromShortlist removes and returns the closest pending contact, or nil if
// the shortlist is empty.
func (cf *contactFinder) popFromShortlist() *Contact {
	cf.shortlistMutex.Lock()
	defer cf.shortlistMutex.Unlock()

	if len(cf.shortlist) == 0 {
		return nil
	}

	var head Contact
	head, cf.shortlist = cf.shortlist[0], cf.shortlist[1:]
	return &head
}
// insertIntoActiveList inserts the contact into the appropriate place in the
// list of active contacts, keeping the list sorted by distance to the target.
func (cf *contactFinder) insertIntoActiveList(contact Contact) {
	cf.activeContactsMutex.Lock()
	defer cf.activeContactsMutex.Unlock()

	inserted := false
	for i, n := range cf.activeContacts {
		// insert before the first contact that is farther from the target
		if cf.target.Closer(contact.ID, n.ID) {
			cf.activeContacts = append(cf.activeContacts[:i], append([]Contact{contact}, cf.activeContacts[i:]...)...)
			inserted = true
			break
		}
	}
	if !inserted {
		cf.activeContacts = append(cf.activeContacts, contact)
	}
}
// isSearchFinished returns true if the search is done and should be stopped:
// the value was found, the finder was stopped, a big cycle made no progress,
// or we already have a full bucket of active contacts.
func (cf *contactFinder) isSearchFinished() bool {
	if cf.findValue {
		// read findValueResult under its mutex — probes write it concurrently
		cf.findValueMutex.Lock()
		found := len(cf.findValueResult) > 0
		cf.findValueMutex.Unlock()
		if found {
			return true
		}
	}

	select {
	case <-cf.grp.Ch():
		return true
	default:
	}

	if cf.notGettingCloser.Load() {
		return true
	}

	cf.activeContactsMutex.Lock()
	defer cf.activeContactsMutex.Unlock()
	return len(cf.activeContacts) >= bucketSize
}
// debug logs through cfLog, prefixing every line with the local node ID and
// the search target.
func (cf *contactFinder) debug(format string, args ...interface{}) {
	prefixed := make([]interface{}, 0, len(args)+2)
	prefixed = append(prefixed, cf.node.id.HexShort(), cf.target.HexShort())
	prefixed = append(prefixed, args...)
	cfLog.Debugf("[%s] find %s: "+format, prefixed...)
}
// closest returns whichever of the given contacts is nearest to the search
// target, or nil when called with no contacts.
func (cf *contactFinder) closest(contacts ...Contact) *Contact {
	if len(contacts) == 0 {
		return nil
	}
	best := contacts[0]
	for _, candidate := range contacts[1:] {
		if cf.target.Closer(candidate.ID, best.ID) {
			best = candidate
		}
	}
	return &best
}

422
dht/node_test.go Normal file
View file

@ -0,0 +1,422 @@
package dht
import (
"net"
"testing"
"time"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lyoshenka/bencode"
)
// TestPing feeds a hand-built bencoded ping request through a mocked UDP
// connection and verifies the node replies with a well-formed pong.
func TestPing(t *testing.T) {
	dhtNodeID := bits.Rand()
	testNodeID := bits.Rand()

	conn := newTestUDPConn("127.0.0.1:21217")

	dht := New(&Config{Address: "127.0.0.1:21216", NodeID: dhtNodeID.Hex()})

	err := dht.connect(conn)
	if err != nil {
		t.Fatal(err)
	}
	defer dht.Shutdown()

	messageID := newMessageID()

	// build the request as a raw map so the wire format itself is exercised
	data, err := bencode.EncodeBytes(map[string]interface{}{
		headerTypeField:      requestType,
		headerMessageIDField: messageID,
		headerNodeIDField:    testNodeID.RawString(),
		headerPayloadField:   "ping",
		headerArgsField:      []string{},
	})
	if err != nil {
		panic(err)
	}

	conn.toRead <- testUDPPacket{addr: conn.addr, data: data}
	timer := time.NewTimer(3 * time.Second)

	select {
	case <-timer.C:
		t.Error("timeout")
	case resp := <-conn.writes:
		var response map[string]interface{}
		err := bencode.DecodeBytes(resp.data, &response)
		if err != nil {
			t.Fatal(err)
		}

		// a response should carry exactly: type, message id, node id, payload
		if len(response) != 4 {
			t.Errorf("expected 4 response fields, got %d", len(response))
		}

		_, ok := response[headerTypeField]
		if !ok {
			t.Error("missing type field")
		} else {
			rType, ok := response[headerTypeField].(int64)
			if !ok {
				t.Error("type is not an integer")
			} else if rType != responseType {
				t.Error("unexpected response type")
			}
		}

		_, ok = response[headerMessageIDField]
		if !ok {
			t.Error("missing message id field")
		} else {
			rMessageID, ok := response[headerMessageIDField].(string)
			if !ok {
				t.Error("message ID is not a string")
			} else if rMessageID != string(messageID[:]) {
				t.Error("unexpected message ID")
			}
		}

		_, ok = response[headerNodeIDField]
		if !ok {
			t.Error("missing node id field")
		} else {
			rNodeID, ok := response[headerNodeIDField].(string)
			if !ok {
				t.Error("node ID is not a string")
			} else if rNodeID != dhtNodeID.RawString() {
				t.Error("unexpected node ID")
			}
		}

		_, ok = response[headerPayloadField]
		if !ok {
			t.Error("missing payload field")
		} else {
			rNodeID, ok := response[headerPayloadField].(string)
			if !ok {
				t.Error("payload is not a string")
			} else if rNodeID != pingSuccessResponse {
				t.Error("did not pong")
			}
		}
	}
}
// TestStore sends a store request (with a token the node itself issued) and
// verifies both the OK response and that the contact lands in the data store.
func TestStore(t *testing.T) {
	dhtNodeID := bits.Rand()
	testNodeID := bits.Rand()

	conn := newTestUDPConn("127.0.0.1:21217")

	dht := New(&Config{Address: "127.0.0.1:21216", NodeID: dhtNodeID.Hex()})

	err := dht.connect(conn)
	if err != nil {
		t.Fatal(err)
	}
	defer dht.Shutdown()

	messageID := newMessageID()
	blobHashToStore := bits.Rand()

	storeRequest := Request{
		ID:     messageID,
		NodeID: testNodeID,
		Method: storeMethod,
		StoreArgs: &storeArgs{
			BlobHash: blobHashToStore,
			Value: storeArgsValue{
				// the token must come from the node's own token manager or
				// the store will be rejected as invalid-token
				Token:  dht.node.tokens.Get(testNodeID, conn.addr),
				LbryID: testNodeID,
				Port:   9999,
			},
			NodeID: testNodeID,
		},
	}

	// annotated reference encoding of a store request, kept for documentation
	// of the wire format; deliberately unused
	_ = "64 " + // start message
		"313A30 693065" + // type: 0
		"313A31 3230 3A 6EB490B5788B63F0F7E6D92352024D0CBDEC2D3A" + // message id
		"313A32 3438 3A 7CE1B831DEC8689E44F80F547D2DEA171F6A625E1A4FF6C6165E645F953103DABEB068A622203F859C6C64658FD3AA3B" + // node id
		"313A33 35 3A 73746F7265" + // method
		"313A34 6C" + // start args list
		"3438 3A 3214D6C2F77FCB5E8D5FC07EDAFBA614F031CE8B2EAB49F924F8143F6DFBADE048D918710072FB98AB1B52B58F4E1468" + // block hash
		"64" + // start value dict
		"363A6C6272796964 3438 3A 7CE1B831DEC8689E44F80F547D2DEA171F6A625E1A4FF6C6165E645F953103DABEB068A622203F859C6C64658FD3AA3B" + // lbry id
		"343A706F7274 69 33333333 65" + // port
		"353A746F6B656E 3438 3A 17C2D8E1E48EF21567FE4AD5C8ED944B798D3B65AB58D0C9122AD6587D1B5FED472EA2CB12284CEFA1C21EFF302322BD" + // token
		"65" + // end value dict
		"3438 3A 7CE1B831DEC8689E44F80F547D2DEA171F6A625E1A4FF6C6165E645F953103DABEB068A622203F859C6C64658FD3AA3B" + // node id
		"693065" + // self store (integer)
		"65" + // end args list
		"65" // end message

	data, err := bencode.EncodeBytes(storeRequest)
	if err != nil {
		t.Fatal(err)
	}

	conn.toRead <- testUDPPacket{addr: conn.addr, data: data}
	timer := time.NewTimer(3 * time.Second)

	var response map[string]interface{}
	select {
	case <-timer.C:
		t.Fatal("timeout")
	case resp := <-conn.writes:
		err := bencode.DecodeBytes(resp.data, &response)
		if err != nil {
			t.Fatal(err)
		}
	}

	verifyResponse(t, response, messageID, dhtNodeID.RawString())

	_, ok := response[headerPayloadField]
	if !ok {
		t.Error("missing payload field")
	} else {
		rNodeID, ok := response[headerPayloadField].(string)
		if !ok {
			t.Error("payload is not a string")
		} else if rNodeID != storeSuccessResponse {
			t.Error("did not return OK")
		}
	}

	// the stored contact must actually be retrievable from the data store
	if dht.node.store.CountStoredHashes() != 1 {
		t.Error("dht store has wrong number of items")
	}

	items := dht.node.store.Get(blobHashToStore)
	if len(items) != 1 {
		t.Error("list created in store, but nothing in list")
	}
	if !items[0].ID.Equals(testNodeID) {
		t.Error("wrong value stored")
	}
}
// TestFindNode seeds the routing table with a few contacts, sends a findNode
// request, and verifies the response lists those contacts.
func TestFindNode(t *testing.T) {
	dhtNodeID := bits.Rand()
	testNodeID := bits.Rand()

	conn := newTestUDPConn("127.0.0.1:21217")

	dht := New(&Config{Address: "127.0.0.1:21216", NodeID: dhtNodeID.Hex()})

	err := dht.connect(conn)
	if err != nil {
		t.Fatal(err)
	}
	defer dht.Shutdown()

	nodesToInsert := 3
	var nodes []Contact
	for i := 0; i < nodesToInsert; i++ {
		n := Contact{ID: bits.Rand(), IP: net.ParseIP("127.0.0.1"), Port: 10000 + i}
		nodes = append(nodes, n)
		dht.node.rt.Update(n)
	}

	messageID := newMessageID()
	blobHashToFind := bits.Rand()

	request := Request{
		ID:     messageID,
		NodeID: testNodeID,
		Method: findNodeMethod,
		Arg:    &blobHashToFind,
	}

	data, err := bencode.EncodeBytes(request)
	if err != nil {
		t.Fatal(err)
	}

	conn.toRead <- testUDPPacket{addr: conn.addr, data: data}
	timer := time.NewTimer(3 * time.Second)

	var response map[string]interface{}
	select {
	case <-timer.C:
		t.Fatal("timeout")
	case resp := <-conn.writes:
		err := bencode.DecodeBytes(resp.data, &response)
		if err != nil {
			t.Fatal(err)
		}
	}

	verifyResponse(t, response, messageID, dhtNodeID.RawString())

	// findNode responds with a bare list of contacts as the payload
	_, ok := response[headerPayloadField]
	if !ok {
		t.Fatal("missing payload field")
	}

	contacts, ok := response[headerPayloadField].([]interface{})
	if !ok {
		t.Fatal("payload is not a list")
	}

	verifyContacts(t, contacts, nodes)
}
// TestFindValueExisting stores a contact for a hash, then sends a findValue
// request for that hash and verifies the stored contact comes back in compact
// form, keyed by the hash.
func TestFindValueExisting(t *testing.T) {
	dhtNodeID := bits.Rand()
	testNodeID := bits.Rand()

	conn := newTestUDPConn("127.0.0.1:21217")

	dht := New(&Config{Address: "127.0.0.1:21216", NodeID: dhtNodeID.Hex()})

	err := dht.connect(conn)
	if err != nil {
		t.Fatal(err)
	}
	defer dht.Shutdown()

	nodesToInsert := 3
	for i := 0; i < nodesToInsert; i++ {
		n := Contact{ID: bits.Rand(), IP: net.ParseIP("127.0.0.1"), Port: 10000 + i}
		dht.node.rt.Update(n)
	}

	//data, _ := hex.DecodeString("64313a30693065313a3132303a7de8e57d34e316abbb5a8a8da50dcd1ad4c80e0f313a3234383a7ce1b831dec8689e44f80f547d2dea171f6a625e1a4ff6c6165e645f953103dabeb068a622203f859c6c64658fd3aa3b313a33393a66696e6456616c7565313a346c34383aa47624b8e7ee1e54df0c45e2eb858feb0b705bd2a78d8b739be31ba188f4bd6f56b371c51fecc5280d5fd26ba4168e966565")

	messageID := newMessageID()
	valueToFind := bits.Rand()

	nodeToFind := Contact{ID: bits.Rand(), IP: net.ParseIP("1.2.3.4"), PeerPort: 1286}
	// repeated Upserts of the same contact must not create duplicates
	dht.node.store.Upsert(valueToFind, nodeToFind)
	dht.node.store.Upsert(valueToFind, nodeToFind)
	dht.node.store.Upsert(valueToFind, nodeToFind)

	request := Request{
		ID:     messageID,
		NodeID: testNodeID,
		Method: findValueMethod,
		Arg:    &valueToFind,
	}

	data, err := bencode.EncodeBytes(request)
	if err != nil {
		t.Fatal(err)
	}

	conn.toRead <- testUDPPacket{addr: conn.addr, data: data}
	timer := time.NewTimer(3 * time.Second)

	var response map[string]interface{}
	select {
	case <-timer.C:
		t.Fatal("timeout")
	case resp := <-conn.writes:
		err := bencode.DecodeBytes(resp.data, &response)
		if err != nil {
			t.Fatal(err)
		}
	}

	verifyResponse(t, response, messageID, dhtNodeID.RawString())

	// a successful findValue returns a dict keyed by the searched hash
	_, ok := response[headerPayloadField]
	if !ok {
		t.Fatal("missing payload field")
	}

	payload, ok := response[headerPayloadField].(map[string]interface{})
	if !ok {
		t.Fatal("payload is not a dictionary")
	}

	compactContacts, ok := payload[valueToFind.RawString()]
	if !ok {
		t.Fatal("payload is missing key for search value")
	}

	contacts, ok := compactContacts.([]interface{})
	if !ok {
		t.Fatal("search results are not a list")
	}

	verifyCompactContacts(t, contacts, []Contact{nodeToFind})
}
// TestFindValueFallbackToFindNode sends a findValue request for a hash the
// node does NOT have, and verifies the node falls back to findNode behavior:
// a payload dict with a "contacts" list of the closest known nodes.
func TestFindValueFallbackToFindNode(t *testing.T) {
	dhtNodeID := bits.Rand()
	testNodeID := bits.Rand()

	conn := newTestUDPConn("127.0.0.1:21217")

	dht := New(&Config{Address: "127.0.0.1:21216", NodeID: dhtNodeID.Hex()})

	err := dht.connect(conn)
	if err != nil {
		t.Fatal(err)
	}
	defer dht.Shutdown()

	nodesToInsert := 3
	var nodes []Contact
	for i := 0; i < nodesToInsert; i++ {
		n := Contact{ID: bits.Rand(), IP: net.ParseIP("127.0.0.1"), Port: 10000 + i}
		nodes = append(nodes, n)
		dht.node.rt.Update(n)
	}

	messageID := newMessageID()
	valueToFind := bits.Rand()

	request := Request{
		ID:     messageID,
		NodeID: testNodeID,
		Method: findValueMethod,
		Arg:    &valueToFind,
	}

	data, err := bencode.EncodeBytes(request)
	if err != nil {
		t.Fatal(err)
	}

	conn.toRead <- testUDPPacket{addr: conn.addr, data: data}
	timer := time.NewTimer(3 * time.Second)

	var response map[string]interface{}
	select {
	case <-timer.C:
		t.Fatal("timeout")
	case resp := <-conn.writes:
		err := bencode.DecodeBytes(resp.data, &response)
		if err != nil {
			t.Fatal(err)
		}
	}

	verifyResponse(t, response, messageID, dhtNodeID.RawString())

	_, ok := response[headerPayloadField]
	if !ok {
		t.Fatal("missing payload field")
	}

	payload, ok := response[headerPayloadField].(map[string]interface{})
	if !ok {
		t.Fatal("payload is not a dictionary")
	}

	contactsList, ok := payload[contactsField]
	if !ok {
		t.Fatal("payload is missing 'contacts' key")
	}

	contacts, ok := contactsList.([]interface{})
	if !ok {
		t.Fatal("'contacts' is not a list")
	}

	verifyContacts(t, contacts, nodes)
}

463
dht/routing_table.go Normal file
View file

@ -0,0 +1,463 @@
package dht
import (
"encoding/json"
"fmt"
"net"
"strconv"
"strings"
"sync"
"time"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
)
// TODO: if routing table is ever empty (aka the node is isolated), it should re-bootstrap
// TODO: use a tree with bucket splitting instead of a fixed bucket list. include jack's optimization (see link in commit mesg)
// https://github.com/lbryio/lbry/pull/1211/commits/341b27b6d21ac027671d42458826d02735aaae41
// peer is a contact with extra information
type peer struct {
	Contact Contact
	// XOR distance between this peer's ID and the routing table owner's ID
	Distance bits.Bitmap
	// last time the peer was successfully contacted (see Touch)
	LastActivity time.Time
	// LastReplied time.Time
	// LastRequested time.Time
	// LastFailure time.Time
	// SecondLastFailure time.Time
	// consecutive failures to respond; reset to zero by Touch
	NumFailures int
	//<lastPublished>,
	//<originallyPublished>
	// <originalPublisherID>
}
// Touch records a successful interaction with the peer: the failure counter
// is cleared and the activity timestamp is set to now.
func (p *peer) Touch() {
	p.NumFailures = 0
	p.LastActivity = time.Now()
}
// ActiveInLast returns whether the peer has responded in the last `d` duration.
// this is used to check if the peer is "good", meaning that we believe the peer will respond to our requests
func (p *peer) ActiveInLast(d time.Duration) bool {
	return time.Since(p.LastActivity) < d
}
// IsBad returns whether a peer is "bad", meaning that it has failed to respond
// to maxFailures pings in a row.
func (p *peer) IsBad(maxFailures int) bool { // param renamed from misspelled "maxFalures"
	return p.NumFailures >= maxFailures
}
// Fail records one more failed response from the peer. Whether the peer is
// then removed from the routing table is decided elsewhere (see
// bucket.UpdatePeer), based on IsBad.
func (p *peer) Fail() {
	p.NumFailures++
}
// bucket is one k-bucket of the routing table: up to bucketSize peers whose
// distance from the local node falls within Range.
type bucket struct {
	lock       *sync.RWMutex
	peers      []peer
	lastUpdate time.Time
	Range      bits.Range // capitalized because `range` is a keyword
}

// newBucket returns an empty bucket covering the given distance range.
func newBucket(r bits.Range) *bucket {
	return &bucket{
		peers: make([]peer, 0, bucketSize),
		lock:  &sync.RWMutex{},
		Range: r,
	}
}
// Len returns the number of peers in the bucket.
// Pointer receiver for consistency with the bucket's mutating methods and to
// avoid copying the struct on every call.
func (b *bucket) Len() int {
	b.lock.RLock()
	defer b.lock.RUnlock()
	return len(b.peers)
}
// Has reports whether the bucket contains the given contact (matched with a
// full ID check). Pointer receiver for consistency with the other methods.
func (b *bucket) Has(c Contact) bool {
	b.lock.RLock()
	defer b.lock.RUnlock()
	for _, p := range b.peers {
		if p.Contact.Equals(c, true) {
			return true
		}
	}
	return false
}
// Contacts returns a new slice holding the bucket's contacts, so callers can
// use it without holding the bucket lock. Pointer receiver for consistency
// with the other methods.
func (b *bucket) Contacts() []Contact {
	b.lock.RLock()
	defer b.lock.RUnlock()
	contacts := make([]Contact, len(b.peers))
	for i := range b.peers {
		contacts[i] = b.peers[i].Contact
	}
	return contacts
}
// UpdatePeer marks a contact as having been successfully contacted. if
// insertIfNew is set and the contact does not exist yet, it is inserted —
// possibly evicting a "bad" peer when the bucket is full. Returns an error if
// the peer's distance is outside this bucket's range.
func (b *bucket) UpdatePeer(p peer, insertIfNew bool) error {
	b.lock.Lock()
	defer b.lock.Unlock()

	if !b.Range.Contains(p.Distance) {
		return errors.Err("this bucket range does not cover this peer")
	}

	peerIndex := find(p.Contact.ID, b.peers)
	if peerIndex >= 0 {
		// known peer: refresh it and move it to the back (most recently seen)
		b.lastUpdate = time.Now()
		b.peers[peerIndex].Touch()
		moveToBack(b.peers, peerIndex)
	} else if insertIfNew {
		hasRoom := true

		if len(b.peers) >= bucketSize {
			// full bucket: make room by evicting the first bad peer, if any
			hasRoom = false
			for i := range b.peers {
				if b.peers[i].IsBad(maxPeerFails) {
					// TODO: Ping contact first. Only remove if it does not respond
					b.peers = append(b.peers[:i], b.peers[i+1:]...)
					hasRoom = true
					break
				}
			}
		}

		if hasRoom {
			b.lastUpdate = time.Now()
			p.Touch()
			b.peers = append(b.peers, p)
		}
	}

	return nil
}
// FailContact increments the failure count for the contact with the given ID,
// if it is in this bucket. The contact is not removed here.
func (b *bucket) FailContact(id bits.Bitmap) {
	b.lock.Lock()
	defer b.lock.Unlock()
	if idx := find(id, b.peers); idx >= 0 {
		// BEP5 says not to remove the contact until the bucket is full and you try to insert
		b.peers[idx].Fail()
	}
}
// find returns the index of the peer with the given ID in peers,
// or -1 if no peer with that ID is present.
func find(id bits.Bitmap, peers []peer) int {
	for i := range peers {
		if peers[i].Contact.ID.Equals(id) {
			return i
		}
	}
	return -1
}
// NeedsRefresh reports whether the bucket has gone longer than
// refreshInterval without being updated.
func (b *bucket) NeedsRefresh(refreshInterval time.Duration) bool {
	b.lock.RLock()
	stale := time.Since(b.lastUpdate) > refreshInterval
	b.lock.RUnlock()
	return stale
}
// Split halves the bucket's distance range and redistributes its peers into
// two new buckets. If one half ends up empty (and there is more than one
// peer), the non-empty half is split again and the outer edge of the result
// is stretched so the pair still covers the original range.
func (b *bucket) Split() (*bucket, *bucket) {
	b.lock.Lock()
	defer b.lock.Unlock()

	left := newBucket(b.Range.IntervalP(1, 2))
	right := newBucket(b.Range.IntervalP(2, 2))
	left.lastUpdate = b.lastUpdate
	right.lastUpdate = b.lastUpdate

	for _, p := range b.peers {
		if left.Range.Contains(p.Distance) {
			left.peers = append(left.peers, p)
		} else {
			right.peers = append(right.peers, p)
		}
	}

	if len(b.peers) > 1 {
		if len(left.peers) == 0 {
			// All peers landed on the right half: split that half again and
			// widen the new left bucket back to the original start.
			left, right = right.Split()
			left.Range.Start = b.Range.Start
		} else if len(right.peers) == 0 {
			left, right = left.Split()
			right.Range.End = b.Range.End
		}
	}

	return left, right
}
// routingTable is a Kademlia-style routing table: an ordered list of buckets,
// each covering one contiguous range of XOR distances from our own node ID.
type routingTable struct {
	id      bits.Bitmap
	buckets []*bucket
	mu      *sync.RWMutex // this mutex is write-locked only when CHANGING THE NUMBER OF BUCKETS in the table
}
// newRoutingTable creates a routing table for the node with the given id,
// starting with a single bucket spanning the entire distance range.
func newRoutingTable(id bits.Bitmap) *routingTable {
	rt := &routingTable{
		id: id,
		mu: &sync.RWMutex{},
	}
	rt.reset()
	return rt
}
// reset discards all buckets and peers, leaving a single empty bucket that
// covers the full distance range.
func (rt *routingTable) reset() {
	rt.mu.Lock()
	defer rt.mu.Unlock()
	rt.buckets = []*bucket{newBucket(bits.MaxRange())}
}
// BucketInfo returns a human-readable summary of every non-empty bucket.
func (rt *routingTable) BucketInfo() string {
	rt.mu.RLock()
	defer rt.mu.RUnlock()

	var lines []string
	for i, b := range rt.buckets {
		contacts := b.Contacts()
		if len(contacts) == 0 {
			continue
		}
		ids := make([]string, len(contacts))
		for j, c := range contacts {
			ids[j] = c.ID.HexShort()
		}
		lines = append(lines, fmt.Sprintf("bucket %d: (%d) %s", i, len(contacts), strings.Join(ids, ", ")))
	}

	if len(lines) == 0 {
		return "buckets are empty"
	}
	return strings.Join(lines, "\n")
}
// Update inserts or refreshes a contact, splitting the target bucket first
// when the split rules allow (see shouldSplit).
func (rt *routingTable) Update(c Contact) {
	rt.mu.Lock() // write lock, because updates may cause bucket splits
	defer rt.mu.Unlock()

	// Buckets are keyed by XOR distance from our ID, so compute it once and
	// use it consistently for both bucket selection and the peer record.
	// (Previously the post-split half was chosen by raw ID containment, which
	// disagrees with UpdatePeer's distance check and could drop the contact.)
	distance := rt.id.Xor(c.ID)

	b := rt.bucketFor(c.ID)

	if rt.shouldSplit(b, c) {
		left, right := b.Split()

		// Replace the old bucket with its two halves, preserving range order.
		for i := range rt.buckets {
			if rt.buckets[i].Range.Start.Equals(left.Range.Start) {
				rt.buckets = append(rt.buckets[:i], append([]*bucket{left, right}, rt.buckets[i+1:]...)...)
				break
			}
		}

		if left.Range.Contains(distance) {
			b = left
		} else {
			b = right
		}
	}

	err := b.UpdatePeer(peer{Contact: c, Distance: distance}, true)
	if err != nil {
		log.Error(err)
	}
}
// Fresh refreshes contact c's last-seen time, but only if c is already
// present in the routing table (it is never inserted here).
func (rt *routingTable) Fresh(c Contact) {
	rt.mu.RLock()
	defer rt.mu.RUnlock()
	p := peer{Contact: c, Distance: rt.id.Xor(c.ID)}
	if err := rt.bucketFor(c.ID).UpdatePeer(p, false); err != nil {
		log.Error(err)
	}
}
// Fail records a failed interaction with contact c, incrementing its failure
// count. The contact is only evicted later, when its bucket needs the space.
func (rt *routingTable) Fail(c Contact) {
	rt.mu.RLock()
	defer rt.mu.RUnlock()
	rt.bucketFor(c.ID).FailContact(c.ID)
}
// GetClosest returns the closest `limit` contacts from the routing table,
// ordered by XOR distance to target.
// This is a locking wrapper around getClosest().
func (rt *routingTable) GetClosest(target bits.Bitmap, limit int) []Contact {
	rt.mu.RLock()
	defer rt.mu.RUnlock()
	return rt.getClosest(target, limit)
}
// getClosest gathers every contact in the table, sorts them by XOR distance
// to target, and returns at most `limit` of the nearest.
// Callers must hold rt.mu.
func (rt *routingTable) getClosest(target bits.Bitmap, limit int) []Contact {
	var all []Contact
	for _, b := range rt.buckets {
		all = append(all, b.Contacts()...)
	}
	sortByDistance(all, target)
	if len(all) <= limit {
		return all
	}
	return all[:limit]
}
// Count returns the total number of contacts across all buckets.
func (rt *routingTable) Count() int {
	rt.mu.RLock()
	defer rt.mu.RUnlock()
	total := 0
	for _, b := range rt.buckets {
		total += b.Len()
	}
	return total
}
// Len returns the number of buckets in the routing table
// (not the number of contacts; see Count for that).
func (rt *routingTable) Len() int {
	rt.mu.RLock()
	defer rt.mu.RUnlock()
	return len(rt.buckets)
}
// bucketFor returns the bucket covering the XOR distance between target and
// our own ID. It panics if target equals our ID (we keep no bucket for
// ourselves) or if no bucket covers the distance, which would mean the
// table's ranges are broken.
func (rt *routingTable) bucketFor(target bits.Bitmap) *bucket {
	if rt.id.Equals(target) {
		panic("routing table does not have a bucket for its own id")
	}
	d := target.Xor(rt.id)
	for i := range rt.buckets {
		if rt.buckets[i].Range.Contains(d) {
			return rt.buckets[i]
		}
	}
	panic("target is not contained in any buckets")
}
// shouldSplit decides whether bucket b should be split before inserting
// contact c. A bucket is only split when it is full and either covers our own
// node ID (distance range starting at zero) or c would rank among the
// bucketSize closest contacts we know of.
func (rt *routingTable) shouldSplit(b *bucket, c Contact) bool {
	if b.Has(c) {
		return false
	}
	if b.Len() >= bucketSize {
		if b.Range.Start.Equals(bits.Bitmap{}) { // this is the bucket covering our node id
			return true
		}
		kClosest := rt.getClosest(rt.id, bucketSize)
		kthClosest := kClosest[len(kClosest)-1]
		if rt.id.Closer(c.ID, kthClosest.ID) {
			return true
		}
	}
	return false
}
//func (rt *routingTable) printBucketInfo() {
// fmt.Printf("there are %d contacts in %d buckets\n", rt.Count(), rt.Len())
// for i, b := range rt.buckets {
// fmt.Printf("bucket %d, %d contacts\n", i+1, len(b.peers))
// fmt.Printf(" start : %s\n", b.Range.Start.String())
// fmt.Printf(" stop : %s\n", b.Range.End.String())
// fmt.Println("")
// }
//}
// GetIDsForRefresh returns one random ID inside each bucket that has not been
// updated within refreshInterval; looking up such an ID refreshes the bucket.
func (rt *routingTable) GetIDsForRefresh(refreshInterval time.Duration) []bits.Bitmap {
	// rt.buckets is spliced by Update when buckets split, so reading it
	// requires the read lock like every other accessor (it was missing here).
	rt.mu.RLock()
	defer rt.mu.RUnlock()

	var bitmaps []bits.Bitmap
	for i, b := range rt.buckets {
		if b.NeedsRefresh(refreshInterval) {
			bitmaps = append(bitmaps, bits.Rand().Prefix(i, false))
		}
	}
	return bitmaps
}
// rtContactSep separates the ID, IP, and port fields in a serialized contact.
const rtContactSep = "-"

// rtSave is the JSON representation of a routing table: the node's ID plus a
// flat list of "id-ip-port" contact strings.
type rtSave struct {
	ID       string   `json:"id"`
	Contacts []string `json:"contacts"`
}
// MarshalJSON serializes the routing table as its node ID plus one
// "id-ip-port" string per contact (see rtSave).
func (rt *routingTable) MarshalJSON() ([]byte, error) {
	data := rtSave{ID: rt.id.Hex()}
	for _, b := range rt.buckets {
		for _, c := range b.Contacts() {
			serialized := c.ID.Hex() + rtContactSep + c.IP.String() + rtContactSep + strconv.Itoa(c.Port)
			data.Contacts = append(data.Contacts, serialized)
		}
	}
	return json.Marshal(data)
}
// UnmarshalJSON rebuilds a routing table from the rtSave format produced by
// MarshalJSON. Contacts are re-inserted one at a time via Update, so bucket
// boundaries are re-derived rather than restored verbatim.
func (rt *routingTable) UnmarshalJSON(b []byte) error {
	var data rtSave
	err := json.Unmarshal(b, &data)
	if err != nil {
		return err
	}

	rt.id, err = bits.FromHex(data.ID)
	if err != nil {
		return errors.Prefix("decoding ID", err)
	}

	// Unmarshaling into a zero-value routingTable{} leaves mu nil, and
	// reset()/Update() would nil-panic. Initialize it here.
	if rt.mu == nil {
		rt.mu = &sync.RWMutex{}
	}
	rt.reset()

	for _, s := range data.Contacts {
		parts := strings.Split(s, rtContactSep)
		if len(parts) != 3 {
			return errors.Err("decoding contact %s: wrong number of parts", s)
		}
		var c Contact
		c.ID, err = bits.FromHex(parts[0])
		if err != nil {
			return errors.Err("decoding contact %s: invalid ID: %s", s, err)
		}
		c.IP = net.ParseIP(parts[1])
		if c.IP == nil {
			return errors.Err("decoding contact %s: invalid IP", s)
		}
		c.Port, err = strconv.Atoi(parts[2])
		if err != nil {
			return errors.Err("decoding contact %s: invalid port: %s", s, err)
		}
		rt.Update(c)
	}

	return nil
}
// RoutingTableRefresh refreshes any buckets that need to be refreshed by
// running a FindContacts lookup for a random ID inside each stale bucket.
// It blocks until all lookups complete.
func RoutingTableRefresh(n *Node, refreshInterval time.Duration, parentGrp *stop.Group) {
	done := stop.New()

	for _, id := range n.rt.GetIDsForRefresh(refreshInterval) {
		done.Add(1)
		go func(id bits.Bitmap) {
			defer done.Done()
			_, _, err := FindContacts(n, id, false, parentGrp)
			if err != nil {
				log.Error("error finding contact during routing table refresh - ", err)
			}
		}(id)
	}

	done.Wait()
	done.Stop()
}
// moveToBack shifts the peer at index to the end of the slice, sliding the
// following elements left. Indexes that are out of range — including the last
// element, which is already at the back — are a no-op.
func moveToBack(peers []peer, index int) {
	if index < 0 || index+1 >= len(peers) {
		return
	}
	moved := peers[index]
	copy(peers[index:], peers[index+1:])
	peers[len(peers)-1] = moved
}

328
dht/routing_table_test.go Normal file
View file

@ -0,0 +1,328 @@
package dht
import (
"encoding/json"
"math/big"
"net"
"strconv"
"strings"
"testing"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/sebdah/goldie"
)
// TestBucket_Split inserts a scripted sequence of contacts and checks the
// bucket count and total contact count after each insert, then verifies which
// bucket a set of IDs maps to.
func TestBucket_Split(t *testing.T) {
	rt := newRoutingTable(bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"))
	if len(rt.buckets) != 1 {
		t.Errorf("there should only be one bucket so far")
	}
	if len(rt.buckets[0].peers) != 0 {
		t.Errorf("there should be no contacts yet")
	}

	var tests = []struct {
		name                  string
		id                    bits.Bitmap
		expectedBucketCount   int
		expectedTotalContacts int
	}{
		//fill first bucket
		{"b1-one", bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100"), 1, 1},
		{"b1-two", bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200"), 1, 2},
		{"b1-three", bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300"), 1, 3},
		{"b1-four", bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400"), 1, 4},
		{"b1-five", bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000500"), 1, 5},
		{"b1-six", bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600"), 1, 6},
		{"b1-seven", bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000700"), 1, 7},
		{"b1-eight", bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800"), 1, 8},
		// split off second bucket and fill it
		{"b2-one", bits.FromHexP("001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), 2, 9},
		{"b2-two", bits.FromHexP("002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), 2, 10},
		{"b2-three", bits.FromHexP("003000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), 2, 11},
		{"b2-four", bits.FromHexP("004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), 2, 12},
		{"b2-five", bits.FromHexP("005000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), 2, 13},
		{"b2-six", bits.FromHexP("006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), 2, 14},
		{"b2-seven", bits.FromHexP("007000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), 2, 15},
		// at this point there are two buckets. the first has 7 contacts, the second has 8
		// inserts into the second bucket should be skipped
		{"dont-split", bits.FromHexP("009000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), 2, 15},
		// ... unless the ID is closer than the kth-closest contact
		{"split-kth-closest", bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), 2, 16},
		{"b3-two", bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002"), 3, 17},
		{"b3-three", bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003"), 3, 18},
	}

	for i, testCase := range tests {
		rt.Update(Contact{testCase.id, net.ParseIP("127.0.0.1"), 8000 + i, 0})

		if len(rt.buckets) != testCase.expectedBucketCount {
			t.Errorf("failed test case %s. there should be %d buckets, got %d", testCase.name, testCase.expectedBucketCount, len(rt.buckets))
		}
		if rt.Count() != testCase.expectedTotalContacts {
			t.Errorf("failed test case %s. there should be %d contacts, got %d", testCase.name, testCase.expectedTotalContacts, rt.Count())
		}
	}

	// verify that each ID falls into the expected bucket index
	var testRanges = []struct {
		id       bits.Bitmap
		expected int
	}{
		{bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), 0},
		{bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005"), 0},
		{bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000410"), 1},
		{bits.FromHexP("0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007f0"), 1},
		{bits.FromHexP("F00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800"), 2},
		{bits.FromHexP("F00000000000000000000000000000000000000000000000000F00000000000000000000000000000000000000000000"), 2},
		{bits.FromHexP("F0000000000000000000000000000000F0000000000000000000000000F0000000000000000000000000000000000000"), 2},
	}

	for _, tt := range testRanges {
		bucket := bucketNumFor(rt, tt.id)
		if bucket != tt.expected {
			t.Errorf("bucketFor(%s, %s) => got %d, expected %d", tt.id.Hex(), rt.id.Hex(), bucket, tt.expected)
		}
	}
}
// bucketNumFor is a test helper that mirrors routingTable.bucketFor but
// returns the bucket's index instead of the bucket itself.
func bucketNumFor(rt *routingTable, target bits.Bitmap) int {
	if rt.id.Equals(target) {
		panic("routing table does not have a bucket for its own id")
	}
	d := target.Xor(rt.id)
	for bucketNum, b := range rt.buckets {
		if b.Range.Contains(d) {
			return bucketNum
		}
	}
	panic("target is not contained in any buckets")
}
// TestBucket_Split_Continuous checks that splitting a bucket produces two
// halves that exactly tile the original range with no gap or overlap.
func TestBucket_Split_Continuous(t *testing.T) {
	b := newBucket(bits.MaxRange())
	left, right := b.Split()

	if !left.Range.Start.Equals(b.Range.Start) {
		t.Errorf("left bucket start does not align with original bucket start. got %s, expected %s", left.Range.Start, b.Range.Start)
	}
	if !right.Range.End.Equals(b.Range.End) {
		t.Errorf("right bucket end does not align with original bucket end. got %s, expected %s", right.Range.End, b.Range.End)
	}

	// right must start exactly one past where left ends
	leftEndNext := (&big.Int{}).Add(left.Range.End.Big(), big.NewInt(1))
	if !bits.FromBigP(leftEndNext).Equals(right.Range.Start) {
		t.Errorf("there's a gap between left bucket end and right bucket start. end is %s, start is %s", left.Range.End, right.Range.Start)
	}
}
// TestBucket_Split_KthClosest_DoSplit verifies that a full non-home bucket is
// split when the incoming contact is closer than the kth-closest known one.
func TestBucket_Split_KthClosest_DoSplit(t *testing.T) {
	rt := newRoutingTable(bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"))

	// add 4 low IDs
	rt.Update(Contact{bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), net.ParseIP("127.0.0.1"), 8001, 0})
	rt.Update(Contact{bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002"), net.ParseIP("127.0.0.1"), 8002, 0})
	rt.Update(Contact{bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003"), net.ParseIP("127.0.0.1"), 8003, 0})
	rt.Update(Contact{bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004"), net.ParseIP("127.0.0.1"), 8004, 0})

	// add 4 high IDs
	rt.Update(Contact{bits.FromHexP("800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.2"), 8001, 0})
	rt.Update(Contact{bits.FromHexP("900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.2"), 8002, 0})
	rt.Update(Contact{bits.FromHexP("a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.2"), 8003, 0})
	rt.Update(Contact{bits.FromHexP("b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.2"), 8004, 0})

	// split the bucket and fill the high bucket
	rt.Update(Contact{bits.FromHexP("c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.2"), 8005, 0})
	rt.Update(Contact{bits.FromHexP("d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.2"), 8006, 0})
	rt.Update(Contact{bits.FromHexP("e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.2"), 8007, 0})
	rt.Update(Contact{bits.FromHexP("f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.2"), 8008, 0})

	// add a high ID. it should split because the high ID is closer than the Kth closest ID
	rt.Update(Contact{bits.FromHexP("910000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.1"), 8009, 0})

	if len(rt.buckets) != 3 {
		t.Errorf("expected 3 buckets, got %d", len(rt.buckets))
	}
	if rt.Count() != 13 {
		t.Errorf("expected 13 contacts, got %d", rt.Count())
	}
}
// TestBucket_Split_KthClosest_DontSplit verifies that a full non-home bucket
// is NOT split when the incoming contact is farther than the kth-closest one.
func TestBucket_Split_KthClosest_DontSplit(t *testing.T) {
	rt := newRoutingTable(bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"))

	// add 4 low IDs
	rt.Update(Contact{bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), net.ParseIP("127.0.0.1"), 8001, 0})
	rt.Update(Contact{bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002"), net.ParseIP("127.0.0.1"), 8002, 0})
	rt.Update(Contact{bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003"), net.ParseIP("127.0.0.1"), 8003, 0})
	rt.Update(Contact{bits.FromHexP("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004"), net.ParseIP("127.0.0.1"), 8004, 0})

	// add 4 high IDs
	rt.Update(Contact{bits.FromHexP("800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.2"), 8001, 0})
	rt.Update(Contact{bits.FromHexP("900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.2"), 8002, 0})
	rt.Update(Contact{bits.FromHexP("a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.2"), 8003, 0})
	rt.Update(Contact{bits.FromHexP("b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.2"), 8004, 0})

	// split the bucket and fill the high bucket
	rt.Update(Contact{bits.FromHexP("c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.2"), 8005, 0})
	rt.Update(Contact{bits.FromHexP("d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.2"), 8006, 0})
	rt.Update(Contact{bits.FromHexP("e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.2"), 8007, 0})
	rt.Update(Contact{bits.FromHexP("f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.2"), 8008, 0})

	// add a really high ID. this should not split because its not closer than the Kth closest ID
	rt.Update(Contact{bits.FromHexP("ffff00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), net.ParseIP("127.0.0.1"), 8009, 0})

	if len(rt.buckets) != 2 {
		t.Errorf("expected 2 buckets, got %d", len(rt.buckets))
	}
	if rt.Count() != 12 {
		t.Errorf("expected 12 contacts, got %d", rt.Count())
	}
}
// TestRoutingTable_GetClosest checks that GetClosest honors the limit and
// orders results by XOR distance to the target.
func TestRoutingTable_GetClosest(t *testing.T) {
	n1 := bits.FromHexP("FFFFFFFF0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
	n2 := bits.FromHexP("FFFFFFF00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
	n3 := bits.FromHexP("111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
	rt := newRoutingTable(n1)
	rt.Update(Contact{n2, net.ParseIP("127.0.0.1"), 8001, 0})
	rt.Update(Contact{n3, net.ParseIP("127.0.0.1"), 8002, 0})

	// n3 is closest to the low-order target
	contacts := rt.GetClosest(bits.FromHexP("222222220000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), 1)
	if len(contacts) != 1 {
		t.Fail()
		return
	}
	if !contacts[0].ID.Equals(n3) {
		t.Error(contacts[0])
	}

	// a generous limit returns everything, still ordered by distance
	contacts = rt.GetClosest(n2, 10)
	if len(contacts) != 2 {
		t.Error(len(contacts))
		return
	}
	if !contacts[0].ID.Equals(n2) {
		t.Error(contacts[0])
	}
	if !contacts[1].ID.Equals(n3) {
		t.Error(contacts[1])
	}
}
// TestRoutingTable_GetClosest_Empty checks that GetClosest on an empty table
// returns no contacts.
func TestRoutingTable_GetClosest_Empty(t *testing.T) {
	n1 := bits.FromShortHexP("1")
	rt := newRoutingTable(n1)

	contacts := rt.GetClosest(bits.FromShortHexP("a"), 3)
	if len(contacts) != 0 {
		t.Error("there shouldn't be any contacts")
		return
	}
}
// TestRoutingTable_Refresh is a placeholder for refresh-behavior coverage.
func TestRoutingTable_Refresh(t *testing.T) {
	t.Skip("TODO: test routing table refreshing")
}
// TestRoutingTable_MoveToBack exercises moveToBack for interior, first, last,
// and out-of-range indexes, using NumFailures as a stand-in identity.
func TestRoutingTable_MoveToBack(t *testing.T) {
	tt := map[string]struct {
		data     []peer
		index    int
		expected []peer
	}{
		"simpleMove": {
			data:     []peer{{NumFailures: 0}, {NumFailures: 1}, {NumFailures: 2}, {NumFailures: 3}},
			index:    1,
			expected: []peer{{NumFailures: 0}, {NumFailures: 2}, {NumFailures: 3}, {NumFailures: 1}},
		},
		"moveFirst": {
			data:     []peer{{NumFailures: 0}, {NumFailures: 1}, {NumFailures: 2}, {NumFailures: 3}},
			index:    0,
			expected: []peer{{NumFailures: 1}, {NumFailures: 2}, {NumFailures: 3}, {NumFailures: 0}},
		},
		"moveLast": {
			data:     []peer{{NumFailures: 0}, {NumFailures: 1}, {NumFailures: 2}, {NumFailures: 3}},
			index:    3,
			expected: []peer{{NumFailures: 0}, {NumFailures: 1}, {NumFailures: 2}, {NumFailures: 3}},
		},
		"largeIndex": {
			data:     []peer{{NumFailures: 0}, {NumFailures: 1}, {NumFailures: 2}, {NumFailures: 3}},
			index:    27,
			expected: []peer{{NumFailures: 0}, {NumFailures: 1}, {NumFailures: 2}, {NumFailures: 3}},
		},
		"negativeIndex": {
			data:     []peer{{NumFailures: 0}, {NumFailures: 1}, {NumFailures: 2}, {NumFailures: 3}},
			index:    -12,
			expected: []peer{{NumFailures: 0}, {NumFailures: 1}, {NumFailures: 2}, {NumFailures: 3}},
		},
	}

	for name, test := range tt {
		moveToBack(test.data, test.index)
		expected := make([]string, len(test.expected))
		actual := make([]string, len(test.data))
		for i := range actual {
			actual[i] = strconv.Itoa(test.data[i].NumFailures)
			expected[i] = strconv.Itoa(test.expected[i].NumFailures)
		}

		expJoin := strings.Join(expected, ",")
		actJoin := strings.Join(actual, ",")

		if actJoin != expJoin {
			t.Errorf("%s failed: got %s; expected %s", name, actJoin, expJoin)
		}
	}
}
// TestRoutingTable_Save fills a routing table and compares its JSON
// serialization against a golden file. Currently skipped pending a fix.
func TestRoutingTable_Save(t *testing.T) {
	t.Skip("fix me")
	id := bits.FromHexP("1c8aff71b99462464d9eeac639595ab99664be3482cb91a29d87467515c7d9158fe72aa1f1582dab07d8f8b5db277f41")
	rt := newRoutingTable(id)

	for i, b := range rt.buckets {
		for j := 0; j < bucketSize; j++ {
			toAdd := b.Range.Start.Add(bits.FromShortHexP(strconv.Itoa(j)))
			if toAdd.Cmp(b.Range.End) <= 0 {
				rt.Update(Contact{
					ID:   b.Range.Start.Add(bits.FromShortHexP(strconv.Itoa(j))),
					IP:   net.ParseIP("1.2.3." + strconv.Itoa(j)),
					Port: 1 + i*bucketSize + j,
				})
			}
		}
	}

	data, err := json.MarshalIndent(rt, "", "  ")
	if err != nil {
		t.Error(err)
	}

	goldie.Assert(t, t.Name(), data)
}
// TestRoutingTable_Load_ID checks that unmarshaling restores the node ID.
// Currently skipped pending a fix (unmarshaling into a zero-value
// routingTable{} panics on the nil mutex).
func TestRoutingTable_Load_ID(t *testing.T) {
	t.Skip("fix me")
	id := "1c8aff71b99462464d9eeac639595ab99664be3482cb91a29d87467515c7d9158fe72aa1f1582dab07d8f8b5db277f41"
	data := []byte(`{"id": "` + id + `","contacts": []}`)

	rt := routingTable{}
	err := json.Unmarshal(data, &rt)
	if err != nil {
		t.Error(err)
	}
	if rt.id.Hex() != id {
		t.Error("id mismatch")
	}
}
// TestRoutingTable_Load_Contacts is a placeholder for contact-deserialization
// coverage.
func TestRoutingTable_Load_Contacts(t *testing.T) {
	t.Skip("TODO")
}

View file

@ -1,597 +0,0 @@
package dht
import (
"container/heap"
"encoding/hex"
"fmt"
log "github.com/sirupsen/logrus"
"net"
"strings"
"sync"
"time"
)
// maxPrefixLength is the maximum routing-table prefix depth in bits.
// NOTE(review): nodeIDLength is 48 bytes (384 bits), which does not match
// maxPrefixLength of 160 — confirm which width this DHT actually uses.
const maxPrefixLength = 160

// nodeIDLength is the node ID length in bytes as carried on the wire.
const nodeIDLength = 48
// compactNodeInfoLength is a node ID plus 6 bytes of compact IP/port info.
const compactNodeInfoLength = nodeIDLength + 6
// node represents a DHT node.
type node struct {
	id             *bitmap
	addr           *net.UDPAddr
	lastActiveTime time.Time // last time this node was seen or created
}
// newNode returns a node pointer for the given raw ID and network address.
// The id must be exactly nodeIDLength bytes long.
func newNode(id, network, address string) (*node, error) {
	if len(id) != nodeIDLength {
		return nil, fmt.Errorf("node id should be a %d-length string", nodeIDLength)
	}

	addr, err := net.ResolveUDPAddr(network, address)
	if err != nil {
		return nil, err
	}

	return &node{newBitmapFromString(id), addr, time.Now()}, nil
}
// newNodeFromCompactInfo parses compactNodeInfo ("Compact node info" per
// BEP5: a node ID followed by 6 bytes of compact IP/port) and returns a node
// pointer.
func newNodeFromCompactInfo(compactNodeInfo string, network string) (*node, error) {
	if len(compactNodeInfo) != compactNodeInfoLength {
		return nil, fmt.Errorf("compactNodeInfo should be a %d-length string", compactNodeInfoLength)
	}

	id := compactNodeInfo[:nodeIDLength]
	ip, port, err := decodeCompactIPPortInfo(compactNodeInfo[nodeIDLength:])
	if err != nil {
		// previously ignored, which could produce a node with a bogus address
		return nil, err
	}

	return newNode(id, network, genAddress(ip.String(), port))
}
// CompactIPPortInfo returns "Compact IP-address/port info".
// See http://www.bittorrent.org/beps/bep_0005.html.
// NOTE(review): the encode error is discarded; an unencodable address yields
// an empty string — confirm callers tolerate that.
func (node *node) CompactIPPortInfo() string {
	info, _ := encodeCompactIPPortInfo(node.addr.IP, node.addr.Port)
	return info
}
// CompactNodeInfo returns "Compact node info": the raw node ID immediately
// followed by the compact IP/port info.
// See http://www.bittorrent.org/beps/bep_0005.html.
func (node *node) CompactNodeInfo() string {
	return strings.Join([]string{
		node.id.RawString(), node.CompactIPPortInfo(),
	}, "")
}
// HexID returns the node ID as a hex string, or "" when the ID is unset.
func (node *node) HexID() string {
	if node.id == nil {
		return ""
	}
	raw := []byte(node.id.RawString())
	return hex.EncodeToString(raw)
}
// Peer represents a peer contact: a network endpoint plus the announce token
// it presented.
type Peer struct {
	IP    net.IP
	Port  int
	token string // opaque token from the announcing peer
}
// newPeer returns a new peer pointer for the given endpoint and token.
func newPeer(ip net.IP, port int, token string) *Peer {
	p := &Peer{IP: ip, Port: port, token: token}
	return p
}
// newPeerFromCompactIPPortInfo creates a peer pointer from compact IP/port
// info plus the announce token.
func newPeerFromCompactIPPortInfo(compactInfo, token string) (*Peer, error) {
	ip, port, err := decodeCompactIPPortInfo(compactInfo)
	if err != nil {
		return nil, err
	}

	return newPeer(ip, port, token), nil
}
// CompactIPPortInfo returns "Compact IP-address/port info" for the peer.
// See http://www.bittorrent.org/beps/bep_0005.html.
// NOTE(review): the encode error is discarded, mirroring node.CompactIPPortInfo.
func (p *Peer) CompactIPPortInfo() string {
	info, _ := encodeCompactIPPortInfo(p.IP, p.Port)
	return info
}
// peersManager tracks which peers have announced each infoHash.
type peersManager struct {
	sync.RWMutex
	table *syncedMap // infoHash -> *keyedDeque of *Peer
	dht   *DHT
}
// newPeersManager returns a new peersManager bound to dht.
func newPeersManager(dht *DHT) *peersManager {
	return &peersManager{
		table: newSyncedMap(),
		dht:   dht,
	}
}
// Insert adds a peer under infoHash, keeping at most dht.K peers per hash by
// dropping the oldest entry on overflow.
func (pm *peersManager) Insert(infoHash string, peer *Peer) {
	pm.Lock()
	if _, ok := pm.table.Get(infoHash); !ok {
		pm.table.Set(infoHash, newKeyedDeque())
	}
	pm.Unlock()

	// NOTE(review): the deque is re-fetched outside pm's lock; this is safe
	// only if table entries are never removed concurrently — confirm.
	v, _ := pm.table.Get(infoHash)
	queue := v.(*keyedDeque)

	queue.Push(peer.CompactIPPortInfo(), peer)
	if queue.Len() > pm.dht.K {
		queue.Remove(queue.Front())
	}
}
// GetPeers returns up to size peers that announced infoHash, preferring the
// most recently inserted ones.
func (pm *peersManager) GetPeers(infoHash string, size int) []*Peer {
	peers := make([]*Peer, 0, size)

	v, ok := pm.table.Get(infoHash)
	if !ok {
		return peers
	}

	for e := range v.(*keyedDeque).Iter() {
		peers = append(peers, e.Value.(*Peer))
	}

	// keep only the tail (newest entries) when there are more than requested
	if len(peers) > size {
		peers = peers[len(peers)-size:]
	}

	return peers
}
// kbucket represents a k-size bucket.
type kbucket struct {
	sync.RWMutex
	nodes, candidates *keyedDeque // active nodes and overflow candidates
	lastChanged       time.Time   // guarded by the embedded RWMutex
	prefix            *bitmap     // ID prefix this bucket covers
}
// newKBucket returns a new, empty kbucket covering the given prefix.
func newKBucket(prefix *bitmap) *kbucket {
	bucket := &kbucket{
		nodes:       newKeyedDeque(),
		candidates:  newKeyedDeque(),
		lastChanged: time.Now(),
		prefix:      prefix,
	}
	return bucket
}
// LastChanged returns the last time the bucket changed.
func (bucket *kbucket) LastChanged() time.Time {
	bucket.RLock()
	defer bucket.RUnlock()
	return bucket.lastChanged
}
// RandomChildID returns a random ID that shares this bucket's prefix.
// NOTE(review): prefix.Size/8 truncates to whole bytes, so sub-byte prefix
// bits are not preserved — confirm this is acceptable.
func (bucket *kbucket) RandomChildID() string {
	prefixLen := bucket.prefix.Size / 8

	return strings.Join([]string{
		bucket.prefix.RawString()[:prefixLen],
		randomString(nodeIDLength - prefixLen),
	}, "")
}
// UpdateTimestamp sets the bucket's last-changed time to now.
func (bucket *kbucket) UpdateTimestamp() {
	bucket.Lock()
	defer bucket.Unlock()
	bucket.lastChanged = time.Now()
}
// Insert inserts node no into the bucket (or refreshes it if already
// present). It returns whether the node is new in the bucket.
func (bucket *kbucket) Insert(no *node) bool {
	isNew := !bucket.nodes.HasKey(no.id.RawString())
	bucket.nodes.Push(no.id.RawString(), no)
	bucket.UpdateTimestamp()
	return isNew
}
// Replace removes node no from the bucket and, if any candidates are waiting,
// promotes the newest candidate into bucket.nodes, keeping nodes ordered by
// lastActiveTime (oldest first).
func (bucket *kbucket) Replace(no *node) {
	bucket.nodes.Delete(no.id.RawString())
	bucket.UpdateTimestamp()

	if bucket.candidates.Len() == 0 {
		return
	}

	no = bucket.candidates.Remove(bucket.candidates.Back()).(*node)

	inserted := false
	for e := range bucket.nodes.Iter() {
		// insert before the first node that was active more recently
		if e.Value.(*node).lastActiveTime.After(
			no.lastActiveTime) && !inserted {
			bucket.nodes.InsertBefore(no, e)
			inserted = true
		}
	}

	if !inserted {
		bucket.nodes.PushBack(no)
	}
}
// Fresh pings every node in the bucket that has been inactive longer than
// dht.NodeExpriedAfter.
func (bucket *kbucket) Fresh(dht *DHT) {
	for e := range bucket.nodes.Iter() {
		no := e.Value.(*node)
		if time.Since(no.lastActiveTime) > dht.NodeExpriedAfter {
			dht.transactionManager.ping(no)
		}
	}
}
// routingTableNode represents one node of the routing-table binary trie;
// leaves hold a kbucket, interior nodes hold two children.
type routingTableNode struct {
	sync.RWMutex
	children []*routingTableNode // [left, right]; nil until Split
	bucket   *kbucket
}
// newRoutingTableNode returns a new leaf routingTableNode for the prefix.
func newRoutingTableNode(prefix *bitmap) *routingTableNode {
	return &routingTableNode{
		children: make([]*routingTableNode, 2),
		bucket:   newKBucket(prefix),
	}
}
// Child returns the node's left (0) or right (1) child, or nil for index >= 2.
// NOTE(review): a negative index would panic on the slice access — confirm
// callers only pass bit values 0 or 1.
func (tableNode *routingTableNode) Child(index int) *routingTableNode {
	if index >= 2 {
		return nil
	}

	tableNode.RLock()
	defer tableNode.RUnlock()
	return tableNode.children[index]
}
// SetChild sets routingTableNode's left or right child. When index is 0, it's
// the left child; if 1, it's the right child.
func (tableNode *routingTableNode) SetChild(index int, c *routingTableNode) {
	tableNode.Lock()
	defer tableNode.Unlock()
	tableNode.children[index] = c
}
// KBucket returns the bucket this routingTableNode holds (nil for interior nodes).
func (tableNode *routingTableNode) KBucket() *kbucket {
	tableNode.RLock()
	defer tableNode.RUnlock()
	return tableNode.bucket
}
// SetKBucket replaces the node's bucket (nil turns a leaf into an interior node).
func (tableNode *routingTableNode) SetKBucket(bucket *kbucket) {
	tableNode.Lock()
	defer tableNode.Unlock()
	tableNode.bucket = bucket
}
// Split splits the current routingTableNode into two children (extending the
// prefix by one bit) and redistributes its nodes and candidates by that bit.
// It is a no-op once the prefix is already maxPrefixLength bits long.
func (tableNode *routingTableNode) Split() {
	prefixLen := tableNode.KBucket().prefix.Size

	if prefixLen == maxPrefixLength {
		return
	}

	for i := 0; i < 2; i++ {
		tableNode.SetChild(i, newRoutingTableNode(newBitmapFrom(
			tableNode.KBucket().prefix, prefixLen+1)))
	}

	tableNode.Lock()
	// the right child's new bit is 1
	tableNode.children[1].bucket.prefix.Set(prefixLen)
	tableNode.Unlock()

	for e := range tableNode.KBucket().nodes.Iter() {
		nd := e.Value.(*node)
		tableNode.Child(nd.id.Bit(prefixLen)).KBucket().nodes.PushBack(nd)
	}

	for e := range tableNode.KBucket().candidates.Iter() {
		nd := e.Value.(*node)
		tableNode.Child(nd.id.Bit(prefixLen)).KBucket().candidates.PushBack(nd)
	}

	for i := 0; i < 2; i++ {
		tableNode.Child(i).KBucket().UpdateTimestamp()
	}
}
// routingTable implements the routing table in the DHT protocol as a binary
// trie of kbuckets, with caches for fast node and bucket lookup.
type routingTable struct {
	*sync.RWMutex
	k              int // max nodes per bucket
	root           *routingTableNode
	cachedNodes    *syncedMap   // address -> *node
	cachedKBuckets *keyedDeque  // prefix -> *kbucket
	dht            *DHT
	clearQueue     *syncedList
}
// newRoutingTable returns a new routingTable pointer with bucket size k,
// seeded with a single root bucket covering the empty prefix.
func newRoutingTable(k int, dht *DHT) *routingTable {
	root := newRoutingTableNode(newBitmap(0))

	rt := &routingTable{
		RWMutex:        &sync.RWMutex{},
		k:              k,
		root:           root,
		cachedNodes:    newSyncedMap(),
		cachedKBuckets: newKeyedDeque(),
		dht:            dht,
		clearQueue:     newSyncedList(),
	}

	rt.cachedKBuckets.Push(root.bucket.prefix.String(), root.bucket)
	return rt
}
// Insert adds a node to the routing table, walking the trie bit by bit. It
// returns whether the node is new in the routing table. At each leaf it
// either inserts directly (room available or node already known), splits the
// bucket (when the node shares the bucket's prefix), or stores the node as a
// candidate and triggers a refresh of the full bucket.
func (rt *routingTable) Insert(nd *node) bool {
	rt.Lock()
	defer rt.Unlock()
	log.Infof("Adding node to routing table: %s (%s:%d)", nd.id.RawString(), nd.addr.IP, nd.addr.Port)
	var (
		next   *routingTableNode
		bucket *kbucket
	)
	root := rt.root

	for prefixLen := 1; prefixLen <= maxPrefixLength; prefixLen++ {
		next = root.Child(nd.id.Bit(prefixLen - 1))

		if next != nil {
			// If next is not the leaf.
			root = next
		} else if root.KBucket().nodes.Len() < rt.k ||
			root.KBucket().nodes.HasKey(nd.id.RawString()) {

			bucket = root.KBucket()
			isNew := bucket.Insert(nd)

			rt.cachedNodes.Set(nd.addr.String(), nd)
			rt.cachedKBuckets.Push(bucket.prefix.String(), bucket)

			return isNew
		} else if root.KBucket().prefix.Compare(nd.id, prefixLen-1) == 0 {
			// If node has the same prefix with bucket, split it.

			root.Split()

			rt.cachedKBuckets.Delete(root.KBucket().prefix.String())
			root.SetKBucket(nil)

			for i := 0; i < 2; i++ {
				bucket = root.Child(i).KBucket()
				rt.cachedKBuckets.Push(bucket.prefix.String(), bucket)
			}

			root = root.Child(nd.id.Bit(prefixLen - 1))
		} else {
			// Finally, store node as a candidate and fresh the bucket.
			root.KBucket().candidates.PushBack(nd)
			if root.KBucket().candidates.Len() > rt.k {
				root.KBucket().candidates.Remove(
					root.KBucket().candidates.Front())
			}

			go root.KBucket().Fresh(rt.dht)
			return false
		}
	}
	return false
}
// GetNeighbors returns the size-length nodes closest to id.
func (rt *routingTable) GetNeighbors(id *bitmap, size int) []*node {
	rt.RLock()
	candidates := make([]interface{}, 0, rt.cachedNodes.Len())
	for item := range rt.cachedNodes.Iter() {
		candidates = append(candidates, item.val.(*node))
	}
	rt.RUnlock()

	// Rank all known nodes by XOR distance and keep the closest `size`.
	closest := getTopK(candidates, id, size)
	out := make([]*node, len(closest))
	for i := range closest {
		out[i] = closest[i].(*node)
	}
	return out
}
// GetNeighborCompactInfos return the size-length compact node info closest to id.
func (rt *routingTable) GetNeighborCompactInfos(id *bitmap, size int) []string {
	nearest := rt.GetNeighbors(id, size)
	compact := make([]string, 0, len(nearest))
	for _, nd := range nearest {
		compact = append(compact, nd.CompactNodeInfo())
	}
	return compact
}
// GetNodeKBucktByID returns node whose id is `id` and the bucket it
// belongs to. Both results are nil/zero when the id is not stored.
func (rt *routingTable) GetNodeKBucktByID(id *bitmap) (
	nd *node, bucket *kbucket) {
	rt.RLock()
	defer rt.RUnlock()

	var next *routingTableNode
	root := rt.root

	// Follow id's bits down the prefix tree until a leaf is reached.
	for prefixLen := 1; prefixLen <= maxPrefixLength; prefixLen++ {
		next = root.Child(id.Bit(prefixLen - 1))
		if next == nil {
			// Leaf: the node, if stored at all, lives in this leaf's bucket.
			v, ok := root.KBucket().nodes.Get(id.RawString())
			if !ok {
				return
			}
			nd, bucket = v.Value.(*node), root.KBucket()
			return
		}
		root = next
	}
	return
}
// GetNodeByAddress finds node by address.
func (rt *routingTable) GetNodeByAddress(address string) (no *node, ok bool) {
	rt.RLock()
	defer rt.RUnlock()

	var cached interface{}
	if cached, ok = rt.cachedNodes.Get(address); ok {
		no = cached.(*node)
	}
	return
}
// Remove deletes the node whose id is `id`.
func (rt *routingTable) Remove(id *bitmap) {
	if nd, bucket := rt.GetNodeKBucktByID(id); nd != nil {
		// Replace presumably swaps a stored candidate into the vacated slot —
		// see kbucket.Replace (defined elsewhere).
		bucket.Replace(nd)
		rt.cachedNodes.Delete(nd.addr.String())
		rt.cachedKBuckets.Push(bucket.prefix.String(), bucket)
	}
}
// RemoveByAddr deletes the node whose address is `ip:port`.
func (rt *routingTable) RemoveByAddr(address string) {
	if v, ok := rt.cachedNodes.Get(address); ok {
		rt.Remove(v.(*node).id)
	}
}
// Fresh sends findNode to all nodes in the expired nodes.
// A bucket is "expired" when it has not changed for KBucketExpiredAfter; at
// most RefreshNodeNum of its nodes are queried per pass.
// NOTE(review): clearQueue is filled here and cleared immediately below —
// confirm nothing else consumes it between PushBack and Clear.
func (rt *routingTable) Fresh() {
	now := time.Now()

	for e := range rt.cachedKBuckets.Iter() {
		bucket := e.Value.(*kbucket)
		// Skip recently-updated and empty buckets.
		if now.Sub(bucket.LastChanged()) < rt.dht.KBucketExpiredAfter ||
			bucket.nodes.Len() == 0 {
			continue
		}

		i := 0
		// Inner `e` intentionally shadows the outer loop variable.
		for e := range bucket.nodes.Iter() {
			if i < rt.dht.RefreshNodeNum {
				no := e.Value.(*node)
				rt.dht.transactionManager.findNode(no, bucket.RandomChildID())
				rt.clearQueue.PushBack(no)
			}
			i++
		}
	}

	rt.clearQueue.Clear()
}
// Len returns the number of nodes in table.
func (rt *routingTable) Len() int {
	rt.RLock()
	defer rt.RUnlock()
	// cachedNodes holds every stored node keyed by address, so its size is
	// the table size.
	return rt.cachedNodes.Len()
}
// Implementation of heap with heap.Interface.
type heapItem struct {
	distance *bitmap     // XOR distance from the target id to this item's node
	value    interface{} // the wrapped *node
}

// topKHeap keeps candidate nodes ordered by distance so getTopK can evict
// the farthest item when more than k are held.
type topKHeap []*heapItem
// Len reports the number of items held (heap.Interface).
func (kHeap topKHeap) Len() int {
	return len(kHeap)
}
// Less orders items by distance (heap.Interface). Compare == 1 presumably
// means i's distance is greater than j's, making the heap root the farthest
// kept item so getTopK can pop it when over capacity — confirm against
// bitmap.Compare.
func (kHeap topKHeap) Less(i, j int) bool {
	return kHeap[i].distance.Compare(kHeap[j].distance, maxPrefixLength) == 1
}
// Swap exchanges items i and j (heap.Interface).
func (kHeap topKHeap) Swap(i, j int) {
	kHeap[i], kHeap[j] = kHeap[j], kHeap[i]
}
// Push appends x onto the backing slice (heap.Interface; use via heap.Push).
func (kHeap *topKHeap) Push(x interface{}) {
	*kHeap = append(*kHeap, x.(*heapItem))
}
// Pop removes and returns the last item of the backing slice
// (heap.Interface; use via heap.Pop).
func (kHeap *topKHeap) Pop() interface{} {
	old := *kHeap
	last := len(old) - 1
	item := old[last]
	*kHeap = old[:last]
	return item
}
// getTopK solves the top-k problem with heap. It's time complexity is
// O(n*log(k)). When n is large, time complexity will be too high, need to be
// optimized.
func getTopK(queue []interface{}, id *bitmap, k int) []interface{} {
	h := make(topKHeap, 0, k+1)

	// Push each candidate; once more than k are held, drop the farthest.
	for _, v := range queue {
		nd := v.(*node)
		heap.Push(&h, &heapItem{id.Xor(nd.id), v})
		if h.Len() > k {
			heap.Pop(&h)
		}
	}

	// Drain the heap back-to-front so the result is ordered nearest-first.
	result := make([]interface{}, h.Len())
	for i := len(result) - 1; i >= 0; i-- {
		result[i] = heap.Pop(&h).(*heapItem).value
	}
	return result
}

195
dht/rpc.go Normal file
View file

@ -0,0 +1,195 @@
package dht
import (
"context"
"net"
"net/http"
"strconv"
"sync"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/gorilla/mux"
rpc2 "github.com/gorilla/rpc/v2"
"github.com/gorilla/rpc/v2/json"
)
// rpcReceiver exposes DHT operations over gorilla JSONRPC; it is registered
// by runRPCServer.
type rpcReceiver struct {
	dht *DHT
}
// RpcPingArgs are the arguments for the Ping RPC method.
type RpcPingArgs struct {
	Address string // "ip:port" of the node to ping
}
// Ping pings the node at args.Address and writes the success response string
// into result on success.
func (rpc *rpcReceiver) Ping(r *http.Request, args *RpcPingArgs, result *string) error {
	if len(args.Address) == 0 {
		return errors.Err("no address given")
	}
	if err := rpc.dht.Ping(args.Address); err != nil {
		return err
	}
	*result = pingSuccessResponse
	return nil
}
// RpcFindArgs are the arguments for FindNode and FindValue: the key to look
// up, and the one node (ID/IP/Port) that should be queried.
type RpcFindArgs struct {
	Key    string // hex target key
	NodeID string // hex id of the node to query
	IP     string
	Port   int
}
// FindNode sends a findNode request for args.Key to the single node described
// by args and returns the contacts that node replies with (if any).
func (rpc *rpcReceiver) FindNode(r *http.Request, args *RpcFindArgs, result *[]Contact) error {
	key, err := bits.FromHex(args.Key)
	if err != nil {
		return err
	}
	target, err := bits.FromHex(args.NodeID)
	if err != nil {
		return err
	}

	queried := Contact{ID: target, IP: net.ParseIP(args.IP), Port: args.Port}
	res := rpc.dht.node.Send(queried, Request{Method: findNodeMethod, Arg: &key})
	if res != nil && res.Contacts != nil {
		*result = res.Contacts
	}
	return nil
}
// RpcFindValueResult carries either the found value or, failing that, the
// closer contacts returned by the queried node.
type RpcFindValueResult struct {
	Contacts []Contact
	Value    string
}
// FindValue sends a findValue request for args.Key to the node described by
// args, returning the value if that node has it, else its closer contacts.
func (rpc *rpcReceiver) FindValue(r *http.Request, args *RpcFindArgs, result *RpcFindValueResult) error {
	key, err := bits.FromHex(args.Key)
	if err != nil {
		return err
	}
	target, err := bits.FromHex(args.NodeID)
	if err != nil {
		return err
	}

	queried := Contact{ID: target, IP: net.ParseIP(args.IP), Port: args.Port}
	res := rpc.dht.node.Send(queried, Request{Arg: &key, Method: findValueMethod})

	switch {
	case res == nil:
		return errors.Err("not sure what happened")
	case res.FindValueKey != "":
		*result = RpcFindValueResult{Value: res.FindValueKey}
		return nil
	case res.Contacts != nil:
		*result = RpcFindValueResult{Contacts: res.Contacts}
		return nil
	default:
		return errors.Err("not sure what happened")
	}
}
// RpcIterativeFindValueArgs are the arguments for IterativeFindValue.
type RpcIterativeFindValueArgs struct {
	Key string // hex target key
}

// RpcIterativeFindValueResult reports an iterative lookup: the closest
// contacts found, whether the value was found, and the peers holding it.
type RpcIterativeFindValueResult struct {
	Contacts   []Contact
	FoundValue bool
	Values     []Contact
}
// IterativeFindValue performs a full iterative findValue across the network
// for args.Key. Contacts with a PeerPort are reported as value holders.
func (rpc *rpcReceiver) IterativeFindValue(r *http.Request, args *RpcIterativeFindValueArgs, result *RpcIterativeFindValueResult) error {
	key, err := bits.FromHex(args.Key)
	if err != nil {
		return err
	}

	contacts, found, err := FindContacts(rpc.dht.node, key, true, nil)
	if err != nil {
		return err
	}

	result.Contacts = contacts
	result.FoundValue = found
	if !found {
		return nil
	}
	for _, c := range contacts {
		if c.PeerPort > 0 {
			result.Values = append(result.Values, c)
		}
	}
	return nil
}
// RpcBucketResponse describes one routing-table bucket: its id range and the
// contacts it currently holds.
type RpcBucketResponse struct {
	Start       string
	End         string
	NumContacts int
	Contacts    []Contact
}

// RpcRoutingTableResponse is a snapshot of this node's routing table.
type RpcRoutingTableResponse struct {
	NodeID     string
	NumBuckets int
	Buckets    []RpcBucketResponse
}
// GetRoutingTable fills result with a snapshot of the node's routing table.
func (rpc *rpcReceiver) GetRoutingTable(r *http.Request, args *struct{}, result *RpcRoutingTableResponse) error {
	buckets := rpc.dht.node.rt.buckets
	result.NodeID = rpc.dht.node.id.String()
	result.NumBuckets = len(buckets)

	for _, b := range buckets {
		entry := RpcBucketResponse{
			Start:       b.Range.Start.String(),
			End:         b.Range.End.String(),
			NumContacts: b.Len(),
			Contacts:    b.Contacts(),
		}
		result.Buckets = append(result.Buckets, entry)
	}
	return nil
}
// AddKnownNode injects the given contact directly into the node's routing
// table. result is unused.
func (rpc *rpcReceiver) AddKnownNode(r *http.Request, args *Contact, result *string) error {
	rpc.dht.node.AddKnownNode(*args)
	return nil
}
// runRPCServer serves the DHT's JSONRPC interface on 0.0.0.0:port until the
// DHT's stop group fires, then shuts the HTTP server down gracefully and
// waits for the serving goroutine to finish.
// NOTE(review): the http.Server sets no Read/Write timeouts, and a Shutdown
// error returns before wg.Wait() — confirm both are acceptable here.
func (dht *DHT) runRPCServer(port int) {
	addr := "0.0.0.0:" + strconv.Itoa(port)

	s := rpc2.NewServer()
	s.RegisterCodec(json.NewCodec(), "application/json")
	s.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8")
	err := s.RegisterService(&rpcReceiver{dht: dht}, "rpc")
	if err != nil {
		log.Error(errors.Prefix("registering rpc service", err))
		return
	}

	handler := mux.NewRouter()
	handler.Handle("/", s)
	server := &http.Server{Addr: addr, Handler: handler}

	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		log.Printf("RPC server listening on %s", addr)
		// ErrServerClosed is the normal result of Shutdown below.
		err := server.ListenAndServe()
		if err != nil && err != http.ErrServerClosed {
			log.Error(err)
		}
	}()

	// Block until the DHT is told to stop.
	<-dht.grp.Ch()

	err = server.Shutdown(context.Background())
	if err != nil {
		log.Error(errors.Prefix("shutting down rpc service", err))
		return
	}
	wg.Wait()
}

62
dht/store.go Normal file
View file

@ -0,0 +1,62 @@
package dht
import (
"sync"
"github.com/lbryio/lbry.go/v2/dht/bits"
)
// TODO: expire stored data after tExpire time

// contactStore records which contacts have announced each blob hash.
type contactStore struct {
	// map of blob hashes to (map of node IDs to bools)
	hashes map[bits.Bitmap]map[bits.Bitmap]bool
	// stores the peers themselves, so they can be updated in one place
	contacts map[bits.Bitmap]Contact
	lock     sync.RWMutex
}
// newStore returns an empty, ready-to-use contactStore.
func newStore() *contactStore {
	cs := &contactStore{}
	cs.hashes = make(map[bits.Bitmap]map[bits.Bitmap]bool)
	cs.contacts = make(map[bits.Bitmap]Contact)
	return cs
}
// Upsert records that contact has announced blobHash, and refreshes the
// stored copy of the contact itself.
func (s *contactStore) Upsert(blobHash bits.Bitmap, contact Contact) {
	s.lock.Lock()
	defer s.lock.Unlock()

	announcers, exists := s.hashes[blobHash]
	if !exists {
		announcers = make(map[bits.Bitmap]bool)
		s.hashes[blobHash] = announcers
	}
	announcers[contact.ID] = true
	s.contacts[contact.ID] = contact
}
// Get returns every contact known to have announced blobHash (nil if none).
func (s *contactStore) Get(blobHash bits.Bitmap) []Contact {
	s.lock.RLock()
	defer s.lock.RUnlock()

	ids, ok := s.hashes[blobHash]
	if !ok {
		return nil
	}

	var contacts []Contact
	for id := range ids {
		// Invariant: every announced id has a stored contact.
		contact, found := s.contacts[id]
		if !found {
			panic("node id in IDs list, but not in nodeInfo")
		}
		contacts = append(contacts, contact)
	}
	return contacts
}
// RemoveTODO is a not-yet-implemented placeholder for deleting a contact.
func (s *contactStore) RemoveTODO(contact Contact) {
	// TODO: remove peer from everywhere
}
// CountStoredHashes returns the number of distinct blob hashes that have at
// least one announcing contact recorded.
func (s *contactStore) CountStoredHashes() int {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return len(s.hashes)
}

312
dht/testing.go Normal file
View file

@ -0,0 +1,312 @@
package dht
import (
"net"
"strconv"
"strings"
"testing"
"time"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lbryio/lbry.go/v2/extras/errors"
)
// Address and first port used by TestingCreateNetwork for test DHT instances.
var testingDHTIP = "127.0.0.1"
var testingDHTFirstPort = 21000
// TestingCreateNetwork initializes a testable DHT network with a specific number of nodes, with bootstrap and concurrent options.
// With bootstrap, a BootstrapNode is started on testingDHTFirstPort and
// seeded into every DHT; the DHTs themselves use the ports that follow.
func TestingCreateNetwork(t *testing.T, numNodes int, bootstrap, concurrent bool) (*BootstrapNode, []*DHT) {
	var bootstrapNode *BootstrapNode
	var seeds []string

	if bootstrap {
		bootstrapAddress := testingDHTIP + ":" + strconv.Itoa(testingDHTFirstPort)
		seeds = []string{bootstrapAddress}

		bootstrapNode = NewBootstrapNode(bits.Rand(), 0, bootstrapDefaultRefreshDuration)
		listener, err := net.ListenPacket(Network, bootstrapAddress)
		if err != nil {
			panic(err)
		}

		err = bootstrapNode.Connect(listener.(*net.UDPConn))
		if err != nil {
			t.Error("error connecting bootstrap node - ", err)
		}
	}

	if numNodes < 1 {
		return bootstrapNode, nil
	}

	firstPort := testingDHTFirstPort + 1
	dhts := make([]*DHT, numNodes)

	for i := 0; i < numNodes; i++ {
		c := NewStandardConfig()
		c.NodeID = bits.Rand().Hex()
		c.Address = testingDHTIP + ":" + strconv.Itoa(firstPort+i)
		c.SeedNodes = seeds
		dht := New(c)

		go func() {
			// t.Error (unlike t.Fatal) is usable from other goroutines.
			err := dht.Start()
			if err != nil {
				t.Error("error starting dht - ", err)
			}
		}()
		if !concurrent {
			dht.WaitUntilJoined()
		}
		dhts[i] = dht
	}

	// Concurrent mode: start everything first, then wait for all joins.
	if concurrent {
		for _, d := range dhts {
			d.WaitUntilJoined()
		}
	}

	return bootstrapNode, dhts
}
// timeoutErr wraps an error so it satisfies net.Error's Timeout/Temporary
// methods, mimicking a real UDP deadline error in tests.
type timeoutErr struct {
	error
}

// Timeout reports the error as a deadline expiry.
func (t timeoutErr) Timeout() bool {
	return true
}

// Temporary reports the error as retryable.
func (t timeoutErr) Temporary() bool {
	return true
}
// TODO: just use a normal net.Conn instead of this mock conn

// testUDPPacket is one datagram plus its peer address.
type testUDPPacket struct {
	data []byte
	addr *net.UDPAddr
}

// testUDPConn is an in-memory stand-in for *net.UDPConn used in tests.
type testUDPConn struct {
	addr         *net.UDPAddr
	toRead       chan testUDPPacket // packets delivered to ReadFromUDP
	writes       chan testUDPPacket // packets captured from WriteToUDP
	readDeadline time.Time
}
// newTestUDPConn creates a mock UDP connection bound to the given "ip:port"
// address. It panics on a malformed address (test helper).
func newTestUDPConn(addr string) *testUDPConn {
	parts := strings.Split(addr, ":")
	if len(parts) != 2 {
		panic("addr needs ip and port")
	}
	port, err := strconv.Atoi(parts[1])
	if err != nil {
		panic(err)
	}
	return &testUDPConn{
		// FIX: parse the IP string. net.IP(parts[0]) byte-cast the ASCII of
		// "127.0.0.1" into the IP slice, yielding an invalid address.
		addr:   &net.UDPAddr{IP: net.ParseIP(parts[0]), Port: port},
		toRead: make(chan testUDPPacket),
		writes: make(chan testUDPPacket),
	}
}
// ReadFromUDP blocks until a packet is delivered on toRead or the read
// deadline (if one was set) expires. A closed toRead channel reports an
// error, mirroring a closed connection.
func (t testUDPConn) ReadFromUDP(b []byte) (int, *net.UDPAddr, error) {
	var timeoutCh <-chan time.Time
	if !t.readDeadline.IsZero() {
		timeoutCh = time.After(time.Until(t.readDeadline))
	}

	select {
	case packet, ok := <-t.toRead:
		if !ok {
			return 0, nil, errors.Err("conn closed")
		}
		// Copy as much as fits; n may be shorter than the packet.
		n := copy(b, packet.data)
		return n, packet.addr, nil
	case <-timeoutCh:
		return 0, nil, timeoutErr{errors.Err("timeout")}
	}
}
// WriteToUDP records the outgoing packet on the writes channel for the test
// to inspect, always reporting the full length as written.
func (t testUDPConn) WriteToUDP(b []byte, addr *net.UDPAddr) (int, error) {
	t.writes <- testUDPPacket{data: b, addr: addr}
	return len(b), nil
}
// SetReadDeadline sets the deadline consulted by ReadFromUDP.
func (t *testUDPConn) SetReadDeadline(tm time.Time) error {
	t.readDeadline = tm
	return nil
}
// SetWriteDeadline is a no-op; mock writes never block on a deadline.
func (t *testUDPConn) SetWriteDeadline(tm time.Time) error {
	return nil
}
// Close shuts the mock down: closing toRead unblocks ReadFromUDP with an
// error, and the writes channel is dropped.
// NOTE(review): closing toRead from the reader side and nil-ing writes can
// break a concurrent WriteToUDP — confirm tests never write after Close.
func (t *testUDPConn) Close() error {
	close(t.toRead)
	t.writes = nil
	return nil
}
// verifyResponse asserts that a decoded DHT response map carries exactly the
// expected header fields: message type, message id, and responding node id.
func verifyResponse(t *testing.T, resp map[string]interface{}, id messageID, dhtNodeID string) {
	if len(resp) != 4 {
		t.Errorf("expected 4 response fields, got %d", len(resp))
	}

	// Type field must be present, integer, and equal to responseType.
	_, ok := resp[headerTypeField]
	if !ok {
		t.Error("missing type field")
	} else {
		rType, ok := resp[headerTypeField].(int64)
		if !ok {
			t.Error("type is not an integer")
		} else if rType != responseType {
			t.Error("unexpected response type")
		}
	}

	// Message id must echo the request id and have the canonical length.
	_, ok = resp[headerMessageIDField]
	if !ok {
		t.Error("missing message id field")
	} else {
		rMessageID, ok := resp[headerMessageIDField].(string)
		if !ok {
			t.Error("message ID is not a string")
		} else if rMessageID != string(id[:]) {
			t.Error("unexpected message ID")
		}
		if len(rMessageID) != messageIDLength {
			t.Errorf("message ID should be %d chars long", messageIDLength)
		}
	}

	// Node id must match the responding DHT node.
	_, ok = resp[headerNodeIDField]
	if !ok {
		t.Error("missing node id field")
	} else {
		rNodeID, ok := resp[headerNodeIDField].(string)
		if !ok {
			t.Error("node ID is not a string")
		} else if rNodeID != dhtNodeID {
			t.Error("unexpected node ID")
		}
		if len(rNodeID) != nodeIDLength {
			t.Errorf("node ID should be %d chars long", nodeIDLength)
		}
	}
}
// verifyContacts asserts that a decoded contact list (each contact a 3-item
// [id, ip, port] list) matches the expected nodes with no duplicates.
func verifyContacts(t *testing.T, contacts []interface{}, nodes []Contact) {
	if len(contacts) != len(nodes) {
		t.Errorf("got %d contacts; expected %d", len(contacts), len(nodes))
		return
	}

	// Tracks which expected nodes were matched, to catch duplicates.
	foundNodes := make(map[string]bool)

	for _, c := range contacts {
		contact, ok := c.([]interface{})
		if !ok {
			t.Error("contact is not a list")
			return
		}

		if len(contact) != 3 {
			t.Error("contact must be 3 items")
			return
		}

		var currNode Contact
		currNodeFound := false

		id, ok := contact[0].(string)
		if !ok {
			t.Error("contact id is not a string")
		} else {
			if _, ok := foundNodes[id]; ok {
				t.Errorf("contact %s appears multiple times", id)
				continue
			}
			// Match the contact id against the expected node set.
			for _, n := range nodes {
				if n.ID.RawString() == id {
					currNode = n
					currNodeFound = true
					foundNodes[id] = true
					break
				}
			}
			if !currNodeFound {
				t.Errorf("unexpected contact %s", id)
				continue
			}
		}

		ip, ok := contact[1].(string)
		if !ok {
			t.Error("contact IP is not a string")
		} else if !currNode.IP.Equal(net.ParseIP(ip)) {
			t.Errorf("contact IP mismatch. got %s; expected %s", ip, currNode.IP.String())
		}

		port, ok := contact[2].(int64)
		if !ok {
			t.Error("contact port is not an int")
		} else if int(port) != currNode.Port {
			t.Errorf("contact port mismatch. got %d; expected %d", port, currNode.Port)
		}
	}
}
// verifyCompactContacts asserts that a list of compact-encoded contact
// strings decodes to exactly the expected nodes, with no duplicates.
func verifyCompactContacts(t *testing.T, contacts []interface{}, nodes []Contact) {
	if len(contacts) != len(nodes) {
		t.Errorf("got %d contacts; expected %d", len(contacts), len(nodes))
		return
	}

	// Tracks which expected nodes were matched, to catch duplicates.
	foundNodes := make(map[string]bool)

	for _, c := range contacts {
		compact, ok := c.(string)
		if !ok {
			t.Error("contact is not a string")
			return
		}

		contact := Contact{}
		err := contact.UnmarshalCompact([]byte(compact))
		if err != nil {
			t.Error(err)
			return
		}

		var currNode Contact
		currNodeFound := false

		if _, ok := foundNodes[contact.ID.Hex()]; ok {
			t.Errorf("contact %s appears multiple times", contact.ID.Hex())
			continue
		}
		for _, n := range nodes {
			if n.ID.Equals(contact.ID) {
				currNode = n
				currNodeFound = true
				foundNodes[contact.ID.Hex()] = true
				break
			}
		}
		if !currNodeFound {
			t.Errorf("unexpected contact %s", contact.ID.Hex())
			continue
		}

		if !currNode.IP.Equal(contact.IP) {
			t.Errorf("contact IP mismatch. got %s; expected %s", contact.IP.String(), currNode.IP.String())
		}
		if contact.Port != currNode.Port {
			t.Errorf("contact port mismatch. got %d; expected %d", contact.Port, currNode.Port)
		}
	}
}

70
dht/token_cache.go Normal file
View file

@ -0,0 +1,70 @@
package dht
import (
"sync"
"time"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lbryio/lbry.go/v2/extras/stop"
)
// TODO: this should be moved out of dht and into node, and it should be completely hidden inside node. dht should not need to know about tokens

// tokenCacheEntry is a cached store token plus the time it was received.
type tokenCacheEntry struct {
	token      string
	receivedAt time.Time
}

// tokenCache caches per-contact store tokens so repeated stores to the same
// contact can skip a findValue round trip until the token expires.
type tokenCache struct {
	node       *Node
	tokens     map[string]tokenCacheEntry // keyed by Contact.String()
	expiration time.Duration
	lock       *sync.RWMutex
}
// newTokenCache returns a tokenCache for node whose entries expire after the
// given duration.
func newTokenCache(node *Node, expiration time.Duration) *tokenCache {
	return &tokenCache{
		node:       node,
		tokens:     make(map[string]tokenCacheEntry),
		expiration: expiration,
		lock:       &sync.RWMutex{},
	}
}
// TODO: if store fails, get new token. can happen if a node restarts but we have the token cached

// Get returns a store token for contact c, using the cached entry while it is
// fresher than tc.expiration, otherwise fetching a new one via findValue.
// Returns "" if canceled or if no response arrives.
// NOTE(review): stale entries are never evicted, and two goroutines missing
// the cache for the same contact will both issue requests — confirm OK.
func (tc *tokenCache) Get(c Contact, hash bits.Bitmap, cancelCh stop.Chan) string {
	tc.lock.RLock()
	token, exists := tc.tokens[c.String()]
	tc.lock.RUnlock()
	if exists && time.Since(token.receivedAt) < tc.expiration {
		return token.token
	}

	// Cache miss or expired: ask the contact for a fresh token.
	resCh := tc.node.SendAsync(c, Request{
		Method: findValueMethod,
		Arg:    &hash,
	})

	var res *Response

	select {
	case res = <-resCh:
	case <-cancelCh:
		return ""
	}

	if res == nil {
		return ""
	}

	tc.lock.Lock()
	tc.tokens[c.String()] = tokenCacheEntry{
		token:      res.Token,
		receivedAt: time.Now(),
	}
	tc.lock.Unlock()

	return res.Token
}

78
dht/token_manager.go Normal file
View file

@ -0,0 +1,78 @@
package dht
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"net"
"strconv"
"sync"
"time"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lbryio/lbry.go/v2/extras/stop"
)
// tokenManager issues and verifies opaque store tokens derived from a
// periodically rotated secret; tokens stay valid across one rotation via
// prevSecret (see Verify).
type tokenManager struct {
	secret     []byte
	prevSecret []byte
	lock       *sync.RWMutex // guards secret/prevSecret
	stop       *stop.Group   // stops the rotation goroutine
}
// Start initializes the secrets and launches a goroutine that rotates them
// every interval until Stop is called.
func (tm *tokenManager) Start(interval time.Duration) {
	tm.secret = make([]byte, 64)
	tm.prevSecret = make([]byte, 64)
	tm.lock = &sync.RWMutex{}
	tm.stop = stop.New()

	tm.rotateSecret()

	tm.stop.Add(1)
	go func() {
		defer tm.stop.Done()
		tick := time.NewTicker(interval)
		// FIX: stop the ticker when the goroutine exits so its resources are
		// released instead of leaking after Stop().
		defer tick.Stop()
		for {
			select {
			case <-tick.C:
				tm.rotateSecret()
			case <-tm.stop.Ch():
				return
			}
		}
	}()
}
// Stop signals the rotation goroutine and blocks until it has exited.
func (tm *tokenManager) Stop() {
	tm.stop.StopAndWait()
}
// Get returns the current token for the given node id and address.
func (tm *tokenManager) Get(nodeID bits.Bitmap, addr *net.UDPAddr) string {
	// FIX: rotateSecret mutates tm.secret in place under tm.lock; read it
	// under the read lock to avoid a data race with the rotation goroutine.
	tm.lock.RLock()
	defer tm.lock.RUnlock()
	return genToken(tm.secret, nodeID, addr)
}
// Verify reports whether token matches the current or the previous secret
// for the given node id and address (tokens survive one rotation).
func (tm *tokenManager) Verify(token string, nodeID bits.Bitmap, addr *net.UDPAddr) bool {
	// FIX: read the secrets under the read lock; rotateSecret overwrites both
	// slices in place, so unlocked reads race with the rotation goroutine.
	tm.lock.RLock()
	defer tm.lock.RUnlock()
	return token == genToken(tm.secret, nodeID, addr) || token == genToken(tm.prevSecret, nodeID, addr)
}
// genToken derives a token as sha256(nodeID || IP || port || secret), so a
// token can be re-derived for verification without per-peer state.
func genToken(secret []byte, nodeID bits.Bitmap, addr *net.UDPAddr) string {
	buf := bytes.Buffer{}
	buf.Write(nodeID[:])
	buf.Write(addr.IP)
	buf.WriteString(strconv.Itoa(addr.Port))
	buf.Write(secret)
	t := sha256.Sum256(buf.Bytes())
	return string(t[:])
}
// rotateSecret retires the current secret into prevSecret and fills secret
// with fresh random bytes, under the write lock. Panics if the OS random
// source fails.
func (tm *tokenManager) rotateSecret() {
	tm.lock.Lock()
	defer tm.lock.Unlock()

	copy(tm.prevSecret, tm.secret)

	_, err := rand.Read(tm.secret)
	if err != nil {
		panic(err)
	}
}

View file

@ -1,133 +0,0 @@
package dht
import (
"crypto/rand"
"errors"
"io/ioutil"
"net"
"net/http"
"strconv"
"strings"
"time"
)
// randomString generates a size-length string randomly.
func randomString(size int) string {
	buff := make([]byte, size)
	// FIX: the crypto/rand error was silently discarded; a failed read would
	// have returned a partially zeroed "random" string.
	if _, err := rand.Read(buff); err != nil {
		panic(err)
	}
	return string(buff)
}
// bytes2int returns the int value it represents (big-endian fold of up to
// 8 bytes; panics on longer input).
func bytes2int(data []byte) uint64 {
	if len(data) > 8 {
		panic("data too long")
	}
	var val uint64
	for _, b := range data {
		val = val<<8 | uint64(b)
	}
	return val
}
// int2bytes returns the byte array it represents: the minimal big-endian
// encoding of val, with zero encoded as a single 0x00 byte.
func int2bytes(val uint64) []byte {
	buf := make([]byte, 8)
	firstNonZero := -1
	for i := 0; i < 8; i++ {
		buf[i] = byte(val >> (uint(7-i) * 8))
		if firstNonZero == -1 && buf[i] != 0 {
			firstNonZero = i
		}
	}
	if firstNonZero == -1 {
		return buf[:1]
	}
	return buf[firstNonZero:]
}
// decodeCompactIPPortInfo decodes compactIP-address/port info in BitTorrent
// DHT Protocol. It returns the ip and port number.
func decodeCompactIPPortInfo(info string) (ip net.IP, port int, err error) {
	if len(info) != 6 {
		return nil, 0, errors.New("compact info should be 6-length long")
	}
	// First four bytes are the IPv4 address, last two the big-endian port.
	ip = net.IPv4(info[0], info[1], info[2], info[3])
	port = int(info[4])<<8 | int(info[5])
	return ip, port, nil
}
// encodeCompactIPPortInfo encodes an ip and a port number to
// compactIP-address/port info.
func encodeCompactIPPortInfo(ip net.IP, port int) (info string, err error) {
if port > 65535 || port < 0 {
err = errors.New("port should be no greater than 65535 and no less than 0")
return
}
p := int2bytes(uint64(port))
if len(p) < 2 {
p = append(p, p[0])
p[0] = 0
}
info = string(append(ip, p...))
return
}
// getLocalIPs returns local ips.
func getLocalIPs() (ips []string) {
	ips = make([]string, 0, 6)

	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return ips
	}

	for _, a := range addrs {
		// Interface addresses are CIDR-formatted; keep only the IP part.
		if ip, _, parseErr := net.ParseCIDR(a.String()); parseErr == nil {
			ips = append(ips, ip.String())
		}
	}
	return ips
}
// getRemoteIP returns the wlan ip.
// It queries the external service http://ifconfig.me with a 30s timeout and
// returns the response body verbatim.
func getRemoteIP() (ip string, err error) {
	client := &http.Client{
		Timeout: time.Second * 30,
	}

	req, err := http.NewRequest("GET", "http://ifconfig.me", nil)
	if err != nil {
		return
	}

	// A curl-like User-Agent is set, presumably so the service replies with
	// plain text instead of an HTML page — confirm against ifconfig.me.
	req.Header.Set("User-Agent", "curl")
	res, err := client.Do(req)
	if err != nil {
		return
	}
	defer res.Body.Close()

	data, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return
	}

	ip = string(data)
	return
}
// genAddress returns a ip:port address.
func genAddress(ip string, port int) string {
	return ip + ":" + strconv.Itoa(port)
}

View file

@ -1,100 +0,0 @@
package dht
import (
"testing"
)
// TestInt2Bytes checks int2bytes against known minimal big-endian encodings,
// including the single-byte zero case.
func TestInt2Bytes(t *testing.T) {
	cases := []struct {
		in  uint64
		out []byte
	}{
		{0, []byte{0}},
		{1, []byte{1}},
		{256, []byte{1, 0}},
		{22129, []byte{86, 113}},
	}

	for _, c := range cases {
		r := int2bytes(c.in)
		// Compare length first, then each byte.
		if len(r) != len(c.out) {
			t.Fail()
		}
		for i, v := range r {
			if v != c.out[i] {
				t.Fail()
			}
		}
	}
}
// TestBytes2Int checks bytes2int against known big-endian decodings.
func TestBytes2Int(t *testing.T) {
	cases := []struct {
		in  []byte
		out uint64
	}{
		{[]byte{0}, 0},
		{[]byte{1}, 1},
		{[]byte{1, 0}, 256},
		{[]byte{86, 113}, 22129},
	}

	for _, c := range cases {
		if bytes2int(c.in) != c.out {
			t.Fail()
		}
	}
}
// TestDecodeCompactIPPortInfo checks decoding of 6-byte compact peer info
// into an IPv4 address and big-endian port.
func TestDecodeCompactIPPortInfo(t *testing.T) {
	cases := []struct {
		in  string
		out struct {
			ip   string
			port int
		}
	}{
		{"123456", struct {
			ip   string
			port int
		}{"49.50.51.52", 13622}},
		{"abcdef", struct {
			ip   string
			port int
		}{"97.98.99.100", 25958}},
	}

	for _, item := range cases {
		ip, port, err := decodeCompactIPPortInfo(item.in)
		if err != nil || ip.String() != item.out.ip || port != item.out.port {
			t.Fail()
		}
	}
}
// TestEncodeCompactIPPortInfo checks encoding of a 4-byte IP and port into
// 6-byte compact peer info (the inverse of the decode test above).
func TestEncodeCompactIPPortInfo(t *testing.T) {
	cases := []struct {
		in struct {
			ip   []byte
			port int
		}
		out string
	}{
		{struct {
			ip   []byte
			port int
		}{[]byte{49, 50, 51, 52}, 13622}, "123456"},
		{struct {
			ip   []byte
			port int
		}{[]byte{97, 98, 99, 100}, 25958}, "abcdef"},
	}

	for _, item := range cases {
		info, err := encodeCompactIPPortInfo(item.in.ip, item.in.port)
		if err != nil || info != item.out {
			t.Fail()
		}
	}
}

View file

@ -6,9 +6,9 @@ import (
"reflect"
"strings"
"github.com/lbryio/lbry.go/errors"
"github.com/lbryio/lbry.go/util"
"github.com/lbryio/lbry.go/validator"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/util"
"github.com/lbryio/lbry.go/v2/extras/validator"
v "github.com/lbryio/ozzo-validation"
"github.com/spf13/cast"
@ -17,9 +17,28 @@ import (
// ResponseHeaders are returned with each response
var ResponseHeaders map[string]string
// CorsDomains Allowed domains for CORS Policy
var CorsDomains []string
// CorsAllowLocalhost if true localhost connections are always allowed
var CorsAllowLocalhost bool
// Log allows logging of events and errors
var Log = func(*http.Request, *Response, error) {}
// http://choly.ca/post/go-json-marshalling/

// ResponseInfo is the JSON envelope returned by API handlers; Trace is only
// emitted when populated (see TraceEnabled).
type ResponseInfo struct {
	Success bool        `json:"success"`
	Error   *string     `json:"error"`
	Data    interface{} `json:"data"`
	Trace   []string    `json:"_trace,omitempty"`
}
// BuildJSONResponse allows implementers to control the json response form from the api
// (default: two-space-indented JSON of the envelope).
var BuildJSONResponse = func(response ResponseInfo) ([]byte, error) {
	return json.MarshalIndent(&response, "", "  ")
}
// TraceEnabled Attaches a trace field to the JSON response when enabled.
var TraceEnabled = false
@ -64,6 +83,32 @@ func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set(key, value)
}
}
origin := r.Header.Get("origin")
for _, d := range CorsDomains {
if d == origin {
w.Header().Set("Access-Control-Allow-Origin", d)
vary := w.Header().Get("Vary")
if vary != "*" {
if vary != "" {
vary += ", "
}
vary += "Origin"
}
w.Header().Set("Vary", vary)
}
}
if CorsAllowLocalhost && strings.HasPrefix(origin, "http://localhost:") {
w.Header().Set("Access-Control-Allow-Origin", origin)
vary := w.Header().Get("Vary")
if vary != "*" {
if vary != "" {
vary += ", "
}
vary += "Origin"
}
w.Header().Set("Vary", vary)
}
// Stop here if its a preflighted OPTIONS request
if r.Method == "OPTIONS" {
@ -76,6 +121,9 @@ func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if rsp.Error != nil {
ogErr := errors.Unwrap(rsp.Error)
if statusError, ok := ogErr.(StatusError); ok {
if statusError.Status == 0 {
statusError.Status = http.StatusInternalServerError
}
rsp.Status = statusError.Status
} else {
rsp.Status = http.StatusInternalServerError
@ -113,32 +161,40 @@ func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var trace []string
if TraceEnabled && errors.HasTrace(rsp.Error) {
trace = strings.Split(errors.Trace(rsp.Error), "\n")
trace = getTraceFromError(rsp.Error)
}
jsonResponse, err := BuildJSONResponse(ResponseInfo{
Success: success,
Error: errorString,
Data: rsp.Data,
Trace: trace,
})
if err != nil {
Log(r, &rsp, errors.Prefix("Error encoding JSON response: ", err))
jsonResponse, err = BuildJSONResponse(ResponseInfo{
Success: false,
Error: util.PtrToString(err.Error()),
Data: nil,
Trace: getTraceFromError(err),
})
if err != nil {
Log(r, &rsp, errors.Prefix("Error encoding JSON response: ", err))
}
}
w.WriteHeader(rsp.Status)
_, _ = w.Write(jsonResponse)
}
func getTraceFromError(err error) []string {
trace := strings.Split(errors.Trace(err), "\n")
for index, element := range trace {
if strings.HasPrefix(element, "\t") {
trace[index] = strings.Replace(element, "\t", " ", 1)
}
}
}
// http://choly.ca/post/go-json-marshalling/
jsonResponse, err := json.MarshalIndent(&struct {
Success bool `json:"success"`
Error *string `json:"error"`
Data interface{} `json:"data"`
Trace []string `json:"_trace,omitempty"`
}{
Success: success,
Error: errorString,
Data: rsp.Data,
Trace: trace,
}, "", " ")
if err != nil {
Log(r, &rsp, errors.Prefix("Error encoding JSON response: ", err))
}
w.WriteHeader(rsp.Status)
w.Write(jsonResponse)
return trace
}
// IgnoredFormFields are ignored by FormValues() when checking for extraneous fields
@ -154,21 +210,25 @@ func FormValues(r *http.Request, params interface{}, validationRules []*v.FieldR
structValue := ref.Elem()
fields := map[string]bool{}
for i := 0; i < structType.NumField(); i++ {
name := structType.Field(i).Name
underscoredName := util.Underscore(name)
value := strings.TrimSpace(r.FormValue(underscoredName))
fieldName := structType.Field(i).Name
formattedName := util.Underscore(fieldName)
jsonName, ok := structType.Field(i).Tag.Lookup("json")
if ok {
formattedName = jsonName
}
value := strings.TrimSpace(r.FormValue(formattedName))
// if param is not set at all, continue
// comes after call to r.FormValue so form values get parsed internally (if they arent already)
if len(r.Form[underscoredName]) == 0 {
if len(r.Form[formattedName]) == 0 {
continue
}
fields[underscoredName] = true
fields[formattedName] = true
isPtr := false
var finalValue reflect.Value
structField := structValue.FieldByName(name)
structField := structValue.FieldByName(fieldName)
structFieldKind := structField.Kind()
if structFieldKind == reflect.Ptr {
isPtr = true
@ -184,7 +244,7 @@ func FormValues(r *http.Request, params interface{}, validationRules []*v.FieldR
}
castVal, err := cast.ToInt64E(value)
if err != nil {
return errors.Err("%s: must be an integer", underscoredName)
return errors.Err("%s: must be an integer", formattedName)
}
switch structFieldKind {
case reflect.Int:
@ -204,7 +264,7 @@ func FormValues(r *http.Request, params interface{}, validationRules []*v.FieldR
}
castVal, err := cast.ToUint64E(value)
if err != nil {
return errors.Err("%s: must be an unsigned integer", underscoredName)
return errors.Err("%s: must be an unsigned integer", formattedName)
}
switch structFieldKind {
case reflect.Uint:
@ -224,7 +284,7 @@ func FormValues(r *http.Request, params interface{}, validationRules []*v.FieldR
}
if !validator.IsBoolString(value) {
return errors.Err("%s: must be one of the following values: %s",
underscoredName, strings.Join(validator.GetBoolStringValues(), ", "))
formattedName, strings.Join(validator.GetBoolStringValues(), ", "))
}
finalValue = reflect.ValueOf(validator.IsTruthy(value))
@ -234,7 +294,7 @@ func FormValues(r *http.Request, params interface{}, validationRules []*v.FieldR
}
castVal, err := cast.ToFloat64E(value)
if err != nil {
return errors.Err("%s: must be a floating point number", underscoredName)
return errors.Err("%s: must be a floating point number", formattedName)
}
switch structFieldKind {
case reflect.Float32:
@ -243,7 +303,7 @@ func FormValues(r *http.Request, params interface{}, validationRules []*v.FieldR
finalValue = reflect.ValueOf(float64(castVal))
}
default:
return errors.Err("field %s is an unsupported type", name)
return errors.Err("field %s is an unsupported type", fieldName)
}
if isPtr {

View file

@ -7,7 +7,7 @@ import (
"sort"
"strings"
"github.com/lbryio/lbry.go/errors"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/btcsuite/btcutil/base58"
"golang.org/x/crypto/sha3"

779
extras/jsonrpc/daemon.go Normal file
View file

@ -0,0 +1,779 @@
package jsonrpc
import (
"encoding/json"
"fmt"
"net/http"
"reflect"
"sort"
"strconv"
"strings"
"time"
"github.com/fatih/structs"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/mitchellh/mapstructure"
"github.com/shopspring/decimal"
log "github.com/sirupsen/logrus"
"github.com/ybbus/jsonrpc/v2"
)
// DefaultPort is the default lbrynet SDK JSONRPC port.
const DefaultPort = 5279

// Wallet error names surfaced by the SDK (matched against the JSONRPC error
// data "name" field — see WrapError).
const (
	ErrorWalletNotLoaded     = "WalletNotLoadedError"
	ErrorWalletAlreadyLoaded = "WalletAlreadyLoadedError"
	ErrorWalletNotFound      = "WalletNotFoundError"
	ErrorWalletAlreadyExists = "WalletAlreadyExistsError"
)
// Client talks to a lbrynet SDK daemon over JSONRPC.
type Client struct {
	conn    jsonrpc.RPCClient
	address string // daemon URL, kept so SetRPCTimeout can rebuild conn
}

// Error is a JSONRPC-level error returned by the daemon (built by WrapError).
type Error struct {
	Code    int
	Name    string // daemon error class, e.g. ErrorWalletNotLoaded
	Message string
}
// NewClient returns a Client for the given daemon address, defaulting to
// http://localhost:5279 when address is empty.
func NewClient(address string) *Client {
	if address == "" {
		address = "http://localhost:" + strconv.Itoa(DefaultPort)
	}
	return &Client{
		conn:    jsonrpc.NewClient(address),
		address: address,
	}
}
// NewClientAndWait creates a client and blocks until the daemon at address
// answers an account_balance call, polling every 5 seconds.
// NOTE(review): there is no timeout — this can block forever if the daemon
// never comes up.
func NewClientAndWait(address string) *Client {
	d := NewClient(address)
	for {
		_, err := d.AccountBalance(nil)
		if err == nil {
			return d
		}
		time.Sleep(5 * time.Second)
	}
}
// Decode maps a raw JSONRPC result onto targetStruct using json tags and the
// protobuf-aware decode hook.
func Decode(data interface{}, targetStruct interface{}) error {
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Metadata: nil,
		Result:   targetStruct,
		TagName:  "json",
		//WeaklyTypedInput: true,
		DecodeHook: fixDecodeProto,
	})
	if err != nil {
		return errors.Wrap(err, 0)
	}

	if err := decoder.Decode(data); err != nil {
		return errors.Wrap(err, 0)
	}
	return nil
}
// WrapError adds error metadata from JSONRPC error response for clients to access
func WrapError(rpcError *jsonrpc.RPCError) Error {
	e := Error{Code: rpcError.Code, Message: rpcError.Message}
	if d, ok := rpcError.Data.(map[string]interface{}); ok {
		// FIX: guard the assertion — a missing or non-string "name" entry
		// previously panicked instead of returning the wrapped error.
		if name, ok := d["name"].(string); ok {
			e.Name = name
		}
	}
	return e
}
// decodeNumber converts a daemon-supplied number (json.Number or numeric
// string) into a decimal.Decimal, rejecting any other type.
func decodeNumber(data interface{}) (decimal.Decimal, error) {
	var number string

	switch d := data.(type) {
	case json.Number:
		number = d.String()
	case string:
		number = d
	default:
		return decimal.Decimal{}, errors.Err("unexpected number type")
	}

	dec, err := decimal.NewFromString(number)
	if err != nil {
		return decimal.Decimal{}, errors.Wrap(err, 0)
	}

	return dec, nil
}
// debugParams renders a params map as a deterministic (sorted) "k=v" string
// for debug logging. Nil pointer values are skipped; non-nil pointers are
// dereferenced so the log shows the value rather than an address.
func debugParams(params map[string]interface{}) string {
	pairs := make([]string, 0, len(params))
	for key, val := range params {
		rv := reflect.ValueOf(val)
		if rv.Kind() == reflect.Ptr {
			if rv.IsNil() {
				continue // unset optional parameter
			}
			val = rv.Elem().Interface()
		}
		pairs = append(pairs, fmt.Sprintf("%s=%+v", key, val))
	}
	sort.Strings(pairs)
	return strings.Join(pairs, " ")
}
// Error satisfies the error interface, surfacing the daemon's message.
func (e Error) Error() string {
	return "Error in daemon: " + e.Message
}
// callNoDecode performs a raw JSONRPC call and returns the undecoded result.
// Transport failures are wrapped with a stack trace; daemon-level errors are
// converted to the local Error type via WrapError.
func (d *Client) callNoDecode(command string, params map[string]interface{}) (interface{}, error) {
	log.Debugln("jsonrpc: " + command + " " + debugParams(params))
	res, err := d.conn.Call(command, params)
	if err != nil {
		return nil, errors.Wrap(err, 0)
	}
	if res.Error != nil {
		return nil, WrapError(res.Error)
	}
	return res.Result, nil
}
// call performs a JSONRPC call and decodes the result into response.
func (d *Client) call(response interface{}, command string, params map[string]interface{}) error {
	raw, err := d.callNoDecode(command, params)
	if err != nil {
		return err
	}
	return Decode(raw, response)
}
// SetRPCTimeout replaces the underlying JSONRPC transport with one whose HTTP
// client enforces the given per-request timeout. Safe to call any time after
// NewClient; subsequent calls use the new transport.
func (d *Client) SetRPCTimeout(timeout time.Duration) {
d.conn = jsonrpc.NewClientWithOpts(d.address, &jsonrpc.RPCClientOpts{
HTTPClient: &http.Client{Timeout: timeout},
})
}
//============================================
// NEW SDK
//============================================

// AccountSend sends the given LBC amount to toAddress, funded from accountID
// (the daemon's default account when nil). Returns the resulting transaction.
func (d *Client) AccountSend(accountID *string, amount, toAddress string) (*TransactionSummary, error) {
response := new(TransactionSummary)
args := struct {
AccountID *string `json:"account_id"`
Amount string `json:"amount"`
Addresses string `json:"addresses"`
}{
AccountID: accountID,
Amount: amount,
Addresses: toAddress,
}
// structs.DefaultTagName is package-global state in fatih/structs; it is
// set before every Map call in this file so json tags drive the key names.
structs.DefaultTagName = "json"
return response, d.call(response, "account_send", structs.Map(args))
}
// AccountList returns one page of accounts known to the daemon.
func (d *Client) AccountList(page uint64, pageSize uint64) (*AccountListResponse, error) {
response := new(AccountListResponse)
return response, d.call(response, "account_list", map[string]interface{}{
"page": page,
"page_size": pageSize,
})
}

// AccountListForWallet lists the accounts belonging to a specific wallet.
func (d *Client) AccountListForWallet(walletID string) (*AccountListResponse, error) {
response := new(AccountListResponse)
return response, d.call(response, "account_list", map[string]interface{}{"wallet_id": walletID})
}

// SingleAccountList returns the single account identified by accountID,
// still wrapped in the paginated list response shape.
func (d *Client) SingleAccountList(accountID string) (*AccountListResponse, error) {
response := new(AccountListResponse)
return response, d.call(response, "account_list", map[string]interface{}{"account_id": accountID})
}
// AccountSettings are the mutable account fields accepted by account_set.
// Nil fields are omitted from the request and therefore left unchanged.
type AccountSettings struct {
Default *bool `json:"default,omitempty"`
NewName *string `json:"new_name,omitempty"`
ReceivingGap *int `json:"receiving_gap,omitempty"`
ReceivingMaxUses *int `json:"receiving_max_uses,omitempty"`
ChangeGap *int `json:"change_gap,omitempty"`
ChangeMaxUses *int `json:"change_max_uses,omitempty"`
}

// AccountSet applies the given settings to an account and returns the
// account's updated state.
func (d *Client) AccountSet(accountID string, settings AccountSettings) (*Account, error) {
response := new(Account)
args := struct {
AccountID string `json:"account_id"`
AccountSettings `json:",flatten"`
}{
AccountID: accountID,
AccountSettings: settings,
}
structs.DefaultTagName = "json"
return response, d.call(response, "account_set", structs.Map(args))
}
// AccountBalance returns the balance of the given account, or of the
// default account when nil.
func (d *Client) AccountBalance(account *string) (*AccountBalanceResponse, error) {
response := new(AccountBalanceResponse)
return response, d.call(response, "account_balance", map[string]interface{}{
"account_id": account,
})
}

// AccountFund moves amount LBC from one account to another, split across
// `outputs` UTXOs, and broadcasts the transaction. If everything is true the
// amount is ignored and the full balance is moved.
func (d *Client) AccountFund(fromAccount string, toAccount string, amount string, outputs uint64, everything bool) (*AccountFundResponse, error) {
response := new(AccountFundResponse)
return response, d.call(response, "account_fund", map[string]interface{}{
"from_account": fromAccount,
"to_account": toAccount,
"amount": amount,
"outputs": outputs,
"everything": everything,
"broadcast": true,
})
}

// AccountCreate creates a new account; singleKey makes it a single-address
// account instead of a deterministic chain.
func (d *Client) AccountCreate(accountName string, singleKey bool) (*Account, error) {
response := new(Account)
return response, d.call(response, "account_create", map[string]interface{}{
"account_name": accountName,
"single_key": singleKey,
})
}

// AccountRemove removes the account with the given ID from the daemon,
// returning the removed account's details.
func (d *Client) AccountRemove(accountID string) (*Account, error) {
response := new(Account)
return response, d.call(response, "account_remove", map[string]interface{}{
"account_id": accountID,
})
}

// AddressUnused returns an unused receiving address from the given account
// (default account when nil).
func (d *Client) AddressUnused(account *string) (*AddressUnusedResponse, error) {
response := new(AddressUnusedResponse)
return response, d.call(response, "address_unused", map[string]interface{}{
"account_id": account,
})
}

// TransactionShow fetches the transaction with the given txid.
func (d *Client) TransactionShow(txid string) (*TransactionSummary, error) {
response := new(TransactionSummary)
return response, d.call(response, "transaction_show", map[string]interface{}{
"txid": txid,
})
}
// ChannelList returns one page of channel claims for the given account (all
// accounts when nil), optionally scoped to a wallet. Pages are 1-based.
func (d *Client) ChannelList(account *string, page uint64, pageSize uint64, wid *string) (*ChannelListResponse, error) {
	if page == 0 {
		return nil, errors.Err("pages start from 1")
	}
	params := map[string]interface{}{
		"account_id":       account,
		"page":             page,
		"page_size":        pageSize,
		"include_protobuf": true,
		"wallet_id":        wid,
	}
	response := new(ChannelListResponse)
	return response, d.call(response, "channel_list", params)
}
// streamType narrows stream_type values to the constants below.
type streamType string

// Stream types accepted by the daemon for stream claims.
var (
StreamTypeVideo = streamType("video")
StreamTypeAudio = streamType("audio")
StreamTypeImage = streamType("image")
)

// Location describes a geographic location attached to a claim. All fields
// are optional and omitted from the request when nil.
type Location struct {
Country *string `json:"country,omitempty"`
State *string `json:"state,omitempty"`
City *string `json:"city,omitempty"`
PostalCode *string `json:"code,omitempty"`
Latitude *string `json:"latitude,omitempty"`
Longitude *string `json:"longitude,omitempty"`
}

// ClaimCreateOptions holds the metadata shared by channel and stream claims.
// Nil/empty fields are omitted from the request.
type ClaimCreateOptions struct {
Title *string `json:"title,omitempty"`
Description *string `json:"description,omitempty"`
Tags []string `json:"tags,omitempty"`
Languages []string `json:"languages,omitempty"`
Locations []Location `json:"locations,omitempty"`
ThumbnailURL *string `json:"thumbnail_url,omitempty"`
AccountID *string `json:"account_id,omitempty"`
ClaimAddress *string `json:"claim_address,omitempty"`
Preview *bool `json:"preview,omitempty"`
FundingAccountIDs []string `json:"funding_account_ids,omitempty"`
}

// ChannelCreateOptions extends ClaimCreateOptions with channel-specific
// metadata. Its AccountID/FundingAccountIDs shadow the embedded fields of
// the same json names.
type ChannelCreateOptions struct {
ClaimCreateOptions `json:",flatten"`
Email *string `json:"email,omitempty"`
WebsiteURL *string `json:"website_url,omitempty"`
CoverURL *string `json:"cover_url,omitempty"`
Featured []string `json:"featured,omitempty"`
AccountID *string `json:"account_id,omitempty"`
FundingAccountIDs []string `json:"funding_account_ids,omitempty"`
}
// ChannelCreate claims the channel name with a bid of `bid` LBC (formatted to
// six decimal places) and the given metadata. The call blocks until the
// daemon accepts the transaction.
func (d *Client) ChannelCreate(name string, bid float64, options ChannelCreateOptions) (*TransactionSummary, error) {
response := new(TransactionSummary)
args := struct {
Name string `json:"name"`
Bid string `json:"bid"`
FilePath string `json:"file_path,omitempty"`
IncludeProtoBuf bool `json:"include_protobuf"`
ChannelCreateOptions `json:",flatten"`
Blocking bool `json:"blocking"`
}{
Name: name,
Bid: fmt.Sprintf("%.6f", bid),
IncludeProtoBuf: true,
ChannelCreateOptions: options,
Blocking: true,
}
structs.DefaultTagName = "json"
return response, d.call(response, "channel_create", structs.Map(args))
}
// ChannelUpdateOptions wraps ChannelCreateOptions with update-only flags.
// The Clear* fields reset the corresponding list before applying new values.
type ChannelUpdateOptions struct {
ChannelCreateOptions `json:",flatten"`
NewSigningKey *bool `json:"new_signing_key,omitempty"`
ClearFeatured *bool `json:"clear_featured,omitempty"`
ClearTags *bool `json:"clear_tags,omitempty"`
ClearLanguages *bool `json:"clear_languages,omitempty"`
ClearLocations *bool `json:"clear_locations,omitempty"`
Bid *string `json:"bid,omitempty"`
}

// ChannelUpdate modifies an existing channel claim identified by claimID,
// blocking until the daemon accepts the transaction.
func (d *Client) ChannelUpdate(claimID string, options ChannelUpdateOptions) (*TransactionSummary, error) {
response := new(TransactionSummary)
args := struct {
ClaimID string `json:"claim_id"`
IncludeProtoBuf bool `json:"include_protobuf"`
*ChannelUpdateOptions `json:",flatten"`
Blocking bool `json:"blocking"`
}{
ClaimID: claimID,
IncludeProtoBuf: true,
ChannelUpdateOptions: &options,
Blocking: true,
}
structs.DefaultTagName = "json"
return response, d.call(response, "channel_update", structs.Map(args))
}
// StreamCreateOptions holds the metadata for publishing a stream (file)
// claim. Nil fields are omitted from the request.
type StreamCreateOptions struct {
ClaimCreateOptions `json:",flatten"`
Fee *Fee `json:",omitempty,flatten"`
Author *string `json:"author,omitempty"`
License *string `json:"license,omitempty"`
LicenseURL *string `json:"license_url,omitempty"`
StreamType *streamType `json:"stream_type,omitempty"`
ReleaseTime *int64 `json:"release_time,omitempty"`
Duration *uint64 `json:"duration,omitempty"`
Width *uint `json:"width,omitempty"`
Height *uint `json:"height,omitempty"`
Preview *string `json:"preview,omitempty"`
AllowDuplicateName *bool `json:"allow_duplicate_name,omitempty"`
ChannelName *string `json:"channel_name,omitempty"`
ChannelID *string `json:"channel_id,omitempty"`
ChannelAccountID *string `json:"channel_account_id,omitempty"`
AccountID *string `json:"account_id,omitempty"`
}

// StreamCreate publishes the file at filePath under the claim `name` with a
// bid of `bid` LBC (formatted to six decimal places), blocking until the
// daemon accepts the transaction.
func (d *Client) StreamCreate(name, filePath string, bid float64, options StreamCreateOptions) (*TransactionSummary, error) {
response := new(TransactionSummary)
args := struct {
Name string `json:"name"`
Bid string `json:"bid"`
FilePath string `json:"file_path,omitempty"`
FileSize *string `json:"file_size,omitempty"`
IncludeProtoBuf bool `json:"include_protobuf"`
Blocking bool `json:"blocking"`
*StreamCreateOptions `json:",flatten"`
}{
Name: name,
FilePath: filePath,
Bid: fmt.Sprintf("%.6f", bid),
IncludeProtoBuf: true,
Blocking: true,
StreamCreateOptions: &options,
}
structs.DefaultTagName = "json"
return response, d.call(response, "stream_create", structs.Map(args))
}
// StreamAbandon abandons the stream claim at txID:nOut, returning its amount
// to the wallet. When blocking is true the call waits for the transaction to
// be accepted by the daemon.
func (d *Client) StreamAbandon(txID string, nOut uint64, accountID *string, blocking bool) (*ClaimAbandonResponse, error) {
	response := new(ClaimAbandonResponse)
	err := d.call(response, "stream_abandon", map[string]interface{}{
		"txid":             txID,
		"nout":             nOut,
		"account_id":       accountID,
		"include_protobuf": true,
		// Was hard-coded to true, silently ignoring the caller's blocking
		// argument.
		"blocking": blocking,
	})
	if err != nil {
		return nil, err
	}
	return response, nil
}
// StreamUpdateOptions wraps StreamCreateOptions with update-only flags.
// The Clear* fields reset the corresponding list before applying new values.
type StreamUpdateOptions struct {
ClearTags *bool `json:"clear_tags,omitempty"`
ClearLanguages *bool `json:"clear_languages,omitempty"`
ClearLocations *bool `json:"clear_locations,omitempty"`
Name *string `json:"name,omitempty"`
FilePath *string `json:"file_path,omitempty"`
FileSize *uint64 `json:"file_size,omitempty"`
Bid *string `json:"bid,omitempty"`
*StreamCreateOptions `json:",flatten"`
}

// StreamUpdate modifies an existing stream claim identified by claimID,
// blocking until the daemon accepts the transaction.
func (d *Client) StreamUpdate(claimID string, options StreamUpdateOptions) (*TransactionSummary, error) {
response := new(TransactionSummary)
args := struct {
ClaimID string `json:"claim_id"`
IncludeProtoBuf bool `json:"include_protobuf"`
*StreamUpdateOptions `json:",flatten"`
Blocking bool `json:"blocking"`
}{
ClaimID: claimID,
IncludeProtoBuf: true,
StreamUpdateOptions: &options,
Blocking: true,
}
structs.DefaultTagName = "json"
return response, d.call(response, "stream_update", structs.Map(args))
}
// ChannelAbandon abandons the channel claim at txID:nOut, returning its
// amount to the wallet. When blocking is true the call waits for the
// transaction to be accepted by the daemon.
func (d *Client) ChannelAbandon(txID string, nOut uint64, accountID *string, blocking bool) (*TransactionSummary, error) {
	response := new(TransactionSummary)
	err := d.call(response, "channel_abandon", map[string]interface{}{
		"txid":             txID,
		"nout":             nOut,
		"account_id":       accountID,
		"include_protobuf": true,
		// Was hard-coded to true, silently ignoring the caller's blocking
		// argument.
		"blocking": blocking,
	})
	if err != nil {
		return nil, err
	}
	return response, nil
}
// AddressList returns one page of wallet addresses, optionally filtered by
// account and/or a specific address.
func (d *Client) AddressList(account *string, address *string, page uint64, pageSize uint64) (*AddressListResponse, error) {
response := new(AddressListResponse)
args := struct {
AccountID *string `json:"account_id,omitempty"`
Address *string `json:"address,omitempty"`
Page uint64 `json:"page"`
PageSize uint64 `json:"page_size"`
}{
AccountID: account,
Address: address,
Page: page,
PageSize: pageSize,
}
structs.DefaultTagName = "json"
return response, d.call(response, "address_list", structs.Map(args))
}
// StreamList returns one page of stream claims for the given account (all
// accounts when nil).
// NOTE(review): unlike ClaimList/ChannelList this does not reject page == 0;
// confirm whether the daemon tolerates it.
func (d *Client) StreamList(account *string, page uint64, pageSize uint64) (*StreamListResponse, error) {
response := new(StreamListResponse)
err := d.call(response, "stream_list", map[string]interface{}{
"account_id": account,
"include_protobuf": true,
"page": page,
"page_size": pageSize,
})
if err != nil {
return nil, err
}
return response, nil
}

// ClaimList returns one page of claims for the given account (all accounts
// when nil). Pages are 1-based.
func (d *Client) ClaimList(account *string, page uint64, pageSize uint64) (*ClaimListResponse, error) {
if page == 0 {
return nil, errors.Err("pages start from 1")
}
response := new(ClaimListResponse)
err := d.call(response, "claim_list", map[string]interface{}{
"account_id": account,
"page": page,
"page_size": pageSize,
"include_protobuf": true,
})
if err != nil {
return nil, err
}
return response, nil
}
// Status returns the daemon's status report.
func (d *Client) Status() (*StatusResponse, error) {
response := new(StatusResponse)
return response, d.call(response, "status", map[string]interface{}{})
}

// TransactionList returns one page of transaction history, optionally
// filtered by account and/or wallet.
func (d *Client) TransactionList(account *string, wallet *string, page uint64, pageSize uint64) (*TransactionListResponse, error) {
response := new(TransactionListResponse)
return response, d.call(response, "transaction_list", map[string]interface{}{
"account_id": account,
"wallet_id": wallet,
"page": page,
"page_size": pageSize,
})
}

// UTXOList returns one page of unspent transaction outputs for the given
// account (all accounts when nil).
func (d *Client) UTXOList(account *string, page uint64, pageSize uint64) (*UTXOListResponse, error) {
response := new(UTXOListResponse)
return response, d.call(response, "utxo_list", map[string]interface{}{
"account_id": account,
"page": page,
"page_size": pageSize,
})
}

// UTXORelease releases any reserved UTXOs for the given account (all
// accounts when nil).
func (d *Client) UTXORelease(account *string) (*UTXOReleaseResponse, error) {
response := new(UTXOReleaseResponse)
return response, d.call(response, "utxo_release", map[string]interface{}{
"account_id": account,
})
}

// Get resolves a single LBRY URI and starts downloading its content.
func (d *Client) Get(uri string) (*GetResponse, error) {
response := new(GetResponse)
return response, d.call(response, "get", map[string]interface{}{
"uri": uri,
"include_protobuf": true,
})
}

// FileList returns one page of files known to the daemon.
func (d *Client) FileList(page uint64, pageSize uint64) (*FileListResponse, error) {
response := new(FileListResponse)
return response, d.call(response, "file_list", map[string]interface{}{
"include_protobuf": true,
"page": page,
"page_size": pageSize,
})
}

// Version returns the daemon's version information.
func (d *Client) Version() (*VersionResponse, error) {
response := new(VersionResponse)
return response, d.call(response, "version", map[string]interface{}{})
}

// Resolve resolves one or more LBRY URLs into claims.
func (d *Client) Resolve(urls string) (*ResolveResponse, error) {
response := new(ResolveResponse)
return response, d.call(response, "resolve", map[string]interface{}{
"urls": urls,
"include_protobuf": true,
})
}
// ClaimSearchArgs are the filters for claim_search. Nil/empty fields are
// omitted from the request; Page/PageSize are always sent.
type ClaimSearchArgs struct {
ClaimID *string `json:"claim_id,omitempty"`
TXID *string `json:"txid,omitempty"`
Nout *uint `json:"nout,omitempty"`
Name *string `json:"name,omitempty"`
ClaimType []string `json:"claim_type,omitempty"`
OrderBy []string `json:"order_by,omitempty"`
LimitClaimsPerChannel *int `json:"limit_claims_per_channel,omitempty"`
HasNoSource *bool `json:"has_no_source,omitempty"`
ReleaseTime string `json:"release_time,omitempty"`
ChannelIDs []string `json:"channel_ids,omitempty"`
NoTotals *bool `json:"no_totals,omitempty"`
IncludeProtobuf *bool `json:"include_protobuf,omitempty"`
AnyTags []string `json:"any_tags,omitempty"`
Page uint64 `json:"page"`
PageSize uint64 `json:"page_size"`
}

// ClaimSearch searches the blockchain for claims matching args. Unless the
// caller says otherwise, NoTotals and IncludeProtobuf default to true.
func (d *Client) ClaimSearch(args ClaimSearchArgs) (*ClaimSearchResponse, error) {
response := new(ClaimSearchResponse)
if args.NoTotals == nil {
nototals := true
args.NoTotals = &nototals
}
if args.IncludeProtobuf == nil {
include := true
args.IncludeProtobuf = &include
}
structs.DefaultTagName = "json"
return response, d.call(response, "claim_search", structs.Map(args))
}
// ChannelExport exports a channel's signing data (for later ChannelImport),
// identified by claim ID and optionally narrowed by name/account.
func (d *Client) ChannelExport(channelClaimID string, channelName, accountID *string) (*ChannelExportResponse, error) {
response := new(ChannelExportResponse)
return response, d.call(response, "channel_export", map[string]interface{}{
"channel_id": channelClaimID,
"channel_name": channelName,
"account_id": accountID,
})
}

// ChannelImport imports previously exported channel data into the given
// wallet (default wallet when nil).
func (d *Client) ChannelImport(key string, walletID *string) (*ChannelImportResponse, error) {
response := new(ChannelImportResponse)
return response, d.call(response, "channel_import", map[string]interface{}{
"channel_data": key,
"wallet_id": walletID,
})
}

// SupportList returns one page of supports for the given account (all
// accounts when nil).
func (d *Client) SupportList(accountID *string, page uint64, pageSize uint64) (*SupportListResponse, error) {
response := new(SupportListResponse)
return response, d.call(response, "support_list", map[string]interface{}{
"account_id": accountID,
"page": page,
"page_size": pageSize,
})
}
// SupportCreate creates a support of `amount` LBC for the claim with claimID,
// blocking until the transaction is accepted. tip marks the support as a tip
// (sent to the claim owner) when true; funding accounts may be restricted
// via fundingAccountIDs.
// NOTE(review): the walletID parameter is accepted and forwarded via the
// args struct's WalletID field only if set by callers — it is currently never
// assigned, so wallet_id is always omitted; confirm intent.
func (d *Client) SupportCreate(claimID string, amount string, tip *bool, accountID *string, fundingAccountIDs []string, walletID *string) (*TransactionSummary, error) {
response := new(TransactionSummary)
args := struct {
ClaimID string `json:"claim_id"`
Amount string `json:"amount"`
Tip *bool `json:"tip,omitempty"`
AccountID *string `json:"account_id,omitempty"`
FundingAccountIDs []string `json:"funding_account_ids,omitempty"`
Preview bool `json:"preview,omitempty"`
Blocking bool `json:"blocking,omitempty"`
WalletID *string `json:"wallet_id,omitempty"`
}{
ClaimID: claimID,
AccountID: accountID,
Blocking: true,
Amount: amount,
FundingAccountIDs: fundingAccountIDs,
Preview: false,
Tip: tip,
}
structs.DefaultTagName = "json"
return response, d.call(response, "support_create", structs.Map(args))
}
// SupportAbandon abandons a support, identified either by claimID or by
// txid+nout, blocking until the transaction is accepted. keep, when set,
// is forwarded so part of the support amount can be kept as a new support.
func (d *Client) SupportAbandon(claimID *string, txid *string, nout *uint, keep *string, accountID *string) (*TransactionSummary, error) {
	if claimID == nil && (txid == nil || nout == nil) {
		return nil, errors.Err("either claimID or txid+nout must be supplied")
	}
	response := new(TransactionSummary)
	args := struct {
		ClaimID *string `json:"claim_id,omitempty"`
		// BUG FIX: TxID was tagged `json:"claim_id"`, colliding with ClaimID,
		// so the txid was never sent under its own key.
		TxID      *string `json:"txid,omitempty"`
		Nout      *uint   `json:"nout,omitempty"`
		Keep      *string `json:"keep,omitempty"` // was accepted but silently dropped
		AccountID *string `json:"account_id,omitempty"`
		Preview   bool    `json:"preview,omitempty"`
		Blocking  bool    `json:"blocking,omitempty"`
	}{
		ClaimID:   claimID,
		TxID:      txid,
		Nout:      nout,
		Keep:      keep,
		AccountID: accountID,
		Blocking:  true,
		Preview:   false,
	}
	structs.DefaultTagName = "json"
	return response, d.call(response, "support_abandon", structs.Map(args))
}
// TxoSpend spends transaction outputs matched by any combination of type
// ("support", "channel", ...), claim ID, txid, channel ID, name, or account.
// At least one selector must be supplied. The call blocks and returns the
// full resulting transactions.
func (d *Client) TxoSpend(txoType, claimID, txid, channelID, name, accountID *string) (*[]TransactionSummary, error) {
	if txoType == nil && claimID == nil && txid == nil && channelID == nil && name == nil {
		return nil, errors.Err("either txoType or claimID or channelID or name or txid must be supplied")
	}
	response := new([]TransactionSummary)
	args := struct {
		ClaimID   *string `json:"claim_id,omitempty"`
		ChannelID *string `json:"channel_id,omitempty"`
		Name      *string `json:"name,omitempty"`
		// BUG FIX: TxID was tagged `json:"claim_id"`, colliding with ClaimID,
		// so filtering by txid never reached the daemon correctly.
		TxID          *string `json:"txid,omitempty"`
		Type          *string `json:"type,omitempty"`
		AccountID     *string `json:"account_id,omitempty"`
		Preview       bool    `json:"preview,omitempty"`
		Blocking      bool    `json:"blocking,omitempty"`
		IncludeFullTx bool    `json:"include_full_tx,omitempty"`
	}{
		ClaimID:       claimID,
		ChannelID:     channelID,
		Name:          name,
		Type:          txoType,
		AccountID:     accountID,
		TxID:          txid,
		Blocking:      true,
		Preview:       false,
		IncludeFullTx: true,
	}
	structs.DefaultTagName = "json"
	return response, d.call(response, "txo_spend", structs.Map(args))
}
// AccountAdd adds an existing account to the daemon from a seed, private key,
// or public key (exactly one should normally be supplied; nil fields are
// omitted). singleKey and walletID are optional.
func (d *Client) AccountAdd(accountName string, seed *string, privateKey *string, publicKey *string, singleKey *bool, walletID *string) (*Account, error) {
response := new(Account)
args := struct {
AccountName string `json:"account_name"`
Seed *string `json:"seed,omitempty"`
PrivateKey *string `json:"private_key,omitempty"`
PublicKey *string `json:"public_key,omitempty"`
SingleKey *bool `json:"single_key,omitempty"`
WalletID *string `json:"wallet_id,omitempty"`
}{
AccountName: accountName,
Seed: seed,
PrivateKey: privateKey,
PublicKey: publicKey,
SingleKey: singleKey,
WalletID: walletID,
}
structs.DefaultTagName = "json"
return response, d.call(response, "account_add", structs.Map(args))
}

// WalletCreateOpts are the optional parameters for WalletCreate. ID is
// overwritten with the id passed to WalletCreate.
type WalletCreateOpts struct {
ID string `json:"wallet_id"`
SkipOnStartup bool `json:"skip_on_startup,omitempty"`
CreateAccount bool `json:"create_account,omitempty"`
SingleKey bool `json:"single_key,omitempty"`
}

// WalletCreate creates a new wallet with the given id; opts may be nil for
// daemon defaults.
func (d *Client) WalletCreate(id string, opts *WalletCreateOpts) (*Wallet, error) {
response := new(Wallet)
if opts == nil {
opts = &WalletCreateOpts{}
}
opts.ID = id
structs.DefaultTagName = "json"
return response, d.call(response, "wallet_create", structs.Map(opts))
}
// WalletAdd asks the daemon to add (load) the wallet with the given id.
func (d *Client) WalletAdd(id string) (*Wallet, error) {
response := new(Wallet)
return response, d.call(response, "wallet_add", map[string]interface{}{"wallet_id": id})
}
// WalletList returns one page of wallets loaded in the daemon; a non-empty
// id restricts the listing to that single wallet.
func (d *Client) WalletList(id string, page uint64, pageSize uint64) (*WalletList, error) {
	params := map[string]interface{}{
		"page":      page,
		"page_size": pageSize,
	}
	if id != "" {
		params["wallet_id"] = id
	}
	response := new(WalletList)
	return response, d.call(response, "wallet_list", params)
}
// WalletRemove asks the daemon to unload the wallet with the given id.
func (d *Client) WalletRemove(id string) (*Wallet, error) {
response := new(Wallet)
return response, d.call(response, "wallet_remove", map[string]interface{}{"wallet_id": id})
}

View file

@ -0,0 +1,845 @@
package jsonrpc
import (
"encoding/json"
"fmt"
"math/rand"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/shopspring/decimal"
"github.com/stretchr/testify/assert"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/util"
)
// prettyPrint dumps i to stdout as tab-indented JSON for manual inspection.
// Marshal errors are deliberately ignored; this is test-only output.
func prettyPrint(i interface{}) {
	out, _ := json.MarshalIndent(i, "", "\t")
	fmt.Println(string(out))
}
// TestMain seeds math/rand (used below to generate unique account and channel
// names) before running the suite. These tests create a client via
// NewClient(""), i.e. they exercise a daemon on the default local port.
func TestMain(m *testing.M) {
rand.Seed(time.Now().UnixNano())
code := m.Run()
os.Exit(code)
}
func TestClient_AccountFund(t *testing.T) {
d := NewClient("")
accounts, err := d.AccountList(1, 20)
if err != nil {
t.Error(err)
return
}
account := (accounts.Items)[0].ID
balanceString, err := d.AccountBalance(&account)
if err != nil {
t.Error(err)
return
}
balance, err := strconv.ParseFloat(balanceString.Available.String(), 64)
if err != nil {
t.Error(err)
return
}
got, err := d.AccountFund(account, account, fmt.Sprintf("%f", balance/2.0), 40, false)
if err != nil {
t.Error(err)
return
}
prettyPrint(*got)
}
func TestClient_AccountSend(t *testing.T) {
d := NewClient("")
accounts, err := d.AccountList(1, 20)
if !assert.NoError(t, err) {
return
}
if !assert.NotEmpty(t, accounts.Items[1].ID) {
return
}
account := (accounts.Items)[1].ID
addressess, err := d.AddressList(&account, nil, 1, 20)
if !assert.NoError(t, err) {
return
}
if !assert.NotEmpty(t, addressess.Items) {
return
}
got, err := d.AccountSend(&account, "0.01", string(addressess.Items[0].Address))
if !assert.NoError(t, err) {
return
}
prettyPrint(*got)
}
// TestClient_AccountList smoke-tests account_list against a live daemon.
func TestClient_AccountList(t *testing.T) {
d := NewClient("")
got, err := d.AccountList(1, 20)
if err != nil {
t.Error(err)
return
}
prettyPrint(*got)
}
// TestClient_SingleAccountList creates an account and verifies it can be
// fetched back individually by ID with the same name.
func TestClient_SingleAccountList(t *testing.T) {
	d := NewClient("")
	name := "test" + fmt.Sprintf("%d", rand.Int()) + "@lbry.com"
	createdAccount, err := d.AccountCreate(name, false)
	if err != nil {
		t.Fatal(err)
	}
	account, err := d.SingleAccountList(createdAccount.ID)
	// Fail fast here: the original only called t.Error, then dereferenced
	// the nil *account in prettyPrint (panic) before a redundant second
	// err check.
	if err != nil {
		t.Fatal(err)
	}
	prettyPrint(*createdAccount)
	prettyPrint(*account)
	if len(account.Items) == 0 {
		t.Fatal("expected at least one account in response")
	}
	if account.Items[0].Name != name {
		t.Fatalf("account name mismatch: %v != %v", account.Items[0].Name, name)
	}
}
// TestClient_AccountBalance smoke-tests account_balance for the default account.
func TestClient_AccountBalance(t *testing.T) {
d := NewClient("")
got, err := d.AccountBalance(nil)
if err != nil {
t.Error(err)
return
}
prettyPrint(*got)
}

// TestClient_AddressUnused smoke-tests address_unused for the default account.
func TestClient_AddressUnused(t *testing.T) {
d := NewClient("")
got, err := d.AddressUnused(nil)
if err != nil {
t.Error(err)
return
}
prettyPrint(*got)
}

// TestClient_ChannelList smoke-tests channel_list across all accounts.
func TestClient_ChannelList(t *testing.T) {
d := NewClient("")
got, err := d.ChannelList(nil, 1, 50, nil)
if err != nil {
t.Error(err)
return
}
prettyPrint(*got)
}
var channelID string
func TestClient_ChannelCreate(t *testing.T) {
d := NewClient("")
got, err := d.ChannelCreate("@Test"+fmt.Sprintf("%d", time.Now().Unix()), 1.337, ChannelCreateOptions{
ClaimCreateOptions: ClaimCreateOptions{
Title: util.PtrToString("Mess with the channels"),
Description: util.PtrToString("And you'll get what you deserve"),
Tags: []string{"we", "got", "tags"},
Languages: []string{"en-US"},
Locations: []Location{{
Country: util.PtrToString("CH"),
State: util.PtrToString("Ticino"),
City: util.PtrToString("Lugano"),
}},
ThumbnailURL: util.PtrToString("https://scrn.storni.info/2022-06-10_17-18-29-409175881.png"),
},
Email: util.PtrToString("niko@lbry.com"),
WebsiteURL: util.PtrToString("https://lbry.com"),
CoverURL: util.PtrToString("https://scrn.storni.info/2022-06-10_17-18-29-409175881.png"),
})
if err != nil {
t.Error(err)
return
}
channelID = got.Outputs[0].ClaimID
prettyPrint(*got)
}
func TestClient_StreamCreate(t *testing.T) {
_ = os.Setenv("BLOCKCHAIN_NAME", "lbrycrd_regtest")
d := NewClient("")
addressResponse, err := d.AddressUnused(nil)
if err != nil {
t.Error(err)
return
}
address := string(*addressResponse)
f, e := os.OpenFile("/tmp/test.txt", os.O_RDONLY|os.O_CREATE, 0666)
if e != nil {
t.Error(e)
return
}
_, _ = f.WriteString("test")
got, err := d.StreamCreate("test"+fmt.Sprintf("%d", time.Now().Unix()), "/tmp/test.txt", 1.437, StreamCreateOptions{
ClaimCreateOptions: ClaimCreateOptions{
Title: util.PtrToString("This is a Test Title" + fmt.Sprintf("%d", time.Now().Unix())),
Description: util.PtrToString("My Special Description"),
Tags: []string{"nsfw", "test"},
Languages: []string{"en-US", "fr-CH"},
Locations: []Location{{
Country: util.PtrToString("CH"),
State: util.PtrToString("Ticino"),
City: util.PtrToString("Lugano"),
PostalCode: util.PtrToString("6900"),
Latitude: nil,
Longitude: nil,
}},
ThumbnailURL: util.PtrToString("https://scrn.storni.info/2019-01-18_16-37-39-098537783.png"),
AccountID: nil,
ClaimAddress: &address,
Preview: nil,
},
Fee: &Fee{
FeeCurrency: "LBC",
FeeAmount: decimal.NewFromFloat(1.0),
FeeAddress: &address,
},
Author: util.PtrToString("Niko"),
License: util.PtrToString("FREE"),
LicenseURL: nil,
ReleaseTime: nil,
Duration: nil,
Preview: nil,
AllowDuplicateName: nil,
ChannelName: nil,
ChannelID: util.PtrToString(channelID),
ChannelAccountID: nil,
})
if err != nil {
t.Error(err)
return
}
prettyPrint(*got)
}
func TestClient_ChannelUpdate(t *testing.T) {
d := NewClient("")
got, err := d.ChannelUpdate(channelID, ChannelUpdateOptions{
Bid: util.PtrToString("0.01"),
ClearLanguages: util.PtrToBool(true),
ClearLocations: util.PtrToBool(true),
ClearTags: util.PtrToBool(true),
ChannelCreateOptions: ChannelCreateOptions{
ClaimCreateOptions: ClaimCreateOptions{
Title: util.PtrToString("Mess with the channels"),
Description: util.PtrToString("And you'll get what you deserve"),
Tags: []string{"we", "got", "more", "tags"},
Languages: []string{"en-US"},
Locations: []Location{{
Country: util.PtrToString("CH"),
State: util.PtrToString("Ticino"),
City: util.PtrToString("Lugano"),
}},
ThumbnailURL: util.PtrToString("https://scrn.storni.info/2019-04-12_15-43-25-001592625.png"),
},
Email: util.PtrToString("niko@lbry.com"),
WebsiteURL: util.PtrToString("https://lbry.com"),
CoverURL: util.PtrToString("https://scrn.storni.info/2019-04-12_15-43-25-001592625.png"),
}})
if err != nil {
t.Error(err)
return
}
prettyPrint(*got)
}
func TestClient_ChannelAbandon(t *testing.T) {
d := NewClient("")
channelName := "@TestToDelete" + fmt.Sprintf("%d", time.Now().Unix())
channelResponse, err := d.ChannelCreate(channelName, 13.37, ChannelCreateOptions{
ClaimCreateOptions: ClaimCreateOptions{
Title: util.PtrToString("Mess with the channels"),
Description: util.PtrToString("And you'll get what you deserve"),
Tags: []string{"we", "got", "tags"},
Languages: []string{"en-US"},
Locations: []Location{{
Country: util.PtrToString("CH"),
State: util.PtrToString("Ticino"),
City: util.PtrToString("Lugano"),
}},
ThumbnailURL: util.PtrToString("https://scrn.storni.info/2019-04-12_15-43-25-001592625.png"),
},
Email: util.PtrToString("niko@lbry.com"),
WebsiteURL: util.PtrToString("https://lbry.com"),
CoverURL: util.PtrToString("https://scrn.storni.info/2019-04-12_15-43-25-001592625.png"),
})
if err != nil {
t.Error(err)
return
}
txID := channelResponse.Outputs[0].Txid
nout := channelResponse.Outputs[0].Nout
time.Sleep(10 * time.Second)
got, err := d.ChannelAbandon(txID, nout, nil, false)
if err != nil {
t.Error(err)
return
}
prettyPrint(*got)
}
func TestClient_AddressList(t *testing.T) {
d := NewClient("")
got, err := d.AddressList(nil, nil, 1, 20)
if err != nil {
t.Error(err)
return
}
prettyPrint(*got)
}
func TestClient_ClaimList(t *testing.T) {
_ = os.Setenv("BLOCKCHAIN_NAME", "lbrycrd_regtest")
d := NewClient("")
got, err := d.ClaimList(nil, 1, 10)
if err != nil {
t.Error(err)
return
}
prettyPrint(*got)
}
func TestClient_StreamList(t *testing.T) {
_ = os.Setenv("BLOCKCHAIN_NAME", "lbrycrd_regtest")
d := NewClient("")
got, err := d.StreamList(nil, 1, 20)
if err != nil {
t.Error(err)
return
}
prettyPrint(*got)
}
func TestClient_TransactionList(t *testing.T) {
_ = os.Setenv("BLOCKCHAIN_NAME", "lbrycrd_regtest")
d := NewClient("")
got, err := d.TransactionList(nil, nil, 1, 20)
if err != nil {
t.Error(err)
return
}
prettyPrint(*got)
}
func TestClient_SupportTest(t *testing.T) {
_ = os.Setenv("BLOCKCHAIN_NAME", "lbrycrd_regtest")
d := NewClient("")
got, err := d.ChannelCreate("@Test"+fmt.Sprintf("%d", time.Now().Unix()), 13.37, ChannelCreateOptions{
ClaimCreateOptions: ClaimCreateOptions{
Title: util.PtrToString("Mess with the channels"),
Description: util.PtrToString("And you'll get what you deserve"),
Tags: []string{"we", "got", "tags"},
Languages: []string{"en-US"},
Locations: []Location{{
Country: util.PtrToString("CH"),
State: util.PtrToString("Ticino"),
City: util.PtrToString("Lugano"),
}},
ThumbnailURL: util.PtrToString("https://scrn.storni.info/2019-04-12_15-43-25-001592625.png"),
},
Email: util.PtrToString("niko@lbry.com"),
WebsiteURL: util.PtrToString("https://lbry.com"),
CoverURL: util.PtrToString("https://scrn.storni.info/2019-04-12_15-43-25-001592625.png"),
})
if err != nil {
t.Error(err)
return
}
time.Sleep(10 * time.Second)
got2, err := d.SupportCreate(got.Outputs[0].ClaimID, "1.0", util.PtrToBool(true), nil, nil, nil)
if err != nil {
t.Error(err)
return
}
prettyPrint(*got2)
got3, err := d.SupportList(nil, 1, 10)
if err != nil {
t.Error(err)
return
}
found := false
for _, support := range got3.Items {
if support.ClaimID == got.Outputs[0].ClaimID {
found = true
}
}
if !found {
t.Error(errors.Err("support not found"))
return
}
prettyPrint(*got3)
got4, err := d.SupportAbandon(util.PtrToString(got.Outputs[0].ClaimID), nil, nil, nil, nil)
if err != nil {
t.Error(err)
return
}
prettyPrint(*got4)
}
func TestClient_TxoSpendTest(t *testing.T) {
_ = os.Setenv("BLOCKCHAIN_NAME", "lbrycrd_regtest")
d := NewClient("")
got, err := d.ChannelCreate("@Test"+fmt.Sprintf("%d", time.Now().Unix()), 13.37, ChannelCreateOptions{
ClaimCreateOptions: ClaimCreateOptions{
Title: util.PtrToString("Mess with the channels"),
Description: util.PtrToString("And you'll get what you deserve"),
Tags: []string{"we", "got", "tags"},
Languages: []string{"en-US"},
Locations: []Location{{
Country: util.PtrToString("CH"),
State: util.PtrToString("Ticino"),
City: util.PtrToString("Lugano"),
}},
ThumbnailURL: util.PtrToString("https://scrn.storni.info/2019-04-12_15-43-25-001592625.png"),
},
Email: util.PtrToString("niko@lbry.com"),
WebsiteURL: util.PtrToString("https://lbry.com"),
CoverURL: util.PtrToString("https://scrn.storni.info/2019-04-12_15-43-25-001592625.png"),
})
if err != nil {
t.Error(err)
return
}
time.Sleep(10 * time.Second)
got2, err := d.SupportCreate(got.Outputs[0].ClaimID, "1.0", util.PtrToBool(true), nil, nil, nil)
if err != nil {
t.Error(err)
return
}
prettyPrint(*got2)
got3, err := d.SupportList(nil, 1, 10)
if err != nil {
t.Error(err)
return
}
found := false
for _, support := range got3.Items {
if support.ClaimID == got.Outputs[0].ClaimID {
found = true
}
}
if !found {
t.Error(errors.Err("support not found"))
return
}
prettyPrint(*got3)
got4, err := d.TxoSpend(util.PtrToString("support"), util.PtrToString(got.Outputs[0].ClaimID), nil, nil, nil, nil)
if err != nil {
t.Error(err)
return
}
prettyPrint(*got4)
time.Sleep(10 * time.Second)
got3, err = d.SupportList(nil, 1, 10)
if err != nil {
t.Error(err)
return
}
found = false
for _, support := range got3.Items {
if support.ClaimID == got.Outputs[0].ClaimID {
found = true
}
}
if found {
t.Error(errors.Err("support found even though it should have been abandoned"))
return
}
prettyPrint(*got3)
got4, err = d.TxoSpend(util.PtrToString("channel"), util.PtrToString(got.Outputs[0].ClaimID), nil, nil, nil, nil)
if err != nil {
t.Error(err)
return
}
prettyPrint(*got4)
time.Sleep(10 * time.Second)
got5, err := d.ClaimList(nil, 1, 50)
if err != nil {
t.Error(err)
return
}
for _, claim := range got5.Claims {
if claim.ClaimID == got.Outputs[0].ClaimID {
t.Error(errors.Err("claim found even though it should have been abandoned"))
return
}
}
prettyPrint(*got5)
}
// TestClient_ClaimSearch queries the daemon for no-source claims published by
// the test channel, ordered oldest-first by release time, and dumps the result.
func TestClient_ClaimSearch(t *testing.T) {
	client := NewClient("")
	resp, err := client.ClaimSearch(ClaimSearchArgs{
		ChannelIDs:  []string{channelID},
		ReleaseTime: ">1633350820",
		HasNoSource: util.PtrToBool(true),
		OrderBy:     []string{"^release_time"},
		Page:        1,
		PageSize:    20,
	})
	if err != nil {
		t.Error(err)
		return
	}
	prettyPrint(*resp)
}
// TestClient_Status is a smoke test: the daemon's status call should succeed.
func TestClient_Status(t *testing.T) {
	client := NewClient("")
	status, err := client.Status()
	if err != nil {
		t.Error(err)
		return
	}
	prettyPrint(*status)
}
// TestClient_UTXOList lists the first page of wallet UTXOs and dumps them.
func TestClient_UTXOList(t *testing.T) {
	client := NewClient("")
	utxos, err := client.UTXOList(nil, 1, 20)
	if err != nil {
		t.Error(err)
		return
	}
	prettyPrint(*utxos)
}
// TestClient_Version fetches the daemon's version info and dumps it.
func TestClient_Version(t *testing.T) {
	client := NewClient("")
	version, err := client.Version()
	if err != nil {
		t.Error(err)
		return
	}
	prettyPrint(*version)
}
// TestClient_GetFile downloads a known regtest stream via the get call.
func TestClient_GetFile(t *testing.T) {
	_ = os.Setenv("BLOCKCHAIN_NAME", "lbrycrd_regtest")
	client := NewClient("")
	file, err := client.Get("lbry://test1559058649")
	if err != nil {
		t.Error(err)
		return
	}
	prettyPrint(*file)
}
// TestClient_FileList lists the first page of downloaded files on regtest.
func TestClient_FileList(t *testing.T) {
	_ = os.Setenv("BLOCKCHAIN_NAME", "lbrycrd_regtest")
	client := NewClient("")
	files, err := client.FileList(1, 20)
	if err != nil {
		t.Error(err)
		return
	}
	prettyPrint(*files)
}
// TestClient_Resolve resolves a known regtest claim name and dumps the result.
func TestClient_Resolve(t *testing.T) {
	_ = os.Setenv("BLOCKCHAIN_NAME", "lbrycrd_regtest")
	client := NewClient("")
	resolved, err := client.Resolve("test1559058649")
	if err != nil {
		t.Error(err)
		return
	}
	prettyPrint(*resolved)
}
// TestClient_AccountSet updates a setting on the wallet's first account and
// dumps the resulting account record.
func TestClient_AccountSet(t *testing.T) {
	d := NewClient("")
	accounts, err := d.AccountList(1, 20)
	if err != nil {
		t.Error(err)
		return
	}
	// Guard: indexing Items[0] on an empty wallet would panic the test binary.
	if len(accounts.Items) == 0 {
		t.Fatal("no accounts found in wallet")
	}
	account := accounts.Items[0].ID
	got, err := d.AccountSet(account, AccountSettings{ChangeMaxUses: util.PtrToInt(10000)})
	if err != nil {
		t.Error(err)
		return
	}
	prettyPrint(*got)
}
// TestClient_AccountCreate creates a throwaway account with a random name and
// verifies the daemon echoes that name back.
func TestClient_AccountCreate(t *testing.T) {
	client := NewClient("")
	accountName := fmt.Sprintf("lbry#user#id:%d", rand.Int())
	account, err := client.AccountCreate(accountName, false)
	if err != nil {
		t.Fatal(err)
	}
	if account.Name != accountName {
		t.Errorf("account name mismatch, expected %q, got %q", accountName, account.Name)
		return
	}
	prettyPrint(*account)
}
// TestClient_AccountAdd adds a watch-only account from a public key and
// verifies the daemon stores both the name and the key.
func TestClient_AccountAdd(t *testing.T) {
	d := NewClient("")
	name := "test" + fmt.Sprintf("%d", time.Now().Unix()) + "@lbry.com"
	pubKey := "tpubDA9GDAntyJu4hD3wU7175p7CuV6DWbYXfyb2HedBA3yuBp9HZ4n3QE4Ex6RHCSiEuVp2nKAL1Lzf2ZLo9ApaFgNaJjG6Xo1wB3iEeVbrDZp"
	account, err := d.AccountAdd(name, nil, nil, &pubKey, util.PtrToBool(true), nil)
	if err != nil {
		t.Fatal(err)
	}
	if account.Name != name {
		t.Errorf("account name mismatch, expected %q, got %q", name, account.Name)
		return
	}
	if account.PublicKey != pubKey {
		// Bug fix: this message previously printed the account name pair
		// instead of the public keys being compared.
		t.Errorf("public key mismatch, expected %q, got %q", pubKey, account.PublicKey)
		return
	}
	prettyPrint(*account)
}
// TestClient_AccountRemove creates a temporary account, removes it, and
// verifies the daemon no longer knows about it.
func TestClient_AccountRemove(t *testing.T) {
	client := NewClient("")
	accountName := "lbry#user#id:" + fmt.Sprintf("%d", rand.Int())
	created, err := client.AccountCreate(accountName, false)
	if err != nil {
		t.Fatal(err)
	}
	removed, err := client.AccountRemove(created.ID)
	if err != nil {
		t.Error(err)
		return
	}
	if removed.ID != created.ID {
		t.Error("accounts IDs mismatch")
	}
	account, err := client.SingleAccountList(created.ID)
	if err == nil {
		// The account is still listable: removal silently failed.
		t.Error("account was not removed")
		prettyPrint(*account)
		return
	}
	if strings.Contains(err.Error(), "Couldn't find account:") {
		prettyPrint(*removed)
		return
	}
	t.Fatal(err)
}
// TestClient_ChannelExport exports the test channel's key bundle and checks
// that something non-empty came back.
func TestClient_ChannelExport(t *testing.T) {
	d := NewClient("")
	response, err := d.ChannelExport(channelID, nil, nil)
	if err != nil {
		// Fatal: on error `response` is nil and the dereferences below would panic.
		t.Fatal(err)
	}
	if response == nil || len(*response) == 0 {
		t.Fatal("nothing returned!")
	}
	t.Log("Export:", *response)
}
// TestClient_ChannelImport imports a known test channel key into the default
// wallet and verifies the channel then appears in channel_list.
func TestClient_ChannelImport(t *testing.T) {
	d := NewClient("")
	// A channel created just for automated testing purposes
	channelName := "@LbryAutomatedTestChannel"
	channelkey := "7943FWPBHZES4dUcMXSpDYwoM5a2tsyJT1R8V54QoUhekGcqmeH3hbzDXoLLQ8" +
		"oKkfb99PgGK5efrZeYqaxg4X5XRJMJ6gKC8hqKcnwhYkmKDXmoBDNgd2ccZ9jhP8z" +
		"HG3NJorAN9Hh4XMyBc5goBLZYYvC9MYvBmT3Fcteb5saqMvmQxFURv74NqXLQZC1t" +
		"p6iRZKfTj77Pd5gsBsCYAbVmCqzbm5m1hHkUmfFEZVGcQNTYCDwZn543xSMYvSPnJ" +
		"zt8tRYCJWaPdj713uENZZMo3gxuAMb1NwSnx8tbwETp7WPkpFLL6HZ9jKpB8BURHM" +
		"F1RFD1PRyqbC6YezPyPQ2oninKKHdBduvXZG5KF2G2Q3ixsuE2ntifBBo1f5PotRk" +
		"UanXKEafWxvXAayJjpsmZ4bFt7n6Xg4438WZXBiZKCPobLJAiHfe72n618kE6PCNU" +
		"77cyU5Rk8J3CuY6QzZPzwuiXz2GLfkUMCYd9jGT6g53XbE6SwCsmGnd9NJkBAaJf5" +
		"1FAYRURrhHnp79PAoHftEWtZEuU8MCPMdSRjzxYMRS4ScUzg5viDMTAkE8frsfCVZ" +
		"hxsFwGUyNNno8eiqrrYmpbJGEwwK3S4437JboAUEFPdMNn8zNQWZcLLVrK9KyQeKM" +
		"XpKkf4zJV6sZJ7gBMpzvPL18ULEgXTy7VsNBKmsfC1rM4WVG9ri1UixEcLDS79foC" +
		"Jb3FnSr1T4MRKESeN3W"
	response, err := d.ChannelImport(channelkey, nil)
	if err != nil {
		// Fatal: `response` is nil on error and is dereferenced below.
		t.Fatal(err)
	}
	channels, err := d.ChannelList(nil, 1, 50, nil)
	if err != nil {
		// Fatal: `channels` is nil on error and is ranged over below.
		t.Fatal(err)
	}
	seen := false
	for _, c := range channels.Items {
		if c.Name == channelName {
			seen = true
		}
	}
	if !seen {
		t.Error("couldn't find imported channel")
	}
	t.Log("Response:", *response)
}
// TestClient_ChannelImportWithWalletID imports a known test channel key into a
// freshly created wallet and verifies the channel appears in that wallet.
func TestClient_ChannelImportWithWalletID(t *testing.T) {
	d := NewClient("")
	id := "lbry#wallet#id:" + fmt.Sprintf("%d", rand.Int())
	wallet, err := d.WalletCreate(id, nil)
	if err != nil {
		// Fatal: `wallet` is nil on error and wallet.ID is used below.
		t.Fatal(err)
	}
	// A channel created just for automated testing purposes
	channelName := "@LbryAutomatedTestChannel"
	channelKey := "7943FWPBHZES4dUcMXSpDYwoM5a2tsyJT1R8V54QoUhekGcqmeH3hbzDXoLLQ8" +
		"oKkfb99PgGK5efrZeYqaxg4X5XRJMJ6gKC8hqKcnwhYkmKDXmoBDNgd2ccZ9jhP8z" +
		"HG3NJorAN9Hh4XMyBc5goBLZYYvC9MYvBmT3Fcteb5saqMvmQxFURv74NqXLQZC1t" +
		"p6iRZKfTj77Pd5gsBsCYAbVmCqzbm5m1hHkUmfFEZVGcQNTYCDwZn543xSMYvSPnJ" +
		"zt8tRYCJWaPdj713uENZZMo3gxuAMb1NwSnx8tbwETp7WPkpFLL6HZ9jKpB8BURHM" +
		"F1RFD1PRyqbC6YezPyPQ2oninKKHdBduvXZG5KF2G2Q3ixsuE2ntifBBo1f5PotRk" +
		"UanXKEafWxvXAayJjpsmZ4bFt7n6Xg4438WZXBiZKCPobLJAiHfe72n618kE6PCNU" +
		"77cyU5Rk8J3CuY6QzZPzwuiXz2GLfkUMCYd9jGT6g53XbE6SwCsmGnd9NJkBAaJf5" +
		"1FAYRURrhHnp79PAoHftEWtZEuU8MCPMdSRjzxYMRS4ScUzg5viDMTAkE8frsfCVZ" +
		"hxsFwGUyNNno8eiqrrYmpbJGEwwK3S4437JboAUEFPdMNn8zNQWZcLLVrK9KyQeKM" +
		"XpKkf4zJV6sZJ7gBMpzvPL18ULEgXTy7VsNBKmsfC1rM4WVG9ri1UixEcLDS79foC" +
		"Jb3FnSr1T4MRKESeN3W"
	response, err := d.ChannelImport(channelKey, &wallet.ID)
	if err != nil {
		// Fatal: `response` is nil on error and is dereferenced below.
		t.Fatal(err)
	}
	channels, err := d.ChannelList(nil, 1, 50, &wallet.ID)
	if err != nil {
		t.Fatal(err)
	}
	seen := false
	for _, c := range channels.Items {
		if c.Name == channelName {
			seen = true
		}
	}
	if !seen {
		t.Error("couldn't find imported channel")
	}
	t.Log("Response:", *response)
}
// TestClient_WalletCreate creates a wallet with a random ID and verifies the
// daemon returns a wallet carrying that ID.
func TestClient_WalletCreate(t *testing.T) {
	d := NewClient("")
	id := "lbry#wallet#id:" + fmt.Sprintf("%d", rand.Int())
	wallet, err := d.WalletCreate(id, nil)
	if err != nil {
		t.Fatal(err)
	}
	if wallet.ID != id {
		prettyPrint(*wallet)
		// Bug fix: the message previously printed wallet.Name even though the
		// comparison is on wallet.ID.
		t.Fatalf("wallet ID mismatch, expected %q, got %q", id, wallet.ID)
	}
}
// TestClient_WalletCreateWithOpts creates a wallet with an initial single-key
// account and verifies that account exists and is named.
func TestClient_WalletCreateWithOpts(t *testing.T) {
	d := NewClient("")
	id := "lbry#wallet#id:" + fmt.Sprintf("%d", rand.Int())
	wallet, err := d.WalletCreate(id, &WalletCreateOpts{CreateAccount: true, SingleKey: true})
	if err != nil {
		t.Fatal(err)
	}
	accounts, err := d.AccountListForWallet(id)
	if err != nil {
		t.Fatal(err)
	}
	prettyPrint(wallet)
	prettyPrint(accounts)
	// Guard: indexing Items[0] would panic if account creation silently failed.
	if len(accounts.Items) == 0 {
		t.Fatal("no accounts created for wallet")
	}
	if accounts.Items[0].Name == "" {
		t.Fatalf("account name is empty")
	}
}
// TestClient_WalletList verifies that listing an unknown wallet fails with
// ErrorWalletNotLoaded, then creates the wallet and verifies it is listed.
func TestClient_WalletList(t *testing.T) {
	client := NewClient("")
	id := "lbry#wallet#id:" + fmt.Sprintf("%d", rand.Int())
	_, err := client.WalletList(id, 1, 20)
	if err == nil {
		t.Fatalf("wallet %v was unexpectedly found", id)
	}
	daemonErr, isDaemonErr := err.(Error)
	if !isDaemonErr {
		t.Fatalf("unknown error returned: %s", err)
	}
	if daemonErr.Name != ErrorWalletNotLoaded {
		t.Fatal(err)
	}
	if _, err = client.WalletCreate(id, &WalletCreateOpts{CreateAccount: true, SingleKey: true}); err != nil {
		t.Fatal(err)
	}
	wallets, err := client.WalletList(id, 1, 20)
	if err != nil {
		t.Fatal(err)
	}
	if len(wallets.Items) < 1 {
		t.Fatal("wallet list is empty")
	}
	if wallets.Items[0].ID != id {
		t.Fatalf("wallet ID mismatch, expected %q, got %q", id, wallets.Items[0].ID)
	}
}
// TestClient_WalletRemoveWalletAdd creates a wallet, unloads it, re-adds it,
// and verifies the same wallet comes back.
func TestClient_WalletRemoveWalletAdd(t *testing.T) {
	d := NewClient("")
	id := "lbry#wallet#id:" + fmt.Sprintf("%d", rand.Int())
	wallet, err := d.WalletCreate(id, nil)
	if err != nil {
		t.Fatal(err)
	}
	_, err = d.WalletRemove(id)
	if err != nil {
		t.Fatal(err)
	}
	addedWallet, err := d.WalletAdd(id)
	if err != nil {
		t.Fatal(err)
	}
	if addedWallet.ID != wallet.ID {
		prettyPrint(*addedWallet)
		// Bug fix: the message previously printed addedWallet.Name even though
		// the comparison is on the IDs.
		t.Fatalf("wallet ID mismatch, expected %q, got %q", wallet.ID, addedWallet.ID)
	}
}
// TestClient_TransactionSummary fetches a known transaction through the
// Odysee proxy and sanity-checks its outputs.
func TestClient_TransactionSummary(t *testing.T) {
	d := NewClient("https://api.na-backend.odysee.com/api/v1/proxy")
	r, err := d.TransactionShow("d104a1616c6af581e2046819de678f370d624e97cf176f95acaec4b183a42db6")
	if err != nil {
		// Fatal: `r` is nil on error and is dereferenced below.
		t.Fatal(err)
	}
	if len(r.Outputs) != 2 {
		t.Fatal("found wrong transaction")
	}
	if r.Outputs[0].Amount != "5.0" {
		t.Error("found wrong lbc amount for transaction.")
	}
}

View file

@ -0,0 +1,670 @@
package jsonrpc
import (
"encoding/hex"
"encoding/json"
"io/ioutil"
"net/http"
"os"
"reflect"
"strings"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
schema "github.com/lbryio/lbry.go/v2/schema/stake"
lbryschema "github.com/lbryio/types/v2/go"
"github.com/shopspring/decimal"
)
// Currency identifies the denomination of a content Fee.
type Currency string

// Currencies the daemon accepts for content fees.
const (
	CurrencyLBC = Currency("LBC")
	CurrencyUSD = Currency("USD")
	CurrencyBTC = Currency("BTC")
)

// Fee describes the price attached to a paid claim.
type Fee struct {
	FeeCurrency Currency        `json:"fee_currency"`
	FeeAmount   decimal.Decimal `json:"fee_amount"`
	FeeAddress  *string         `json:"fee_address"` // nil when no dedicated payout address is set
}

// File mirrors a single item of the daemon's file_list / get responses: a
// downloaded (or downloading) stream with its blob bookkeeping and claim info.
type File struct {
	AddedOn              int64             `json:"added_on"`
	BlobsCompleted       uint64            `json:"blobs_completed"`
	BlobsInStream        uint64            `json:"blobs_in_stream"`
	BlobsRemaining       uint64            `json:"blobs_remaining"`
	ChannelClaimID       string            `json:"channel_claim_id"`
	ChannelName          string            `json:"channel_name"`
	ClaimID              string            `json:"claim_id"`
	ClaimName            string            `json:"claim_name"`
	Completed            bool              `json:"completed"`
	Confirmations        int64             `json:"confirmations"`
	ContentFee           *Fee              `json:"content_fee"`
	DownloadDirectory    string            `json:"download_directory"`
	DownloadPath         string            `json:"download_path"`
	FileName             string            `json:"file_name"`
	Height               int               `json:"height"`
	IsFullyReflected     bool              `json:"is_fully_reflected"`
	Key                  string            `json:"key"`
	Value                *lbryschema.Claim `json:"protobuf"` // decoded claim protobuf (see fixDecodeProto)
	MimeType             string            `json:"mime_type"`
	Nout                 int               `json:"nout"`
	Outpoint             string            `json:"outpoint"`
	PurchaseReceipt      interface{}       `json:"purchase_receipt"`
	ReflectorProgress    int               `json:"reflector_progress"`
	SdHash               string            `json:"sd_hash"`
	Status               string            `json:"status"`
	Stopped              bool              `json:"stopped"`
	StreamHash           string            `json:"stream_hash"`
	StreamName           string            `json:"stream_name"`
	StreamingURL         string            `json:"streaming_url"`
	SuggestedFileName    string            `json:"suggested_file_name"`
	Timestamp            int64             `json:"timestamp"`
	TotalBytes           uint64            `json:"total_bytes"`
	TotalBytesLowerBound uint64            `json:"total_bytes_lower_bound"`
	Txid                 string            `json:"txid"`
	UploadingToReflector bool              `json:"uploading_to_reflector"`
	WrittenBytes         uint64            `json:"written_bytes"`
}
// getEnumVal maps a JSON string value onto its protobuf enum number using the
// generated name-to-value table.
func getEnumVal(enum map[string]int32, data interface{}) (int32, error) {
	name, isString := data.(string)
	if !isString {
		return 0, errors.Err("expected a string")
	}
	value, known := enum[name]
	if !known {
		return 0, errors.Err("invalid enum key")
	}
	return value, nil
}
// fixDecodeProto is a decode hook that converts raw JSON values into the
// richer types used by the response structs (unsigned ints, byte slices,
// decimals, protobuf enums, and hex-encoded claims). Values whose destination
// type is not handled pass through unchanged.
func fixDecodeProto(src, dest reflect.Type, data interface{}) (interface{}, error) {
	switch dest {
	case reflect.TypeOf(uint64(0)):
		// JSON numbers arrive as json.Number; reject negatives for unsigned targets.
		if n, ok := data.(json.Number); ok {
			val, err := n.Int64()
			if err != nil {
				return nil, errors.Wrap(err, 0)
			} else if val < 0 {
				return nil, errors.Err("must be unsigned int")
			}
			return uint64(val), nil
		}
	case reflect.TypeOf([]byte{}):
		if s, ok := data.(string); ok {
			return []byte(s), nil
		}
	case reflect.TypeOf(decimal.Decimal{}):
		// Amounts may be encoded either as JSON numbers or as strings.
		if n, ok := data.(json.Number); ok {
			val, err := n.Float64()
			if err != nil {
				return nil, errors.Wrap(err, 0)
			}
			return decimal.NewFromFloat(val), nil
		} else if s, ok := data.(string); ok {
			d, err := decimal.NewFromString(s)
			if err != nil {
				return nil, errors.Wrap(err, 0)
			}
			return d, nil
		}
	case reflect.TypeOf(lbryschema.Fee_Currency(0)):
		val, err := getEnumVal(lbryschema.Fee_Currency_value, data)
		return lbryschema.Fee_Currency(val), err
	case reflect.TypeOf(lbryschema.Claim{}):
		// Claims arrive hex-encoded; decode against the configured blockchain
		// (defaults to mainnet when BLOCKCHAIN_NAME is unset).
		blockChainName := os.Getenv("BLOCKCHAIN_NAME")
		if blockChainName == "" {
			blockChainName = "lbrycrd_main"
		}
		claim, err := schema.DecodeClaimHex(data.(string), blockChainName)
		if err != nil {
			return nil, err
		}
		return claim.Claim, nil
	}
	return data, nil
}
// WalletBalanceResponse is the LBC balance returned by wallet_balance.
type WalletBalanceResponse decimal.Decimal

// PeerListResponsePeer is one DHT peer as returned by peer_list.
type PeerListResponsePeer struct {
	IP     string `json:"host"`
	Port   uint   `json:"port"`
	NodeId string `json:"node_id"`
}

// PeerListResponse is the list of peers hosting a blob.
type PeerListResponse []PeerListResponsePeer

// BlobGetResponse is a decoded SD blob: the stream's key plus its blob table.
type BlobGetResponse struct {
	Blobs []struct {
		BlobHash string `json:"blob_hash,omitempty"`
		BlobNum  int    `json:"blob_num"`
		IV       string `json:"iv"`
		Length   int    `json:"length"`
	} `json:"blobs"`
	Key               string `json:"key"`
	StreamHash        string `json:"stream_hash"`
	StreamName        string `json:"stream_name"`
	StreamType        string `json:"stream_type"`
	SuggestedFileName string `json:"suggested_file_name"`
}

// StreamCostEstimateResponse is the estimated LBC cost of downloading a stream.
type StreamCostEstimateResponse decimal.Decimal

// BlobAvailability reports which peers a single blob could be fetched from.
type BlobAvailability struct {
	IsAvailable      bool     `json:"is_available"`
	ReachablePeers   []string `json:"reachable_peers"`
	UnReachablePeers []string `json:"unreachable_peers"`
}

// StreamAvailabilityResponse aggregates availability info for a whole stream.
type StreamAvailabilityResponse struct {
	IsAvailable          bool             `json:"is_available"`
	DidDecode            bool             `json:"did_decode"`
	DidResolve           bool             `json:"did_resolve"`
	IsStream             bool             `json:"is_stream"`
	NumBlobsInStream     uint64           `json:"num_blobs_in_stream"`
	SDHash               string           `json:"sd_hash"`
	SDBlobAvailability   BlobAvailability `json:"sd_blob_availability"`
	HeadBlobHash         string           `json:"head_blob_hash"`
	HeadBlobAvailability BlobAvailability `json:"head_blob_availability"`
	UseUPNP              bool             `json:"use_upnp"`
	UPNPRedirectIsSet    bool             `json:"upnp_redirect_is_set"`
	Error                string           `json:"error,omitempty"`
}

// GetResponse is the result of the get call: a single File.
type GetResponse File

// FileListResponse is a paginated set of Files.
type FileListResponse struct {
	Items      []File `json:"items"`
	Page       uint64 `json:"page"`
	PageSize   uint64 `json:"page_size"`
	TotalPages uint64 `json:"total_pages"`
}
// Account describes a wallet account as returned by the account_* calls.
type Account struct {
	// AddressGenerator holds deterministic address derivation settings for
	// the change and receiving chains.
	AddressGenerator struct {
		Change struct {
			Gap                   uint64 `json:"gap"`
			MaximumUsesPerAddress uint64 `json:"maximum_uses_per_address"`
		} `json:"change"`
		Name      string `json:"name"`
		Receiving struct {
			Gap                   uint64 `json:"gap"`
			MaximumUsesPerAddress uint64 `json:"maximum_uses_per_address"`
		} `json:"receiving"`
	} `json:"address_generator"`
	Certificates uint64   `json:"certificates"`
	Coins        float64  `json:"coins"`
	Encrypted    bool     `json:"encrypted"`
	ID           string   `json:"id"`
	IsDefault    bool     `json:"is_default"`
	Ledger       *string  `json:"ledger,omitempty"`
	ModifiedOn   *float64 `json:"modified_on,omitempty"`
	Name         string   `json:"name"`
	Preferences  *struct {
		Theme string `json:"theme"`
	} `json:"preferences,omitempty"`
	PrivateKey *string `json:"private_key,omitempty"` // only present for unlocked, non-watch-only accounts
	PublicKey  string  `json:"public_key"`
	Seed       *string `json:"seed,omitempty"`
	Satoshis   uint64  `json:"satoshis"`
}

// AccountListResponse is a paginated set of Accounts.
type AccountListResponse struct {
	Items      []Account `json:"items"`
	Page       uint64    `json:"page"`
	PageSize   uint64    `json:"page_size"`
	TotalPages uint64    `json:"total_pages"`
}

// AccountBalanceResponse breaks an account balance down by how funds are committed.
type AccountBalanceResponse struct {
	Available decimal.Decimal `json:"available"`
	Reserved  decimal.Decimal `json:"reserved"`
	// ReservedSubtotals itemizes the reserved amount.
	ReservedSubtotals struct {
		Claims   decimal.Decimal `json:"claims"`
		Supports decimal.Decimal `json:"supports"`
		Tips     decimal.Decimal `json:"tips"`
	} `json:"reserved_subtotals"`
	Total decimal.Decimal `json:"total"`
}
// Transaction is a single transaction input or output as reported by the daemon.
type Transaction struct {
	Address            string            `json:"address"`
	Amount             string            `json:"amount"`
	ClaimID            string            `json:"claim_id"`
	ClaimOp            string            `json:"claim_op"`
	Confirmations      int               `json:"confirmations"`
	HasSigningKey      bool              `json:"has_signing_key"`
	Height             int               `json:"height"`
	IsInternalTransfer bool              `json:"is_internal_transfer"`
	IsMyInput          bool              `json:"is_my_input"`
	IsMyOutput         bool              `json:"is_my_output"`
	IsSpent            bool              `json:"is_spent"`
	Name               string            `json:"name"`
	NormalizedName     string            `json:"normalized_name"`
	Nout               uint64            `json:"nout"`
	PermanentUrl       string            `json:"permanent_url"`
	SigningChannel     *Claim            `json:"signing_channel,omitempty"`
	TimeStamp          uint64            `json:"time_stamp"`
	Txid               string            `json:"txid"`
	Type               string            `json:"type"`
	Value              *lbryschema.Claim `json:"protobuf,omitempty"` // decoded claim protobuf (see fixDecodeProto)
}

// TransactionSummary is a whole transaction: its inputs, outputs and totals.
type TransactionSummary struct {
	Height      int           `json:"height"`
	Hex         string        `json:"hex"`
	Inputs      []Transaction `json:"inputs"`
	Outputs     []Transaction `json:"outputs"`
	TotalFee    string        `json:"total_fee"`
	TotalInput  string        `json:"total_input"`
	TotalOutput string        `json:"total_output"`
	Txid        string        `json:"txid"`
}

// AccountFundResponse is the transaction produced by account_fund.
type AccountFundResponse TransactionSummary

// Address is a wallet address string.
type Address string

// AddressUnusedResponse is a fresh, unused wallet address.
type AddressUnusedResponse Address

// AddressListResponse is a paginated set of wallet addresses with usage counts.
type AddressListResponse struct {
	Items []struct {
		Account   string  `json:"account"`
		Address   Address `json:"address"`
		Pubkey    string  `json:"pubkey"`
		UsedTimes uint64  `json:"used_times"`
	} `json:"items"`
	Page       uint64 `json:"page"`
	PageSize   uint64 `json:"page_size"`
	TotalPages uint64 `json:"total_pages"`
}

// ChannelExportResponse is the serialized channel key bundle from channel_export.
type ChannelExportResponse string

// ChannelImportResponse is the daemon's reply to channel_import.
type ChannelImportResponse string

// ChannelListResponse is a paginated set of channel claims.
type ChannelListResponse struct {
	Items      []Transaction `json:"items"`
	Page       uint64        `json:"page"`
	PageSize   uint64        `json:"page_size"`
	TotalPages uint64        `json:"total_pages"`
}

// ClaimAbandonResponse reports the outcome of abandoning a claim.
type ClaimAbandonResponse struct {
	Success bool               `json:"success"`
	Tx      TransactionSummary `json:"tx"`
}

// Support is a support attached to a claim.
type Support struct {
	Amount string `json:"amount"`
	Nout   uint64 `json:"nout"`
	Txid   string `json:"txid"`
}

// PurchaseReceipt is the proof of purchase attached to a paid claim.
type PurchaseReceipt struct {
	// NOTE(review): the tag reads "file_name" though the field is Address —
	// presumably a copy/paste mistake in the tag; confirm against the
	// daemon's purchase-receipt schema before changing it.
	Address       string `json:"file_name"`
	Amount        string `json:"amount"`
	ClaimID       string `json:"claim_id"`
	Confirmations int    `json:"confirmations"`
	Height        int    `json:"height"`
	Nout          uint64 `json:"nout"`
	Timestamp     uint64 `json:"timestamp"`
	// NOTE(review): tagged "purchase" rather than "type" — looks like another
	// tag mistake; verify before relying on this field being populated.
	Type string `json:"purchase"`
}
// Claim is a resolved claim (stream, channel, repost…) as returned by
// resolve, claim_list and claim_search.
type Claim struct {
	Address                 string           `json:"address"`
	Amount                  string           `json:"amount"`
	CanonicalURL            string           `json:"canonical_url"`
	ChannelID               string           `json:"channel_id"`
	ClaimID                 string           `json:"claim_id"`
	ClaimOp                 string           `json:"claim_op,omitempty"`
	Confirmations           int              `json:"confirmations"`
	Height                  int              `json:"height"`
	IsChange                bool             `json:"is_change,omitempty"`
	IsChannelSignatureValid bool             `json:"is_channel_signature_valid,omitempty"`
	IsInternalTransfer      bool             `json:"is_internal_transfer"`
	IsMyInput               bool             `json:"is_my_input"`
	IsMyOutput              bool             `json:"is_my_output"`
	IsSpent                 bool             `json:"is_spent"`
	Meta                    Meta             `json:"meta,omitempty"`
	Name                    string           `json:"name"`
	NormalizedName          string           `json:"normalized_name"`
	Nout                    uint64           `json:"nout"`
	PermanentURL            string           `json:"permanent_url"`
	PurchaseReceipt         *PurchaseReceipt `json:"purchase_receipt,omitempty"`
	ShortURL                string           `json:"short_url"`
	SigningChannel          *Claim           `json:"signing_channel,omitempty"`
	Timestamp               int              `json:"timestamp"`
	Txid                    string           `json:"txid"`
	Type                    string           `json:"type,omitempty"`
	Value                   lbryschema.Claim `json:"protobuf,omitempty"` // decoded claim protobuf (see fixDecodeProto)
	ValueType               string           `json:"value_type,omitempty"`
	AbsoluteChannelPosition int              `json:"absolute_channel_position,omitempty"`
	ChannelName             string           `json:"channel_name,omitempty"`
	ClaimSequence           int64            `json:"claim_sequence,omitempty"`
	DecodedClaim            bool             `json:"decoded_claim,omitempty"`
	EffectiveAmount         string           `json:"effective_amount,omitempty"`
	HasSignature            *bool            `json:"has_signature,omitempty"`
	SignatureIsValid        *bool            `json:"signature_is_valid,omitempty"`
	Supports                []Support        `json:"supports,omitempty"`
	ValidAtHeight           int              `json:"valid_at_height,omitempty"`
}

// Meta carries blockchain-derived metadata attached to a resolved claim.
type Meta struct {
	ActivationHeight  int64   `json:"activation_height,omitempty"`
	CreationHeight    int64   `json:"creation_height,omitempty"`
	CreationTimestamp int     `json:"creation_timestamp,omitempty"`
	EffectiveAmount   string  `json:"effective_amount,omitempty"`
	ExpirationHeight  int64   `json:"expiration_height,omitempty"`
	IsControlling     bool    `json:"is_controlling,omitempty"`
	SupportAmount     string  `json:"support_amount,omitempty"`
	TrendingGlobal    float64 `json:"trending_global,omitempty"`
	TrendingGroup     float64 `json:"trending_group,omitempty"`
	TrendingLocal     float64 `json:"trending_local,omitempty"`
	TrendingMixed     float64 `json:"trending_mixed,omitempty"`
}
// coldStorageURL is the public S3 bucket where lbry.com mirrors blobs.
const coldStorageURL = "https://s3.wasabisys.com/blobs.lbry.com/"

// GetStreamSizeByMagic uses "magic" to not just estimate, but actually return the exact size of a stream
// It does so by fetching the sd blob and the last blob from our S3 bucket, decrypting and unpadding the last blob
// adding up all full blobs that have a known size and finally adding the real last blob size too.
// This will only work if we host at least the sd blob and the last blob on S3, if not, this will error.
func (c *Claim) GetStreamSizeByMagic() (streamSize uint64, e error) {
	if c.Value.GetStream() == nil {
		return 0, errors.Err("this claim is not a stream")
	}
	// Fetch and decode the SD blob, which lists every content blob in the stream.
	resp, err := http.Get(coldStorageURL + hex.EncodeToString(c.Value.GetStream().Source.SdHash))
	if err != nil {
		return 0, errors.Err(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return 0, errors.Err(err)
	}
	sdb := &stream.SDBlob{}
	err = sdb.UnmarshalJSON(body)
	if err != nil {
		return 0, err
	}
	// BlobInfos ends with a zero-length terminator entry, so the last content
	// blob sits at len-2. Guard against malformed SD blobs where that index
	// would be negative and panic.
	if len(sdb.BlobInfos) < 2 {
		return 0, errors.Err("stream for claim %s has no content blobs", c.ClaimID)
	}
	lastBlobIndex := len(sdb.BlobInfos) - 2
	lastBlobHash := sdb.BlobInfos[lastBlobIndex].BlobHash
	// Every content blob except the last has the known maximum (padded) size.
	if len(sdb.BlobInfos) > 2 {
		streamSize = uint64(stream.MaxBlobSize-1) * uint64(len(sdb.BlobInfos)-2)
	}
	// Fetch the last content blob and decrypt it to learn its exact unpadded size.
	resp2, err := http.Get(coldStorageURL + hex.EncodeToString(lastBlobHash))
	if err != nil {
		return 0, errors.Err(err)
	}
	defer resp2.Body.Close()
	body2, err := ioutil.ReadAll(resp2.Body)
	if err != nil {
		return 0, errors.Err(err)
	}
	// DecryptBlob can panic on corrupt input; convert that into an error.
	defer func() {
		if r := recover(); r != nil {
			e = errors.Err("recovered from DecryptBlob panic for blob %s", lastBlobHash)
		}
	}()
	lastBlob, err := stream.DecryptBlob(body2, sdb.Key, sdb.BlobInfos[lastBlobIndex].IV)
	if err != nil {
		return 0, errors.Err(err)
	}
	streamSize += uint64(len(lastBlob))
	return streamSize, nil
}
// Special content tags the LBRY/Odysee apps attach to claims to mark paid,
// scheduled, or hidden content. The "c:" prefix marks the current tag format;
// the legacy variants predate it.
const (
	ProtectedContentTag      = SpecialContentType("c:members-only")
	PurchaseContentTag       = SpecialContentType("c:purchase:")
	RentalContentTag         = SpecialContentType("c:rental:")
	PreorderContentTag       = SpecialContentType("c:preorder:")
	LegacyPurchaseContentTag = SpecialContentType("purchase:")
	LegacyRentalContentTag   = SpecialContentType("rental:")
	LegacyPreorderContentTag = SpecialContentType("preorder:")
	ScheduledShowContentTag  = SpecialContentType("c:scheduled:show")
	ScheduledHideContentTag  = SpecialContentType("c:scheduled:hide")
	UnlistedContentTag       = SpecialContentType("c:unlisted")
)

// SpecialContentType is a claim tag carrying app-level meaning (see the
// constants above).
type SpecialContentType string
// IsContentSpecial returns true if any of the claim's tags contains one of
// the given special content tags.
func (c *Claim) IsContentSpecial(specialTags ...SpecialContentType) bool {
	matchesAny := func(tag string) bool {
		for _, special := range specialTags {
			if strings.Contains(tag, string(special)) {
				return true
			}
		}
		return false
	}
	for _, tag := range c.Value.GetTags() {
		if matchesAny(tag) {
			return true
		}
	}
	return false
}
// StreamListResponse is a paginated set of stream claims.
type StreamListResponse struct {
	Items      []Claim `json:"items"`
	Page       uint64  `json:"page"`
	PageSize   uint64  `json:"page_size"`
	TotalPages uint64  `json:"total_pages"`
}

// ClaimListResponse is a paginated set of claims from claim_list.
type ClaimListResponse struct {
	Claims     []Claim `json:"items"`
	Page       uint64  `json:"page"`
	PageSize   uint64  `json:"page_size"`
	TotalPages uint64  `json:"total_pages"`
}

// ClaimSearchResponse is a paginated set of claims from claim_search.
type ClaimSearchResponse ClaimListResponse

// SupportListResponse is a paginated set of supports.
// NOTE(review): Items has no json tag — Go's case-insensitive field matching
// still binds it to the daemon's "items" key, but adding the tag would be
// more consistent with the sibling types; confirm before changing.
type SupportListResponse struct {
	Items      []Claim
	Page       uint64 `json:"page"`
	PageSize   uint64 `json:"page_size"`
	TotalPages uint64 `json:"total_pages"`
}
// StatusResponse mirrors the daemon's status call: per-component health and
// configuration for blob management, DHT, wallet, UPnP, etc.
type StatusResponse struct {
	BlobManager struct {
		Connections struct {
			MaxIncomingMbs   float64 `json:"max_incoming_mbs"`
			MaxOutgoingMbs   float64 `json:"max_outgoing_mbs"`
			TotalIncomingMbs float64 `json:"total_incoming_mbs"`
			TotalOutgoingMbs float64 `json:"total_outgoing_mbs"`
			TotalReceived    int64   `json:"total_received"`
			TotalSent        int64   `json:"total_sent"`
		} `json:"connections"`
		FinishedBlobs int64 `json:"finished_blobs"`
	} `json:"blob_manager"`
	ConnectionStatus struct {
		Code    string `json:"code"`
		Message string `json:"message"`
	} `json:"connection_status"`
	Dht struct {
		NodeID              string `json:"node_id"`
		PeersInRoutingTable uint64 `json:"peers_in_routing_table"`
	} `json:"dht"`
	FfmpegStatus struct {
		AnalyzeAudioVolume bool   `json:"analyze_audio_volume"`
		Available          bool   `json:"available"`
		Which              string `json:"which"`
	} `json:"ffmpeg_status"`
	FileManager struct {
		ManagedFiles int64 `json:"managed_files"`
	} `json:"file_manager"`
	HashAnnouncer struct {
		AnnounceQueueSize uint64 `json:"announce_queue_size"`
	} `json:"hash_announcer"`
	InstallationID    string   `json:"installation_id"`
	IsRunning         bool     `json:"is_running"`
	SkippedComponents []string `json:"skipped_components"`
	// StartupStatus reports, per component, whether startup has completed.
	StartupStatus struct {
		BlobManager         bool `json:"blob_manager"`
		Database            bool `json:"database"`
		Dht                 bool `json:"dht"`
		ExchangeRateManager bool `json:"exchange_rate_manager"`
		FileManager         bool `json:"file_manager"`
		HashAnnouncer       bool `json:"hash_announcer"`
		LibtorrentComponent bool `json:"libtorrent_component"`
		PeerProtocolServer  bool `json:"peer_protocol_server"`
		Upnp                bool `json:"upnp"`
		Wallet              bool `json:"wallet"`
		WalletServerPayments bool `json:"wallet_server_payments"`
	} `json:"startup_status"`
	Upnp struct {
		AioupnpVersion  string   `json:"aioupnp_version"`
		DhtRedirectSet  bool     `json:"dht_redirect_set"`
		ExternalIp      string   `json:"external_ip"`
		Gateway         string   `json:"gateway"`
		PeerRedirectSet bool     `json:"peer_redirect_set"`
		Redirects       struct{} `json:"redirects"`
	} `json:"upnp"`
	Wallet struct {
		AvailableServers int    `json:"available_servers"`
		BestBlockhash    string `json:"best_blockhash"`
		Blocks           int    `json:"blocks"`
		BlocksBehind     int    `json:"blocks_behind"`
		Connected        string `json:"connected"`
		// ConnectedFeatures describes the wallet server currently in use.
		ConnectedFeatures struct {
			DailyFee        string `json:"daily_fee"`
			Description     string `json:"description"`
			DonationAddress string `json:"donation_address"`
			GenesisHash     string `json:"genesis_hash"`
			HashFunction    string `json:"hash_function"`
			Hosts           struct {
			} `json:"hosts"`
			PaymentAddress    string      `json:"payment_address"`
			ProtocolMax       string      `json:"protocol_max"`
			ProtocolMin       string      `json:"protocol_min"`
			Pruning           interface{} `json:"pruning"`
			ServerVersion     string      `json:"server_version"`
			TrendingAlgorithm string      `json:"trending_algorithm"`
		} `json:"connected_features"`
		HeadersSynchronizationProgress int `json:"headers_synchronization_progress"`
		KnownServers                   int `json:"known_servers"`
		Servers                        []struct {
			Availability bool    `json:"availability"`
			Host         string  `json:"host"`
			Latency      float64 `json:"latency"`
			Port         int     `json:"port"`
		} `json:"servers"`
	} `json:"wallet"`
	WalletServerPayments struct {
		MaxFee  string `json:"max_fee"`
		Running bool   `json:"running"`
	} `json:"wallet_server_payments"`
}

// UTXOListResponse is a paginated set of unspent transaction outputs.
type UTXOListResponse struct {
	Items []struct {
		Address            string `json:"address"`
		Amount             string `json:"amount"`
		Confirmations      int    `json:"confirmations"`
		Height             int    `json:"height"`
		IsInternalTransfer bool   `json:"is_internal_transfer"`
		IsMyInput          bool   `json:"is_my_input"`
		IsMyOutput         bool   `json:"is_my_output"`
		IsSpent            bool   `json:"is_spent"`
		Nout               int    `json:"nout"`
		Timestamp          int64  `json:"timestamp"`
		Txid               string `json:"txid"`
		Type               string `json:"type"`
	} `json:"items"`
	Page       uint64 `json:"page"`
	PageSize   uint64 `json:"page_size"`
	TotalPages uint64 `json:"total_pages"`
}

// UTXOReleaseResponse is the (possibly nil) message returned by utxo_release.
type UTXOReleaseResponse *string
// transactionListBlob is one claim/update/abandon entry inside a
// transaction_list item.
type transactionListBlob struct {
	Address      string `json:"address"`
	Amount       string `json:"amount"`
	BalanceDelta string `json:"balance_delta"`
	ClaimId      string `json:"claim_id"`
	ClaimName    string `json:"claim_name"`
	IsSpent      bool   `json:"is_spent"`
	Nout         int    `json:"nout"`
}

//TODO: this repeats all the fields from transactionListBlob which doesn't make sense
// but if i extend the type with transactionListBlob it doesn't fill the fields. does our unmarshaller crap out on these?
// supportBlob is one support entry inside a transaction_list item; it is
// transactionListBlob plus the IsTip flag.
type supportBlob struct {
	Address      string `json:"address"`
	Amount       string `json:"amount"`
	BalanceDelta string `json:"balance_delta"`
	ClaimId      string `json:"claim_id"`
	ClaimName    string `json:"claim_name"`
	IsSpent      bool   `json:"is_spent"`
	IsTip        bool   `json:"is_tip"`
	Nout         int    `json:"nout"`
}

// TransactionListResponse is a paginated wallet transaction history, with
// each transaction's claim, support, update and abandon entries broken out.
type TransactionListResponse struct {
	Items []struct {
		AbandonInfo   []transactionListBlob `json:"abandon_info"`
		ClaimInfo     []transactionListBlob `json:"claim_info"`
		Confirmations int64                 `json:"confirmations"`
		Date          string                `json:"date"`
		Fee           string                `json:"fee"`
		SupportInfo   []supportBlob         `json:"support_info"`
		Timestamp     int64                 `json:"timestamp"`
		Txid          string                `json:"txid"`
		UpdateInfo    []transactionListBlob `json:"update_info"`
		Value         string                `json:"value"`
	} `json:"items"`
	Page       uint64 `json:"page"`
	PageSize   uint64 `json:"page_size"`
	TotalPages uint64 `json:"total_pages"`
}
// VersionResponse describes the daemon build and its host platform.
type VersionResponse struct {
	Build   string `json:"build"`
	Desktop string `json:"desktop"`
	Distro  struct {
		Codename     string `json:"codename"`
		ID           string `json:"id"`
		Like         string `json:"like"`
		Version      string `json:"version"`
		VersionParts struct {
			BuildNumber string `json:"build_number"`
			Major       string `json:"major"`
			Minor       string `json:"minor"`
		} `json:"version_parts"`
	} `json:"distro"`
	LbrynetVersion string `json:"lbrynet_version"`
	OsRelease      string `json:"os_release"`
	OsSystem       string `json:"os_system"`
	Platform       string `json:"platform"`
	Processor      string `json:"processor"`
	PythonVersion  string `json:"python_version"`
	Version        string `json:"version"`
}

// ResolveResponse maps each requested URL to its resolved Claim.
type ResolveResponse map[string]Claim

// ClaimShowResponse is the single claim returned by claim_show.
type ClaimShowResponse *Claim

// Wallet identifies a wallet loaded in the daemon.
type Wallet struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

// WalletList is a paginated set of loaded wallets.
type WalletList struct {
	Items      []Wallet `json:"items"`
	Page       uint64   `json:"page"`
	PageSize   uint64   `json:"page_size"`
	TotalPages uint64   `json:"total_pages"`
}

269
extras/lbryinc/client.go Normal file
View file

@ -0,0 +1,269 @@
package lbryinc
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"time"
"golang.org/x/oauth2"
log "github.com/sirupsen/logrus"
)
// Defaults and well-known paths for the internal-apis server.
const (
	defaultServerAddress       = "https://api.odysee.tv"
	timeout                    = 5 * time.Second
	headerForwardedFor         = "X-Forwarded-For"
	userObjectPath             = "user"
	userMeMethod               = "me"
	userHasVerifiedEmailMethod = "has_verified_email"
)

// Client stores data about internal-apis call it is about to make.
// Exactly one of AuthToken or OAuthToken is expected to be set.
type Client struct {
	AuthToken     string
	OAuthToken    oauth2.TokenSource
	Logger        *log.Logger
	serverAddress string
	extraHeaders  map[string]string
}

// ClientOpts allow to provide extra parameters to NewClient:
// - ServerAddress
// - RemoteIP — to forward the IP of a frontend client making the request
type ClientOpts struct {
	ServerAddress string
	RemoteIP      string
}

// APIResponse reflects internal-apis JSON response format.
type APIResponse struct {
	Success bool        `json:"success"`
	Error   *string     `json:"error"` // set when Success is false
	Data    interface{} `json:"data"`  // either a JSON object or a JSON array
}
// data is the internal carrier for a parsed API payload: exactly one of obj
// or array is populated, depending on the JSON shape the server returned.
type data struct {
	obj   map[string]interface{}
	array []interface{}
}

// IsObject reports whether the payload was a JSON object.
func (d data) IsObject() bool { return d.obj != nil }

// IsArray reports whether the payload was a JSON array.
func (d data) IsArray() bool { return d.array != nil }

// Object returns the payload as a map, or an error when it was not an object.
func (d data) Object() (map[string]interface{}, error) {
	if !d.IsObject() {
		return nil, errors.New("no object data found")
	}
	return d.obj, nil
}

// Array returns the payload as a slice, or an error when it was not an array.
func (d data) Array() ([]interface{}, error) {
	if !d.IsArray() {
		return nil, errors.New("no array data found")
	}
	return d.array, nil
}
// APIError wraps errors returned by LBRY API server to discern them from other kinds (like http errors).
type APIError struct {
Err error
}
func (e APIError) Error() string {
return fmt.Sprintf("api error: %v", e.Err)
}
// ResponseData is a map containing parsed json response.
// It abstracts over the two payload shapes internal-apis can return: exactly
// one of IsObject/IsArray holds, and the matching accessor yields the data.
type ResponseData interface {
	IsObject() bool
	IsArray() bool
	Object() (map[string]interface{}, error)
	Array() ([]interface{}, error)
}
// makeMethodPath joins an API object and method into a request path like
// "/user/me".
func makeMethodPath(obj, method string) string {
	return "/" + obj + "/" + method
}
// NewClient returns a client instance for internal-apis. It requires authToken to be provided
// for authentication.
func NewClient(authToken string, opts *ClientOpts) Client {
	client := Client{
		serverAddress: defaultServerAddress,
		extraHeaders:  make(map[string]string),
		AuthToken:     authToken,
		Logger:        log.StandardLogger(),
	}
	if opts == nil {
		return client
	}
	if opts.ServerAddress != "" {
		client.serverAddress = opts.ServerAddress
	}
	if opts.RemoteIP != "" {
		client.extraHeaders[headerForwardedFor] = opts.RemoteIP
	}
	return client
}
// NewOauthClient returns a client instance for internal-apis. It requires Oauth Token Source to be provided
// for authentication.
func NewOauthClient(token oauth2.TokenSource, opts *ClientOpts) Client {
	client := Client{
		serverAddress: defaultServerAddress,
		extraHeaders:  make(map[string]string),
		OAuthToken:    token,
		Logger:        log.StandardLogger(),
	}
	if opts == nil {
		return client
	}
	if opts.ServerAddress != "" {
		client.serverAddress = opts.ServerAddress
	}
	if opts.RemoteIP != "" {
		client.extraHeaders[headerForwardedFor] = opts.RemoteIP
	}
	return client
}
// getEndpointURL builds the full URL for an object/method resource call.
func (c Client) getEndpointURL(object, method string) string {
	return c.serverAddress + makeMethodPath(object, method)
}

// getEndpointURLFromPath builds the full URL for a raw API path.
func (c Client) getEndpointURLFromPath(path string) string {
	return c.serverAddress + path
}
// prepareParams form-encodes request parameters, attaching the client's
// auth_token when one is configured. It errors when no credentials are
// available at all, or when the caller supplies its own auth_token.
func (c Client) prepareParams(params map[string]interface{}) (string, error) {
	if c.AuthToken == "" && c.OAuthToken == nil {
		return "", errors.New("oauth token source must be supplied")
	}
	if _, found := params["auth_token"]; found {
		return "", errors.New("extra auth_token supplied in request params")
	}
	form := url.Values{}
	if c.AuthToken != "" {
		form.Add("auth_token", c.AuthToken)
	}
	for name, value := range params {
		form.Add(name, fmt.Sprintf("%v", value))
	}
	return form.Encode(), nil
}
// doCall POSTs a form-encoded payload to url, attaching the OAuth bearer
// token (when configured) and any extra headers, and returns the raw
// response body. Responses with status >= 500 are treated as errors.
func (c Client) doCall(url string, payload string) ([]byte, error) {
	var body []byte
	c.Logger.Debugf("sending payload: %s", payload)
	req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer([]byte(payload)))
	if err != nil {
		return body, err
	}
	req.Header.Add("Accept", "application/json")
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	if c.OAuthToken != nil {
		t, err := c.OAuthToken.Token()
		if err != nil {
			return nil, err
		}
		if t.Type() != "Bearer" {
			return nil, errors.New("internal-apis requires an oAuth token of type 'Bearer'")
		}
		t.SetAuthHeader(req)
	}
	for k, v := range c.extraHeaders {
		req.Header.Set(k, v)
	}
	client := &http.Client{Timeout: timeout}
	r, err := client.Do(req)
	if err != nil {
		return body, err
	}
	// Close the body on every path. Previously the defer sat below the 5xx
	// early return, leaking the response body whenever the server errored.
	defer r.Body.Close()
	if r.StatusCode >= 500 {
		return body, fmt.Errorf("server returned non-OK status: %v", r.StatusCode)
	}
	return ioutil.ReadAll(r.Body)
}
// CallResource calls a remote internal-apis server resource, returning a response,
// wrapped into standardized API Response struct.
func (c Client) CallResource(object, method string, params map[string]interface{}) (ResponseData, error) {
var d data
payload, err := c.prepareParams(params)
if err != nil {
return d, err
}
body, err := c.doCall(c.getEndpointURL(object, method), payload)
if err != nil {
return d, err
}
var ar APIResponse
err = json.Unmarshal(body, &ar)
if err != nil {
return d, err
}
if !ar.Success {
return d, APIError{errors.New(*ar.Error)}
}
if v, ok := ar.Data.([]interface{}); ok {
d.array = v
} else if v, ok := ar.Data.(map[string]interface{}); ok {
d.obj = v
}
return d, err
}
// Call calls a remote internal-apis server, returning a response,
// wrapped into standardized API Response struct.
func (c Client) Call(path string, params map[string]interface{}) (ResponseData, error) {
var d data
payload, err := c.prepareParams(params)
if err != nil {
return d, err
}
body, err := c.doCall(c.getEndpointURLFromPath(path), payload)
if err != nil {
return d, err
}
var ar APIResponse
err = json.Unmarshal(body, &ar)
if err != nil {
return d, err
}
if !ar.Success {
return d, APIError{errors.New(*ar.Error)}
}
if v, ok := ar.Data.([]interface{}); ok {
d.array = v
} else if v, ok := ar.Data.(map[string]interface{}); ok {
d.obj = v
}
return d, err
}
// UserMe returns user details for the user associated with the current auth_token.
func (c Client) UserMe() (ResponseData, error) {
	noParams := map[string]interface{}{}
	return c.CallResource(userObjectPath, userMeMethod, noParams)
}
// UserHasVerifiedEmail calls has_verified_email method.
func (c Client) UserHasVerifiedEmail() (ResponseData, error) {
	noParams := map[string]interface{}{}
	return c.CallResource(userObjectPath, userHasVerifiedEmailMethod, noParams)
}

View file

@ -0,0 +1,182 @@
package lbryinc
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"golang.org/x/oauth2"
"github.com/stretchr/testify/assert"
)
func launchDummyServer(lastReq **http.Request, path, response string, status int) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if lastReq != nil {
*lastReq = &*r
}
authT := r.FormValue("auth_token")
if authT == "" {
accessT := r.Header.Get("Authorization")
if accessT == "" {
w.WriteHeader(http.StatusUnauthorized)
return
}
}
if r.URL.Path != path {
fmt.Printf("path doesn't match: %v != %v", r.URL.Path, path)
w.WriteHeader(http.StatusNotFound)
} else {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.WriteHeader(status)
w.Write([]byte(response))
}
}))
}
// TestUserMe verifies that UserMe parses the user object from the API response.
func TestUserMe(t *testing.T) {
	srv := launchDummyServer(nil, makeMethodPath(userObjectPath, userMeMethod), userMeResponse, http.StatusOK)
	defer srv.Close()
	client := NewClient("realToken", &ClientOpts{ServerAddress: srv.URL})
	resp, err := client.UserMe()
	assert.Nil(t, err)
	obj, err := resp.Object()
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, "user@lbry.tv", obj["primary_email"])
}
// TestListFiltered exercises CallResource against an endpoint that returns an array payload.
func TestListFiltered(t *testing.T) {
	srv := launchDummyServer(nil, "/file/list_filtered", listFilteredResponse, http.StatusOK)
	defer srv.Close()
	client := NewClient("realToken", &ClientOpts{ServerAddress: srv.URL})
	resp, err := client.CallResource("file", "list_filtered", map[string]interface{}{"with_claim_id": "true"})
	assert.Nil(t, err)
	assert.True(t, resp.IsArray())
	if _, err := resp.Array(); err != nil {
		t.Fatal(err)
	}
}
// TestUserHasVerifiedEmail verifies parsing of the has_verified_email response.
func TestUserHasVerifiedEmail(t *testing.T) {
	ts := launchDummyServer(nil, makeMethodPath(userObjectPath, userHasVerifiedEmailMethod), userHasVerifiedEmailResponse, http.StatusOK)
	defer ts.Close()
	c := NewClient("realToken", &ClientOpts{ServerAddress: ts.URL})
	r, err := c.UserHasVerifiedEmail()
	assert.Nil(t, err)
	robj, err := r.Object()
	if err != nil {
		// Stop immediately: continuing (the old t.Error) would assert
		// against a nil map and report misleading secondary failures.
		t.Fatal(err)
	}
	assert.EqualValues(t, 12345, robj["user_id"])
	assert.Equal(t, true, robj["has_verified_email"])
}
// TestUserHasVerifiedEmailOAuth verifies the same flow when authenticating
// via an OAuth token source instead of an auth_token.
func TestUserHasVerifiedEmailOAuth(t *testing.T) {
	ts := launchDummyServer(nil, makeMethodPath(userObjectPath, userHasVerifiedEmailMethod), userHasVerifiedEmailResponse, http.StatusOK)
	defer ts.Close()
	c := NewOauthClient(oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "Test-Access-Token"}), &ClientOpts{ServerAddress: ts.URL})
	r, err := c.UserHasVerifiedEmail()
	assert.Nil(t, err)
	robj, err := r.Object()
	if err != nil {
		// Stop immediately: continuing (the old t.Error) would assert
		// against a nil map and report misleading secondary failures.
		t.Fatal(err)
	}
	assert.EqualValues(t, 12345, robj["user_id"])
	assert.Equal(t, true, robj["has_verified_email"])
}
// TestRemoteIP checks that a configured RemoteIP is forwarded to the server
// via the X-Forwarded-For header.
func TestRemoteIP(t *testing.T) {
	var captured *http.Request
	srv := launchDummyServer(&captured, makeMethodPath(userObjectPath, userMeMethod), userMeResponse, http.StatusOK)
	defer srv.Close()
	client := NewClient("realToken", &ClientOpts{ServerAddress: srv.URL, RemoteIP: "8.8.8.8"})
	_, err := client.UserMe()
	assert.Nil(t, err)
	assert.Equal(t, []string{"8.8.8.8"}, captured.Header["X-Forwarded-For"])
}
// TestWrongToken checks that an invalid auth token surfaces as an APIError.
// NOTE(review): no ServerAddress is supplied, so this talks to the default
// (production) server and requires network access — confirm this is intended.
func TestWrongToken(t *testing.T) {
	client := NewClient("zcasdasc", nil)
	resp, err := client.UserHasVerifiedEmail()
	assert.False(t, resp.IsObject())
	assert.EqualError(t, err, "api error: could not authenticate user")
	assert.ErrorAs(t, err, &APIError{})
}
// TestHTTPError ensures transport-level failures come back as plain errors
// (not APIError). NOTE(review): the expected message embeds resolver output,
// which may differ between platforms.
func TestHTTPError(t *testing.T) {
	client := NewClient("zcasdasc", &ClientOpts{ServerAddress: "http://lolcathost"})
	resp, err := client.UserHasVerifiedEmail()
	assert.False(t, resp.IsObject())
	assert.EqualError(t, err, `Post "http://lolcathost/user/has_verified_email": dial tcp: lookup lolcathost: no such host`)
}
// TestGatewayError ensures 5xx responses are reported as errors.
func TestGatewayError(t *testing.T) {
	srv := launchDummyServer(nil, makeMethodPath(userObjectPath, userHasVerifiedEmailMethod), "", http.StatusBadGateway)
	defer srv.Close()
	client := NewClient("zcasdasc", &ClientOpts{ServerAddress: srv.URL})
	resp, err := client.UserHasVerifiedEmail()
	assert.False(t, resp.IsObject())
	assert.EqualError(t, err, `server returned non-OK status: 502`)
}
// userMeResponse is a canned successful user/me payload served by the dummy server.
const userMeResponse = `{
"success": true,
"error": null,
"data": {
"id": 12345,
"language": "en",
"given_name": null,
"family_name": null,
"created_at": "2019-01-17T12:13:06Z",
"updated_at": "2019-05-02T13:57:59Z",
"invited_by_id": null,
"invited_at": null,
"invites_remaining": 0,
"invite_reward_claimed": false,
"is_email_enabled": true,
"manual_approval_user_id": 654,
"reward_status_change_trigger": "manual",
"primary_email": "user@lbry.tv",
"has_verified_email": true,
"is_identity_verified": false,
"is_reward_approved": true,
"groups": []
}
}`
// userHasVerifiedEmailResponse is a canned successful user/has_verified_email payload.
const userHasVerifiedEmailResponse = `{
"success": true,
"error": null,
"data": {
"user_id": 12345,
"has_verified_email": true
}
}`
// listFilteredResponse is a canned payload whose "data" is a JSON array,
// exercising the array branch of the response parser.
const listFilteredResponse = `{
"success": true,
"error": null,
"data": [
{
"claim_id": "322ce77e9085d9da42279c790f7c9755b4916fca",
"outpoint": "20e04af21a569061ced7aa1801a43b4ed4839dfeb79919ea49a4059c7fe114c5:0"
},
{
"claim_id": "61496c567badcd98b82d9a700a8d56fd8a5fa8fb",
"outpoint": "657e4ec774524b326f9d3ecb9f468ea085bd1f3d450565f0330feca02e8fd25b:0"
}
]
}`

View file

@ -234,7 +234,7 @@ func TestConversions(t *testing.T) {
if bp, boolTest := ct.d.(*bool); boolTest && *bp != ct.wantbool && ct.wanterr == "" {
errf("want bool %v, got %v", ct.wantbool, *bp)
}
if !ct.wanttime.IsNull() && !ct.wanttime.Equal(getTimeValue(ct.d)) {
if !ct.wanttime.IsZero() && !ct.wanttime.Equal(getTimeValue(ct.d)) {
errf("want time %v, got %v", ct.wanttime, getTimeValue(ct.d))
}
if ct.wantnil && *ct.d.(**int64) != nil {

Some files were not shown because too many files have changed in this diff Show more