big switch to jekyll

Alex Grintsvayg 2018-12-20 19:00:02 -05:00
parent db8421f499
commit 4e38c384b8
17 changed files with 199 additions and 1843 deletions

5
.gitignore vendored Normal file

@@ -0,0 +1,5 @@
_site
.sass-cache
.jekyll-metadata
.bundle
vendor

24
404.html Normal file

@@ -0,0 +1,24 @@
---
layout: default
---

<style type="text/css" media="screen">
  .container {
    margin: 10px auto;
    max-width: 600px;
    text-align: center;
  }
  h1 {
    margin: 30px 0;
    font-size: 4em;
    line-height: 1;
    letter-spacing: -1px;
  }
</style>

<div class="container">
  <h1>404</h1>

  <p><strong>Page not found :(</strong></p>
  <p>The requested page could not be found.</p>
</div>

19
Gemfile Normal file

@@ -0,0 +1,19 @@
source "https://rubygems.org"

gem "jekyll", "~> 3.8.5"

# If you want to use GitHub Pages, remove the "gem "jekyll"" above and
# uncomment the line below. To upgrade, run `bundle update github-pages`.
# gem "github-pages", group: :jekyll_plugins

# If you have any plugins, put them here!
group :jekyll_plugins do
  gem "jekyll-feed", "~> 0.6"
end

# Windows does not include zoneinfo files, so bundle the tzinfo-data gem
gem "tzinfo-data", platforms: [:mingw, :mswin, :x64_mingw, :jruby]

# Performance-booster for watching directories on Windows
gem "wdm", "~> 0.1.0" if Gem.win_platform?

67
Gemfile.lock Normal file

@@ -0,0 +1,67 @@
GEM
  remote: https://rubygems.org/
  specs:
    addressable (2.5.2)
      public_suffix (>= 2.0.2, < 4.0)
    colorator (1.1.0)
    concurrent-ruby (1.1.4)
    em-websocket (0.5.1)
      eventmachine (>= 0.12.9)
      http_parser.rb (~> 0.6.0)
    eventmachine (1.2.7)
    ffi (1.9.25)
    forwardable-extended (2.6.0)
    http_parser.rb (0.6.0)
    i18n (0.9.5)
      concurrent-ruby (~> 1.0)
    jekyll (3.8.5)
      addressable (~> 2.4)
      colorator (~> 1.0)
      em-websocket (~> 0.5)
      i18n (~> 0.7)
      jekyll-sass-converter (~> 1.0)
      jekyll-watch (~> 2.0)
      kramdown (~> 1.14)
      liquid (~> 4.0)
      mercenary (~> 0.3.3)
      pathutil (~> 0.9)
      rouge (>= 1.7, < 4)
      safe_yaml (~> 1.0)
    jekyll-feed (0.11.0)
      jekyll (~> 3.3)
    jekyll-sass-converter (1.5.2)
      sass (~> 3.4)
    jekyll-watch (2.1.2)
      listen (~> 3.0)
    kramdown (1.17.0)
    liquid (4.0.1)
    listen (3.1.5)
      rb-fsevent (~> 0.9, >= 0.9.4)
      rb-inotify (~> 0.9, >= 0.9.7)
      ruby_dep (~> 1.2)
    mercenary (0.3.6)
    pathutil (0.16.2)
      forwardable-extended (~> 2.6)
    public_suffix (3.0.3)
    rb-fsevent (0.10.3)
    rb-inotify (0.10.0)
      ffi (~> 1.0)
    rouge (3.3.0)
    ruby_dep (1.5.0)
    safe_yaml (1.0.4)
    sass (3.7.2)
      sass-listen (~> 4.0.0)
    sass-listen (4.0.0)
      rb-fsevent (~> 0.9, >= 0.9.4)
      rb-inotify (~> 0.9, >= 0.9.7)

PLATFORMS
  ruby

DEPENDENCIES
  jekyll (~> 3.8.5)
  jekyll-feed (~> 0.6)
  tzinfo-data

BUNDLED WITH
   1.16.6

18
_config.yml Normal file

@@ -0,0 +1,18 @@
title: LBRY Protocol Specification
email: grin@lbry.io
baseurl: "" # the subpath of your site, e.g. /blog
url: "" # the base hostname & protocol for your site, e.g. http://example.com

markdown: kramdown

# Exclude from processing.
# The following items will not be processed, by default. Create a custom list
# to override the default setting.
# exclude:
#   - Gemfile
#   - Gemfile.lock
#   - node_modules
#   - vendor/bundle/
#   - vendor/cache/
#   - vendor/gems/
#   - vendor/ruby/


@@ -1,7 +1,26 @@
<!DOCTYPE html>
<html>
<head>
<title>{{ site.title }}</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width,initial-scale=1">
<link rel="stylesheet" type="text/css" href="normalize.css">
<link rel="stylesheet" type="text/css" href="tocbot.css">
<link rel="stylesheet" type="text/css" href="style.css">
</head>
<body>
<main>
<h1>LBRY: A Decentralized Digital Content Marketplace</h1>
<div class="toc-menu">Menu</div>
<nav class="toc"></nav>
<div id="content">
{{ content }}
</div>
</main>
<script src="https://hypothes.is/embed.js" async></script>
<script src="tocbot.min.js"></script>
<script>
function ready(fn) {
@@ -29,6 +48,18 @@
}
ready(function() {
// Handle external links
const links = document.links;
for (var i = 0; i < links.length; i++) {
if (links[i].hostname != window.location.hostname) {
links[i].target = '_blank';
links[i].className += ' external-link';
links[i].rel = "noopener";
}
}
// TOCbot
var options = {
tocSelector: '.toc',
contentSelector: '#content',
@@ -54,7 +85,7 @@
tocbot.refresh(o);
}, 250);
});
})
</script>
</body>
</html>


@@ -1,271 +0,0 @@
#!/usr/bin/env bash
#
# Steps:
#
# 1. Download corresponding html file for some README.md:
# curl -s $1
#
# 2. Discard rows that lack the substring 'user-content-' (github's markup):
# awk '/user-content-/ { ...
#
# 3.1 Get last number in each row like ' ... </span></a>sitemap.js</h1'.
# It's a level of the current header:
# substr($0, length($0), 1)
#
# 3.2 Get level from 3.1 and insert corresponding number of spaces before '*':
# sprintf("%*s", substr($0, length($0), 1)*3, " ")
#
# 4. Find head's text and insert it inside "* [ ... ]":
# substr($0, match($0, /a>.*<\/h/)+2, RLENGTH-5)
#
# 5. Find anchor and insert it inside "(...)":
# substr($0, match($0, "href=\"[^\"]+?\" ")+6, RLENGTH-8)
#
gh_toc_version="0.5.0"
gh_user_agent="gh-md-toc v$gh_toc_version"
#
# Download a README.md rendered into html, by its url.
#
#
gh_toc_load() {
local gh_url=$1
if type curl &>/dev/null; then
curl --user-agent "$gh_user_agent" -s "$gh_url"
elif type wget &>/dev/null; then
wget --user-agent="$gh_user_agent" -qO- "$gh_url"
else
echo "Please, install 'curl' or 'wget' and try again."
exit 1
fi
}
#
# Converts a local md file into html via the GitHub API
#
# ➥ curl -X POST --data '{"text": "Hello world github/linguist#1 **cool**, and #1!"}' https://api.github.com/markdown
# <p>Hello world github/linguist#1 <strong>cool</strong>, and #1!</p>'"
gh_toc_md2html() {
local gh_file_md=$1
URL=https://api.github.com/markdown/raw
TOKEN="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/token.txt"
if [ -f "$TOKEN" ]; then
URL="$URL?access_token=$(cat $TOKEN)"
fi
OUTPUT="$(curl -s --user-agent "$gh_user_agent" \
--data-binary @"$gh_file_md" -H "Content-Type:text/plain" \
$URL)"
if [ "$?" != "0" ]; then
echo "XXNetworkErrorXX"
fi
if [ "$(echo "${OUTPUT}" | awk '/API rate limit exceeded/')" != "" ]; then
echo "XXRateLimitXX"
else
echo "${OUTPUT}"
fi
}
#
# Checks whether the passed string is a url
#
gh_is_url() {
case $1 in
https* | http*)
echo "yes";;
*)
echo "no";;
esac
}
#
# TOC generator
#
gh_toc(){
local gh_src=$1
local gh_src_copy=$1
local gh_ttl_docs=$2
local need_replace=$3
if [ "$gh_src" = "" ]; then
echo "Please, enter URL or local path for a README.md"
exit 1
fi
# Show "TOC" string only if working with one document
if [ "$gh_ttl_docs" = "1" ]; then
# echo "Table of Contents"
# echo "================="
# echo ""
gh_src_copy=""
fi
if [ "$(gh_is_url "$gh_src")" == "yes" ]; then
gh_toc_load "$gh_src" | gh_toc_grab "$gh_src_copy"
if [ "${PIPESTATUS[0]}" != "0" ]; then
echo "Could not load remote document."
echo "Please check your url or network connectivity"
exit 1
fi
if [ "$need_replace" = "yes" ]; then
echo
echo "!! '$gh_src' is not a local file"
echo "!! Can't insert the TOC into it."
echo
fi
else
local rawhtml=$(gh_toc_md2html "$gh_src")
if [ "$rawhtml" == "XXNetworkErrorXX" ]; then
echo "Parsing local markdown file requires access to github API"
echo "Please make sure curl is installed and check your network connectivity"
exit 1
fi
if [ "$rawhtml" == "XXRateLimitXX" ]; then
echo "Parsing local markdown file requires access to github API"
echo "Error: You exceeded the hourly limit. See: https://developer.github.com/v3/#rate-limiting"
TOKEN="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/token.txt"
echo "or place github auth token here: $TOKEN"
exit 1
fi
local toc=$(echo "$rawhtml" | gh_toc_grab "$gh_src_copy")
if [ "$need_replace" = "yes" ]; then
local ts="<\!--ts-->"
local te="<\!--te-->"
local tmp="${gh_src}.tmp"
# http://fahdshariff.blogspot.ru/2012/12/sed-mutli-line-replacement-between-two.html
# fix sed on mac
local sed='sed -i'
if [[ "`uname`" == "Darwin" ]]; then
sed='sed -i ""'
fi
# clear old TOC
$sed "/${ts}/,/${te}/{//!d;}" "$gh_src"
# create toc file
echo "${toc}" > "${tmp}"
# count headers above ts
local linesAbove=$(grep -n -- "${ts}" "${gh_src}" | cut -d: -f 1)
if [ -n "$linesAbove" ]; then
# skip headers above ts
local skip=$(head -n "$linesAbove" "${gh_src}" | grep '^\w*#' | wc -l)
$sed "1,${skip}d" "${tmp}"
# unindent file
local minLeadingSpaces=10000
while IFS='' read line; do
local leadingSpaces=$(echo "$line" | grep -o "^\s\+" | tr -d '\n' | wc -c)
if [ "$leadingSpaces" -lt "$minLeadingSpaces" ]; then
minLeadingSpaces=$leadingSpaces
fi
done < "${tmp}"
if [ "$minLeadingSpaces" -gt 0 ]; then
$sed "s/^.\{${minLeadingSpaces}\}//g" "${tmp}"
fi
# echo "$(awk 'NR==1 && match($0, /^ +/){n=RLENGTH} {sub("^ {"n"}", "")} 1' "${tmp}")" > "${tmp}"
fi
# insert toc file
$sed "/${ts}/r ${tmp}" "$gh_src"
# delete tmp file
toc=$(cat "${tmp}")
rm "${tmp}"
fi
# echo "$toc"
fi
}
#
# Grabber of the TOC from rendered html
#
# $1 — the source url of the document.
# It's needed if the TOC is generated for multiple documents.
#
gh_toc_grab() {
# if closed <h[1-6]> is on the new line, then move it on the prev line
# for example:
# was: The command <code>foo1</code>
# </h1>
# became: The command <code>foo1</code></h1>
sed -e ':a' -e 'N' -e '$!ba' -e 's/\n<\/h/<\/h/g' |
# find strings that corresponds to template
grep -E -o '<a.*id="user-content-[^"]*".*</h[1-6]' |
# remove code tags
sed 's/<code>//' | sed 's/<\/code>//' |
# now all rows are like:
# <a id="user-content-..." href="..."><span ...></span></a> ... </h1
# format result line
# * $0 — whole string
echo -e "$(awk -v "gh_url=$1" '{
print sprintf("%*s", substr($0, length($0), 1)*3, " ") "* [" substr($0, match($0, /a>.*<\/h/)+2, RLENGTH-5)"](" gh_url substr($0, match($0, "href=\"[^\"]+?\" ")+6, RLENGTH-8) ")"}' | sed 'y/+/ /; s/%/\\x/g')"
}
#
# Returns filename only from full path or url
#
gh_toc_get_filename() {
echo "${1##*/}"
}
#
# Option handlers
#
gh_toc_app() {
local app_name=$(basename $0)
local need_replace="no"
if [ "$1" = '--help' ] || [ $# -eq 0 ] ; then
echo "GitHub TOC generator ($app_name): $gh_toc_version"
echo ""
echo "Usage:"
echo " $app_name [--insert] src [src] Create TOC for a README file (url or local path)"
echo " $app_name - Create TOC for markdown from STDIN"
echo " $app_name --help Show help"
echo " $app_name --version Show version"
return
fi
if [ "$1" = '--version' ]; then
echo "$gh_toc_version"
return
fi
if [ "$1" = "-" ]; then
if [ -z "$TMPDIR" ]; then
TMPDIR="/tmp"
elif [ -n "$TMPDIR" -a ! -d "$TMPDIR" ]; then
mkdir -p "$TMPDIR"
fi
local gh_tmp_md
gh_tmp_md=$(mktemp $TMPDIR/tmp.XXXXXX)
while read input; do
echo "$input" >> "$gh_tmp_md"
done
gh_toc_md2html "$gh_tmp_md" | gh_toc_grab ""
return
fi
if [ "$1" = '--insert' ]; then
need_replace="yes"
shift
fi
for md in "$@"
do
# echo ""
gh_toc "$md" "$#" "$need_replace"
done
# echo ""
# echo "Created by [gh-md-toc](https://github.com/ekalinin/github-markdown-toc)"
}
#
# Entry point
#
gh_toc_app "$@"

Binary file not shown.


@@ -1 +0,0 @@
If binaries for your OS are missing, download them from https://github.com/mmarkdown/mmark/releases

Binary file not shown.


@@ -1,62 +0,0 @@
#!/bin/bash

set -euo pipefail
# set -x

HTML=${1:-}
MARKDOWN=${2:-}

if [ -z "$HTML" -o -z "$MARKDOWN" ]; then
  echo "Usage: $0 HTML MARKDOWN"
  exit 1
fi

if [ ! -f "${HTML}" ]; then
  echo "HTML file not found"
  exit 1
fi

if [ ! -f "${MARKDOWN}" ]; then
  echo "MARKDOWN file not found"
  exit 1
fi

regex='<h([2-6]) id="([^"]+)">([^<]+)</h'
toc=''

while read line; do
  if [[ $line =~ $regex ]]; then
    level="${BASH_REMATCH[1]}"
    id="${BASH_REMATCH[2]}"
    header="${BASH_REMATCH[3]}"
    [ -n "$toc" ] && printf -v toc "$toc\n"
    for ((i=$level-2; i>0; i--)); do toc="${toc} "; done
    toc="${toc}* [${header}](#${id})"
  fi
done < "${HTML}"

# fix sed on mac
sed='sed -i'
if [[ "$(uname)" == "Darwin" ]]; then
  sed='sed -i ""'
fi

ts="<\!--ts-->"
te="<\!--te-->"

existingTOC="$(cat "$MARKDOWN" | sed -n "/${ts}/,/${te}/p" | sed '1,1d; $d')"

if [ "$toc" != "$existingTOC" ]; then
  tmp="$(mktemp)"
  function finish {
    rm "$tmp"
  }
  trap finish EXIT

  echo "${toc}" > "${tmp}"

  # clear old toc
  $sed "/${ts}/,/${te}/{//!d;}" "$MARKDOWN"
  # insert toc
  $sed "/${ts}/r ${tmp}" "$MARKDOWN"
fi


@@ -1,6 +0,0 @@
#!/bin/bash

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"

$DIR/bin/mmark-linux-amd64 -head "$DIR/head.html" -html "$DIR/index.md" > "$DIR/index.html"
$DIR/bin/toc.sh "$DIR/index.html" "$DIR/index.md"

8
dev.sh Executable file

@@ -0,0 +1,8 @@
#!/bin/bash

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"

(
  cd "$DIR"
  bundle exec jekyll serve --watch
)
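
This script replaces the old mmark/reflex build pipeline; local preview is now a single command. Assuming Jekyll's defaults, the site comes up on port 4000:

```
./dev.sh
# Jekyll's default server address, unless overridden in _config.yml:
#   http://127.0.0.1:4000/
```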

1350
index.html

File diff suppressed because it is too large

155
index.md

@@ -1,128 +1,13 @@
<main>
# LBRY: A Decentralized Digital Content Marketplace
<div class="toc-menu">Menu</div>
<nav class="toc"></nav>
<div id="content">
<noscript>
## Table of Contents
<!-- this TOC is autogenerated for github preview or js-challenged browsers -->
<!--ts-->
* [Introduction](#introduction)
* [Assumptions](#assumptions)
* [Overview](#overview)
* [Conventions and Terminology](#conventions-and-terminology)
* [Blockchain](#blockchain)
* [Stakes](#stakes)
* [Claims](#claims)
* [Claim Properties](#claim-properties)
* [Example Claim](#example-claim)
* [Claim Operations](#claim-operations)
* [Supports](#supports)
* [Support Properties](#support-properties)
* [Example Support](#example-support)
* [Support Operations](#support-operations)
* [Claimtrie](#claimtrie)
* [Statuses](#stake-statuses)
* [Accepted](#accepted)
* [Abandoned](#abandoned)
* [Active](#active)
* [Controlling (claims only)](#controlling)
* [Activation Delay](#activation-delay)
* [Claim Ordering](#claim-ordering)
* [Normalization](#normalization)
* [URLs](#urls)
* [Components](#components)
* [Stream Claim Name](#stream-claim-name)
* [Channel Claim Name](#channel-claim-name)
* [Channel Claim Name and Stream Claim Name](#channel-claim-name-and-stream-claim-name)
* [Claim ID](#claim-id)
* [Claim Sequence](#claim-sequence)
* [Bid Position](#bid-position)
* [Query Params](#query-params)
* [Grammar](#grammar)
* [Resolution](#url-resolution)
* [No Modifier](#no-modifier)
* [ClaimID](#claimid)
* [ClaimSequence](#claimsequence)
* [BidPosition](#bidposition)
* [ChannelClaimName and StreamClaimName](#channelclaimname-and-streamclaimname)
* [Design Notes](#design-notes)
* [Transactions](#transactions)
* [Operations and Opcodes](#operations-and-opcodes)
* [Stake Identifier Generation](#stake-identifier-generation)
* [OP_CLAIM_NAME](#op-claim-name)
* [OP_UPDATE_CLAIM](#op-update-claim)
* [OP_SUPPORT_CLAIM](#op-support-claim)
* [Proof of Payment](#proof-of-payment)
* [Consensus](#consensus)
* [Block Timing](#block-timing)
* [Difficulty Adjustment](#difficulty-adjustment)
* [Block Hash Algorithm](#block-hash-algorithm)
* [Block Rewards](#block-rewards)
* [Addresses](#addresses)
* [Metadata](#metadata)
* [Specification](#specification)
* [Example](#metadata-example)
* [Key Fields](#key-fields)
* [Stream Hash](#stream-hash)
* [Fee](#fee)
* [Title, Author, Description](#title-author-description)
* [Language](#language)
* [Thumbnail](#thumbnail)
* [Media Type](#media-type)
* [Channels (Identities)](#channels)
* [Signing](#signing)
* [Format Versions](#format-versions)
* [Signing Process](#signing-process)
* [Signature Validation](#signature-validation)
* [Validation](#metadata-validation)
* [Data](#data)
* [Encoding](#encoding)
* [Blobs](#blobs)
* [Streams](#streams)
* [Manifest Contents](#manifest-contents)
* [Stream Encoding](#stream-encoding)
* [Setup](#setup)
* [Content Blobs](#content-blobs)
* [Manifest Blob](#manifest-blob)
* [Stream Decoding](#stream-decoding)
* [Announce](#announce)
* [Distributed Hash Table](#distributed-hash-table)
* [Announcing to the DHT](#announcing-to-the-dht)
* [Download](#download)
* [Querying the DHT](#querying-the-dht)
* [Blob Exchange Protocol](#blob-exchange-protocol)
* [PriceCheck](#pricecheck)
* [DownloadCheck](#downloadcheck)
* [Download](#download-1)
* [UploadCheck](#uploadcheck)
* [Upload](#upload)
* [Reflectors and Data Markets](#reflectors-and-data-markets)
* [Appendix](#appendix)
* [Claim Activation Example](#claim-activation-example)
* [URL Resolution Examples](#url-resolution-examples)
<!--te-->
</noscript>
---
layout: spec
---
<!--
fixme final polish checklist:
- go over the paper to make sure we use active voice in most places (though passive is better sometimes)
- standardize when we say "we do X" vs "LBRY does X"
- check that all anchors work
- check css across browsers/mobile
- create links for [[terms]]
- ensure that all italicized terms are defined before they are used, or if that doesn't work, that they are linked
- don't say "the LBRY network". instead say "LBRY" or say nothing.
- make sure something is published at the URLs that we reference in this paper
- make sure something is published at the lbry URLs that we reference in this paper
-->
@@ -210,7 +95,7 @@ A _claim_ is a stake that stores metadata. There are two types of claims:
<dl>
<dt>stream claim</dt>
<dd>Declares the availability, access method, and publisher of a [[stream]].</dd>
<dd>Declares the availability, access method, and publisher of a stream.</dd>
<dt>channel claim</dt>
<dd>Creates a pseudonym that can be declared as the publisher of stream claims.</dd>
</dl>
@@ -249,7 +134,7 @@ Here is an example stream claim:
}
}
```
Figure: Note: the blockchain treats the `value` as an opaque byte string and does not impose any structure on it. Structure is applied and validated [higher in the stack](#metadata-validation). The value is shown here for demonstration purposes only.
Note: the blockchain treats the `value` as an opaque byte string and does not impose any structure on it. Structure is applied and validated [higher in the stack](#metadata-validation). The value is shown here for demonstration purposes only.
##### Claim Operations
@@ -382,17 +267,12 @@ Names in the claimtrie are normalized when performing any comparisons. This is n
### URLs
<!-- fixme:
jeremy: @grin does SPV need a mention inside of the document?
grin: no, but we should probably include an example for how to do the validation using the root hash. its not strictly necessary because its similar to how bitcoin does it. so maybe link to https://lbry.tech/resources/claimtrie (which needs an update) and add a validation example there?
-->
URLs are memorable references to claims. All URLs:
1. contain a name (see [Claim Properties](#claim-properties)), and
2. resolve to a single, specific claim for that name
The ultimate purpose of much of the claim and blockchain design is to provide memorable URLs that can be provably resolved by clients without a full copy of the blockchain (i.e. [Simplified Payment Verification](https://lbry.tech/glossary#spv) wallets).
The ultimate purpose of much of the claim and blockchain design is to provide memorable URLs that can be provably resolved by clients without a full copy of the blockchain (e.g. [Simplified Payment Verification](https://bitcoin.org/en/glossary/simplified-payment-verification) wallets).
#### Components
@@ -530,13 +410,13 @@ If multiple claims for the same name exist inside the same channel, they are res
#### Design Notes
The most contentious aspect of this design is the choice to resolve names without modifiers (sometimes called _vanity names_) to the claim with the highest effective amount. Before discussing the reasoning behind this decision, it should be noted that _only_ vanity URLs resolve this way. Permanent URLs that are short and memorable (e.g. `lbry://myclaimname#a`) exist and are available for the minimal cost of issuing a transaction.
The most contentious aspect of this design is the choice to resolve names without modifiers (sometimes called _vanity names_) to the claim with the highest effective amount. Before discussing the reasoning behind this decision, it should be noted that only vanity URLs resolve this way. Permanent URLs that are short and memorable (e.g. `lbry://myclaimname#a`) exist and are available for the minimal cost of issuing a transaction.
LBRY's resolution semantics stem from a dissatisfaction with existing name allocation designs. Most existing public name schemes are first-come, first-serve with a fixed price. This leads to several bad outcomes:
1. Speculation and extortion. Entrepreneurs are incentivized to register common names even if they don't intend to use them, in hopes of selling them to the proper owner in the future for an exorbitant price. While speculation in general can have positive externalities (stable prices and price signals), in this case it is pure value extraction. Speculation also harms the user experience, as users will see the vast majority of URLs sitting unused (cf. Namecoin).
2. Bureaucracy and transaction costs. While a centralized system can allow for an authority to use a process to reassign names based on trademark or other common use reasons, this system is also imperfect. Most importantly, it is a censorship point and an avenue for complete exclusion. Additionally, such processes are often arbitrary, change over time, involve significant transaction costs, and _still_ lead to names being used in ways that are contrary to user expectation (e.g. [nissan.com](http://nissan.com)).
2. Bureaucracy and transaction costs. While a centralized system can allow for an authority to use a process to reassign names based on trademark or other common use reasons, this system is also imperfect. Most importantly, it is a censorship point and an avenue for complete exclusion. Additionally, such processes are often arbitrary, change over time, involve significant transaction costs, and still lead to names being used in ways that are contrary to user expectation (e.g. [nissan.com](http://nissan.com)).
3. Inefficiencies from price controls. Any system that does not allow a price to float freely creates inefficiencies. If the set price is too low, there is speculation and rent-seeking. If the price is too high, people are excluded from a good that it would otherwise be beneficial for them to purchase.
@@ -559,7 +439,7 @@ OP_UPDATE_CLAIM <name> <claimID> <value> OP_2DROP OP_2DROP <outputScript>
OP_SUPPORT_CLAIM <name> <claimID> OP_2DROP OP_DROP <outputScript>
```
The `<name>` parameter is the [[name]] that the claim is associated with. The `<value>` is the protobuf-encoded claim metadata and optional channel signature (see [Metadata](#metadata) for more about this value). The `<claimID>` is the claim ID of a previous claim that is being updated or supported.
The `<name>` parameter is the name that the claim is associated with. The `<value>` is the protobuf-encoded claim metadata and optional channel signature (see [Metadata](#metadata) for more about this value). The `<claimID>` is the claim ID of a previous claim that is being updated or supported.
Each opcode will push a zero on to the execution stack. Those zeros, as well as any additional parameters after the opcodes, are all dropped by `OP_2DROP` and `OP_DROP`. `<outputScript>` can be any valid script, so a script using these opcodes is also a pay-to-pubkey script. This means that claimtrie scripts can be spent just like regular Bitcoin output scripts.
@@ -688,7 +568,7 @@ Here's some example metadata:
}
}
```
Figure: Some fields are omitted.
Note: Some fields are omitted.
### Key Fields
@@ -910,7 +790,7 @@ Decoding a stream is like encoding in reverse, and with the added step of verify
### Announce
After a [[stream]] is encoded, it must be _announced_ to the network. Announcing is the process of letting other nodes on the network know that a client has content available for download. LBRY tracks announced content using a distributed hash table.
After a stream is encoded, it must be _announced_ to the network. Announcing is the process of letting other nodes on the network know that a client has content available for download. LBRY tracks announced content using a distributed hash table.
#### Distributed Hash Table
@@ -919,7 +799,7 @@ specification fairly closely, with some modifications.
A distributed hash table is a key-value store that is spread over multiple nodes in a network. Nodes may join or leave the network anytime, with no central coordination necessary. Nodes communicate with each other using a peer-to-peer protocol to advertise what data they have and what they are best positioned to store.
When a host connects to the DHT, it announces the hash for every [[blob]] it wishes to share. Downloading a blob from the network requires querying the DHT for a list of hosts that announced that blob's hash (called _peers_), then requesting the blob from the peers directly.
When a host connects to the DHT, it announces the hash for every blob it wishes to share. Downloading a blob from the network requires querying the DHT for a list of hosts that announced that blob's hash (called _peers_), then requesting the blob from the peers directly.
#### Announcing to the DHT
@@ -932,11 +812,11 @@ Once the search is over, the host sends a `Store(target_hash)` request to the cl
### Download
A client wishing to download a [[stream]] must first query the [[DHT]] to find [[peers]] hosting the [[blobs]] in that stream, then contact those peers to download the blobs directly.
A client wishing to download a stream must first query the DHT to find peers hosting the blobs in that stream, then contact those peers to download the blobs directly.
#### Querying the DHT
Querying works almost the same way as [[announcing]]. A client looking for a target hash starts by sending iterative `FindValue(target_hash)` requests to the nodes it knows that are closest to the target hash. If a node receives a `FindValue` request and knows of any peers for the target hash, it responds with a list of those peers. Otherwise, it responds with the closest nodes to the target hash that it knows about. The client then queries those closer nodes using the same `FindValue` call. This way, each call either finds the client some peers, or brings it closer to finding those peers. If no peers are found and no closer nodes are being returned, the client determines that the target hash is not available and gives up.
Querying works almost the same way as announcing. A client looking for a target hash starts by sending iterative `FindValue(target_hash)` requests to the nodes it knows that are closest to the target hash. If a node receives a `FindValue` request and knows of any peers for the target hash, it responds with a list of those peers. Otherwise, it responds with the closest nodes to the target hash that it knows about. The client then queries those closer nodes using the same `FindValue` call. This way, each call either finds the client some peers, or brings it closer to finding those peers. If no peers are found and no closer nodes are being returned, the client determines that the target hash is not available and gives up.
#### Blob Exchange Protocol
@@ -945,7 +825,7 @@ Downloading a blob from a peer is governed by the _Blob Exchange Protocol_. It i
##### PriceCheck
PriceCheck gets the price that the server is charging for data transfer. It returns the prices in [[deweys]] per KB.
PriceCheck gets the price that the server is charging for data transfer. It returns the prices in deweys per KB.
##### DownloadCheck
@@ -1116,6 +996,3 @@ URL | Claim ID
_Edit this on Github at https://github.com/lbryio/spec_
</div></main> <!-- DON'T DELETE THIS, it's for the TOC -->
<script src="https://hypothes.is/embed.js" async></script>


@@ -124,6 +124,11 @@ a {
color: #0074D9;
}
a.external-link::after {
content: url('data:image/svg+xml;utf8,<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 512" color="#0074D9" width="13px" height="13px" class="ext-link-icon"><path fill="currentColor" d="M576 24v127.984c0 21.461-25.96 31.98-40.971 16.971l-35.707-35.709-243.523 243.523c-9.373 9.373-24.568 9.373-33.941 0l-22.627-22.627c-9.373-9.373-9.373-24.569 0-33.941L442.756 76.676l-35.703-35.705C391.982 25.9 402.656 0 424.024 0H552c13.255 0 24 10.745 24 24zM407.029 270.794l-16 16A23.999 23.999 0 0 0 384 303.765V448H64V128h264a24.003 24.003 0 0 0 16.97-7.029l16-16C376.089 89.851 365.381 64 344 64H48C21.49 64 0 85.49 0 112v352c0 26.51 21.49 48 48 48h352c26.51 0 48-21.49 48-48V287.764c0-21.382-25.852-32.09-40.971-16.97z" class=""></path></svg>');
padding-left: 3px;
}
code {
/* font-family: Consolas, "Andale Mono WT", "Andale Mono", "Lucida Console", "Lucida Sans Typewriter", "DejaVu Sans Mono", "Bitstream Vera Sans Mono", "Liberation Mono", "Nimbus Mono L", Monaco, "Courier New", Courier, monospace; */
font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;


@@ -1,8 +0,0 @@
#!/bin/bash

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"

(
  cd "$DIR"
  ./bin/reflex --decoration=none --start-service=true --glob='*' --inverse-regex='index\.html' -- sh -c "./build.sh"
)