Compare commits


26 commits

Author SHA1 Message Date
Brannon King
71fc94b1de
Merge pull request #410 from mjovanc/master 2021-11-26 18:37:08 -05:00
Marcus Cvjeticanin
927876e0c0 Changing download path artifactory URL for Boost 2021-11-26 20:52:04 +01:00
Alex Grin
73893d2f6b
Update README.md 2021-05-03 18:05:10 -04:00
Brannon King
7afc4c418a scale the cache change down a bit 2021-04-15 23:35:41 -04:00
Brannon King
db7e0b59e4 backport the rest of the depends adjustments from v17.4 2021-04-15 23:27:32 -04:00
Brannon King
1236c13a00 tweaked cache and minwork 2021-04-15 21:46:45 -04:00
Brannon King
a35385a5c0 backport wakeup fix 2021-04-15 21:15:40 -04:00
Brannon King
60a3d11df2 backport glib version fix, lsn_reset fix 2021-04-15 21:15:16 -04:00
Brannon King
cd7c2961dc rolling version 2021-04-15 10:13:33 -04:00
Alex Grintsvayg
03b1287359
Merge branch 'dnsseeds'
* dnsseeds:
  add a couple dns seeds
2021-04-13 10:12:14 -04:00
Alex Grintsvayg
131bcc8c9d
add a couple dns seeds 2021-04-12 13:49:13 -04:00
Alex Grintsvayg
29cc82db45
update github issue template 2021-04-05 10:07:20 -04:00
Brannon King
be118de19a raised default max tx fee 2019-11-26 15:16:59 -07:00
Brannon King
bf8cb69987 worked around an off-by-1 issue on the normalization fork block 2019-11-26 15:16:59 -07:00
Brannon King
37d177178f changed flush to have min height
don't flush blocks on regtest
2019-11-26 15:16:59 -07:00
gahag
f05b5973ae Implement basic log rotation (closes #211) (#344)
* Remove shrinkdebugfile flag (#211)

To implement a basic form of log rotation, two instances of the log file are to
be adopted: one for the current execution, and one for the previous
execution. On startup, if the log file exists, it will be renamed into the old
log file. This implies the deprecation and removal of the log shrink flag, since
the log is no longer forever growing.

* Implement log backup

To implement a basic form of log rotation, two instances of the log file are to
be adopted: one for the current execution, and one for the previous
execution. On startup, if the log file exists, it is renamed into the old
log file. This means that you should always have logs for the last 2 executions.

closes #211
2019-10-29 14:21:04 -06:00
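The rotation scheme described in this commit message reduces to a rename-on-open step. A minimal sketch of that idea in C++, assuming Boost.Filesystem as used by the codebase; the free function `OpenLog` is hypothetical — the real change lives in `BCLog::Logger::OpenDebugLog` (see the src/logging.cpp hunk further below):

```cpp
#include <boost/filesystem.hpp>
#include <cstdio>

namespace fs = boost::filesystem;

// Keep exactly one previous log: rename "<name>" to "<name>.old" before
// opening a fresh file for the current execution.
FILE* OpenLog(const fs::path& log_path)
{
    if (fs::exists(log_path)) {
        fs::path old_path = log_path;
        old_path += ".old";              // e.g. debug.log -> debug.log.old
        fs::rename(log_path, old_path);  // replaces any prior .old file
    }
    return std::fopen(log_path.string().c_str(), "a");
}
```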
Aditya J Karia
4a3c2e6504 Docs: Updated README.md files with TOC and best practices in Mar… (#339)
* Added table of contents to README.md

* Restructured headings and contents to follow Markdown best Practices
2019-10-29 14:20:05 -06:00
Bharat Raghunathan
8010681915 Update hyperlinks in README (#326) 2019-10-29 14:11:49 -06:00
GwanYeong Kim
6fe70b58ce Fix 'Use $(...) notation instead of legacy backticked ....' issue in shell script 2019-10-29 14:05:48 -06:00
Eric Brian Anil
51ec0a92f7 MIT License badge
Added the MIT license badge that redirects to the license page
2019-10-21 07:37:04 -06:00
Thomas Zarebczan
d3a8722ea8
Merge pull request #340 from addy1510/master
Fix typo in README.md
2019-10-17 21:06:08 -04:00
addy1510
ab08f6b35e
Fix typo in README.md 2019-10-18 06:33:42 +05:30
Brannon King
76e3d8861c error w/o segwit after fork 2019-10-11 14:16:38 -06:00
Brannon King
b16356b927
added segwit instructions 2019-10-10 10:50:02 -06:00
Brannon King
1bbedef565 ensure we don't return witness data in the transaction w/o segwit rule 2019-10-09 21:53:07 -06:00
Jeremy Kauffman
e5f049e5ed mention mailing list on README 2019-10-02 18:01:46 -06:00
40 changed files with 3941 additions and 184 deletions


@@ -1,8 +1,8 @@
-<!-- This issue tracker is only for technical issues related to Bitcoin Core.
+<!-- This issue tracker is only for technical issues related to lbrycrd (the LBRY blockchain).
-General bitcoin questions and/or support requests are best directed to the Bitcoin StackExchange at https://bitcoin.stackexchange.com.
+General questions and/or support requests are best directed to the community chat at https://chat.lbry.org.
-For reporting security issues, please read instructions at https://bitcoincore.org/en/contact/.
+For reporting security issues, please email security@lbry.com.
 If the node is "stuck" during sync or giving "block checksum mismatch" errors, please ensure your hardware is stable by running memtest and observe CPU temperature with a load-test tool such as linpack before creating an issue! -->
@@ -13,7 +13,7 @@ If the node is "stuck" during sync or giving "block checksum mismatch" errors, p
 <!--- How reliably can you reproduce the issue, what are the steps to do so? -->
-<!-- What version of Bitcoin Core are you using, where did you get it (website, self-compiled, etc)? -->
+<!-- What version of lbrycrd are you using, where did you get it (website, self-compiled, etc)? -->
 <!-- What type of machine are you observing the error on (OS/CPU and disk type)? -->


@@ -14,7 +14,7 @@ jobs:
 - &build-template
 stage: build
 name: linux
-env: NAME=linux EXT=
+env: NAME=linux DOCKER_IMAGE=lbry/build_lbrycrd_gcc EXT=
 os: linux
 dist: xenial
 language: minimal
@@ -22,8 +22,9 @@ jobs:
 - docker
 install:
 - mkdir -p ${HOME}/ccache
-- docker pull $DOCKER_BUILD_IMAGE
+- docker pull $DOCKER_IMAGE
 script:
+- echo "build..."
 - docker run -v "$(pwd):/lbrycrd" -v "${HOME}/ccache:/ccache" -w /lbrycrd -e CCACHE_DIR=/ccache ${DOCKER_IMAGE} packaging/build_${NAME}_64bit.sh
 before_deploy:
 - mkdir -p dist
@@ -47,11 +48,11 @@ jobs:
 - <<: *build-template
 name: windows
-env: NAME=windows EXT=.exe
+env: NAME=windows DOCKER_IMAGE=lbry/build_lbrycrd EXT=.exe
 - <<: *build-template
 name: osx
-env: NAME=darwin EXT=
+env: NAME=darwin DOCKER_IMAGE=lbry/build_lbrycrd EXT=
 before_install:
 - mkdir -p ./depends/SDKs && pushd depends/SDKs && curl -C - ${MAC_OS_SDK} | tar --skip-old-files -xJ && popd
@@ -62,7 +63,7 @@ jobs:
 dist: xenial
 language: minimal
 git:
-depth: 3
+clone: false
 install:
 - mkdir -p testrun && cd testrun
 - curl http://build.lbry.io/lbrycrd/${TRAVIS_BRANCH}/lbrycrd-${NAME}-test.zip -o temp.zip
@@ -77,10 +78,11 @@ jobs:
 services:
 - docker
 script:
-- docker pull $DOCKER_WINE_IMAGE
+- docker pull lbry/wine
-- docker run -v "$(pwd):/test" -e "WINEDEBUG=-all" -e "TRIEHASH_FUZZER_BLOCKS=1000" -it $DOCKER_WINE_IMAGE wine "/test/test_lbrycrd.exe"
+- docker run -v "$(pwd):/test" -e "WINEDEBUG=-all" -e "TRIEHASH_FUZZER_BLOCKS=1000" -it lbry/wine wine "/test/test_lbrycrd.exe"
 - <<: *test-template
 os: osx
 osx_image: xcode8.3
 env: NAME=darwin

README.md

@@ -1,14 +1,35 @@
 # LBRYcrd - The LBRY blockchain
 [![Build Status](https://travis-ci.org/lbryio/lbrycrd.svg?branch=master)](https://travis-ci.org/lbryio/lbrycrd)
+[![MIT licensed](https://img.shields.io/dub/l/vibe-d.svg?style=flat)](https://github.com/lbryio/lbry-desktop/blob/master/LICENSE)
-LBRYcrd uses a blockchain similar to bitcoin's to implement an index and payment system for content on the LBRY network. It is a fork of bitcoin core. In addition to the libraries used by bitcoin, LBRYcrd also uses icu4c.
+LBRYcrd uses a blockchain similar to bitcoin's to implement an index and payment system for content on the LBRY network. It is a fork of [bitcoin core](https://github.com/bitcoin/bitcoin). In addition to the libraries used by bitcoin, LBRYcrd also uses [icu4c](https://github.com/unicode-org/icu/tree/master/icu4c).
 Please read the [lbry.tech overview](https://lbry.tech/overview) for a general understanding of the LBRY pieces. From there you could read the [LBRY spec](https://spec.lbry.com/) for specifics on the data in the blockchain.
+## Table of Contents
+1. [Installation](#installation)
+2. [Usage](#usage)
+   1. [Examples](#examples)
+   2. [Data directory](#data-directory)
+3. [Running from Source](#running-from-source)
+   1. [Ubuntu with pulled static dependencies](#ubuntu-with-pulled-static-dependencies)
+   2. [Ubuntu with local shared dependencies](#ubuntu-with-local-shared-dependencies)
+   3. [MacOS (cross-compiled)](<#macos-(cross-compiled)>)
+   4. [MacOS with local shared dependencies](#macos-with-local-shared-dependencies)
+   5. [Windows (cross-compiled)](<#windows-(cross-compiled)>)
+   6. [Use with CLion](#use-with-clion)
+4. [Contributing](#contributing)
+   - [Testnet](#testnet)
+5. [Mailing List](#mailing-list)
+6. [License](#license)
+7. [Security](#security)
+8. [Contact](#contact)
 ## Installation
 Latest binaries are available from https://github.com/lbryio/lbrycrd/releases. There is no installation procedure; the CLI binaries will run as-is and will have any uncommon dependencies statically linked into the binary. The QT GUI is not supported. LBRYcrd is distributed as a collection of executable files; traditional installers are not provided.
 ## Usage
@@ -16,7 +37,7 @@ The `lbrycrdd` executable will start a LBRYcrd node and connect you to the LBRYc
 to interact with lbrycrdd through the command line. Command-line help for both executables are available through
 the "--help" flag (e.g. `lbrycrdd --help`). Examples:
-#### Examples:
+#### Examples
 Run `./lbrycrdd -server -daemon` to start lbrycrdd in the background.
@@ -25,7 +46,8 @@ Run `./lbrycrd-cli -getinfo` to check for some basic information about your LBRY
 Run `./lbrycrd-cli help` to get a list of all commands that you can run. To get help on specific commands run `./lbrycrd-cli [command_name] help`
 Test locally:
-```
+```sh
 ./lbrycrdd -server -regtest -txindex # run this in its own window
 ./lbrycrd-cli -regtest generate 120 # mine 20 spendable coins
 ./lbrycrd-cli -regtest claimname my_name deadbeef 1 # hold a name claim with 1 coin
@@ -35,24 +57,26 @@ Test locally:
 ./lbrycrd-cli -regtest stop # kill lbrycrdd
 rm -fr ~/.lbrycrd/regtest/ # destroy regtest data
 ```
 For further understanding of a "regtest" setup, see the local stack setup instructions here: https://lbry.tech/resources/regtest-setup
 The CLI help is also browsable online at https://lbry.tech/api/blockchain
-#### Data directory:
+#### Data directory
 Lbrycrdd will use the below default data directories (changeable with -datadir):
-```
+```sh
 Windows: %APPDATA%\lbrycrd
 Mac: ~/Library/Application Support/lbrycrd
 Unix: ~/.lbrycrd
 ```
 The data directory contains various things such as your default wallet (wallet.dat), debug logs (debug.log), and blockchain data. You can optionally create a configuration file lbrycrd.conf in the default data directory which will be used by default when running lbrycrdd.
 For a list of configuration parameters, run `./lbrycrdd --help`. Below is a sample lbrycrd.conf to enable JSON RPC server on lbrycrdd.
-```
+```sh
 rpcuser=lbry
 rpcpassword=xyz123456790
 daemon=1
@@ -61,15 +85,19 @@ txindex=1
 ```
 ## Running from Source
-The easiest way to compile is to utilize the Docker image that contains the necessary compilers: lbry/build_lbrycrd. This will allow you to reproduce the build as made on our build servers. I this sample we map a local lbrycrd folder and a local ccache folder inside the image:
-```
+The easiest way to compile is to utilize the Docker image that contains the necessary compilers: lbry/build_lbrycrd. This will allow you to reproduce the build as made on our build servers. In this sample we map a local lbrycrd folder and a local ccache folder inside the image:
+```sh
 git clone https://github.com/lbryio/lbrycrd.git
 cd lbrycrd
 docker run -v "$(pwd):/lbrycrd" --rm -v "${HOME}/ccache:/ccache" -w /lbrycrd -e CCACHE_DIR=/ccache lbry/build_lbrycrd packaging/build_linux_64bit.sh
 ```
 Some examples of compiling directly:
-#### Ubuntu with pulled static dependencies:
-```
+#### Ubuntu with pulled static dependencies
+```sh
 sudo apt install build-essential git libtool autotools-dev automake pkg-config bsdmainutils curl ca-certificates
 git clone https://github.com/lbryio/lbrycrd.git
 cd lbrycrd
@@ -77,16 +105,20 @@ cd lbrycrd
 ./src/test/test_lbrycrd
 ```
 Other Linux distros would be similar. The build shell script is fairly trivial; take a peek at its contents.
-#### Ubuntu with local shared dependencies:
+#### Ubuntu with local shared dependencies
 Note: using untested dependencies may lead to conflicting results.
-```
+```sh
 sudo add-apt-repository ppa:bitcoin/bitcoin
 sudo apt-get update
 sudo apt-get install libdb4.8-dev libdb4.8++-dev libicu-dev libssl-dev libevent-dev \
 build-essential git libtool autotools-dev automake pkg-config bsdmainutils curl ca-certificates \
 libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-test-dev libboost-thread-dev
 # optionally include libminiupnpc-dev libzmq3-dev
 git clone https://github.com/lbryio/lbrycrd.git
@@ -97,11 +129,13 @@ make -j$(nproc)
 ./src/lbrycrdd -server ...
 ```
-#### MacOS (cross-compiled):
-```
+#### MacOS (cross-compiled)
+```sh
 sudo apt-get install clang llvm git libtool autotools-dev automake pkg-config bsdmainutils curl ca-certificates \
 libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-test-dev libboost-thread-dev
 git clone https://github.com/lbryio/lbrycrd.git
 cd lbrycrd
 # download MacOS SDK from your favorite source
@@ -110,9 +144,12 @@ tar ... extract SDK to depends/SDKs/MacOSX10.11.sdk
 ./packaging/build_darwin_64bit.sh
 ```
 Look in packaging/build_darwin_64bit.sh for further understanding.
-#### MacOS with local shared dependencies:
-```
+#### MacOS with local shared dependencies
+```sh
 brew install boost berkeley-db@4 icu4c libevent
 # fix conflict with gawk pulled first:
 brew reinstall readline
@@ -127,14 +164,17 @@ CONFIG_SITE=$(pwd)/depends/x86_64-apple-darwin15.6.0/share/config.site ./configu
 make -j$(sysctl -n hw.ncpu)
 ```
-#### Windows (cross-compiled):
+#### Windows (cross-compiled)
 Compiling on MS Windows (outside of WSL) is not supported. The Windows build is cross-compiled from Linux like so:
-```
+```sh
 sudo apt-get install build-essential git libtool autotools-dev automake pkg-config bsdmainutils curl ca-certificates \
 g++-mingw-w64-x86-64 mingw-w64-x86-64-dev
 update-alternatives --set x86_64-w64-mingw32-g++ /usr/bin/x86_64-w64-mingw32-g++-posix
 git clone https://github.com/lbryio/lbrycrd.git
 cd lbrycrd
 ./packaging/build_windows_64bit.sh
@@ -143,13 +183,16 @@ cd lbrycrd
 If you encounter any errors, please check `doc/build-*.md` for further instructions. If you're still stuck, [create an issue](https://github.com/lbryio/lbrycrd/issues/new) with the output of that command, your system info, and any other information you think might be helpful. The scripts in the packaging folder are simple and will grant extra light on the build process as needed.
-#### Use with CLion:
+#### Use with CLion
 CLion has not traditionally supported Autotools projects, although some progress on that is now in the works. We do include a cmake build file for compiling lbrycrd. See contrib/cmake. Alas, CLion doesn't support external projects in cmake, so that particular approach is also insufficient. CLion does support "compile_commands.json" projects. Fortunately, this can be easily generated for lbrycrd like so:
-```
+```sh
 pip install --user compiledb
 ./autogen.sh && ./configure --enable-static=no --enable-shared --with-pic --without-gui CXXFLAGS="-O0 -g" CFLAGS="-O0 -g" # or whatever normal lbrycrd config
 compiledb make -j10
 ```
 Then open the newly generated compile_commands.json file as a project in CLion. Debugging is supported if you compiled with `-g`. To enable that you will need to create a target in CLion by going to File -> Settings -> Build -> Custom Build Targets. Add an empty target with your choice of name. From there you can go to "Edit Configurations", typically found in a drop-down at the top of the editor. Add a Custom Build Application, select your new target, select the compiled file (i.e. test_lbrycrd or lbrycrdd, etc), and then add any necessary command line parameters. Ensure that there is nothing in the "Before launch" section.
 ## Contributing
@@ -167,8 +210,8 @@ regularly to indicate new official, stable release versions.
 Testing and code review is the bottleneck for development; we get more pull
 requests than we can review and test on short notice. Please be patient and help out by testing
 other people's pull requests, and remember this is a security-critical project where any mistake might cost people
-lots of money. Developers are strongly encouraged to write [unit tests](/doc/unit-tests.md) for new code and to
+lots of money. Developers are strongly encouraged to write [unit tests](/src/test/README.md) for new code and to
-submit new unit tests for old code. Unit tests are compiled by default and can be run with `src/test/test_lbrycrd`.
+submit new unit tests for old code. Unit tests are compiled by default and can be run with `src/test/test_lbrycrd`
 The Travis CI system makes sure that every pull request is built, and that unit and sanity tests are automatically run. See https://travis-ci.org/lbryio/lbrycrd
@@ -176,7 +219,11 @@ The Travis CI system makes sure that every pull request is built, and that unit
 Testnet is maintained for testing purposes and can be accessed using the command `./lbrycrdd -testnet`. If you would like to obtain testnet credits, please contact brannon@lbry.com or grin@lbry.com .
-It is easy to solo mine on testnet. (It's easy on mainnet too, but much harder to win.) For instructions see https://github.com/lbryio/sgminer-gm and https://github.com/lbryio/lbrycrd/tree/master/contrib/mining
+It is easy to solo mine on testnet. (It's easy on mainnet too, but much harder to win.) For instructions see [SGMiner](https://github.com/lbryio/sgminer-gm) and [Mining Contributions](https://github.com/lbryio/lbrycrd/tree/master/contrib/mining)
+## Mailing List
+We maintain a mailing list for notifications of upgrades, security issues, and soft/hard forks. To join, visit [https://lbry.com/forklist](https://lbry.com/forklist).
 ## License
@@ -184,11 +231,9 @@ This project is MIT licensed. For the full license, see [LICENSE](LICENSE).
 ## Security
-We take security seriously. Please contact security@lbry.com regarding any security issues.
+We take security seriously. Please contact [security@lbry.com](mailto:security@lbry.com) regarding any security issues.
-Our PGP key is [here](https://keybase.io/lbry/key.asc) if you need it.
+Our PGP key is [here](https://lbry.com/faq/pgp-key) if you need it.
 ## Contact
 The primary contact for this project is [@BrannonKing](https://github.com/BrannonKing) (brannon@lbry.com)


@@ -7,7 +7,7 @@ export LC_ALL=C
 set -e
 srcdir="$(dirname $0)"
 cd "$srcdir"
-if [ -z ${LIBTOOLIZE} ] && GLIBTOOLIZE="`which glibtoolize 2>/dev/null`"; then
+if [ -z ${LIBTOOLIZE} ] && GLIBTOOLIZE="$(which glibtoolize 2>/dev/null)"; then
 LIBTOOLIZE="${GLIBTOOLIZE}"
 export LIBTOOLIZE
 fi


@@ -3,9 +3,9 @@ AC_PREREQ([2.60])
 define(_CLIENT_VERSION_MAJOR, 0)
 define(_CLIENT_VERSION_MINOR, 17)
 define(_CLIENT_VERSION_REVISION, 3)
-define(_CLIENT_VERSION_BUILD, 1)
+define(_CLIENT_VERSION_BUILD, 3)
 define(_CLIENT_VERSION_IS_RELEASE, true)
-define(_COPYRIGHT_YEAR, 2019)
+define(_COPYRIGHT_YEAR, 2021)
 define(_COPYRIGHT_HOLDERS,[The %s developers])
 define(_COPYRIGHT_HOLDERS_SUBSTITUTION,[[LBRYcrd Core]])
 AC_INIT([LBRYcrd Core],[_CLIENT_VERSION_MAJOR._CLIENT_VERSION_MINOR._CLIENT_VERSION_REVISION],[https://github.com/lbryio/lbrycrd/issues],[lbrycrd],[https://lbry.com/])


@@ -36,13 +36,13 @@ if [ -z "${CODESIGN_ALLOCATE}" ]; then
 fi
 find ${TEMPDIR} -name "*.sign" | while read i; do
-SIZE=`stat -c %s "${i}"`
+SIZE=$(stat -c %s "${i}")
-TARGET_FILE="`echo "${i}" | sed 's/\.sign$//'`"
+TARGET_FILE="$(echo "${i}" | sed 's/\.sign$//')"
 echo "Allocating space for the signature of size ${SIZE} in ${TARGET_FILE}"
 ${CODESIGN_ALLOCATE} -i "${TARGET_FILE}" -a ${ARCH} ${SIZE} -o "${i}.tmp"
-OFFSET=`${PAGESTUFF} "${i}.tmp" -p | tail -2 | grep offset | sed 's/[^0-9]*//g'`
+OFFSET=$(${PAGESTUFF} "${i}.tmp" -p | tail -2 | grep offset | sed 's/[^0-9]*//g')
 if [ -z ${QUIET} ]; then
 echo "Attaching signature at offset ${OFFSET}"
 fi


@@ -27,19 +27,19 @@ ${CODESIGN} -f --file-list ${TEMPLIST} "$@" "${BUNDLE}"
 grep -v CodeResources < "${TEMPLIST}" | while read i; do
 TARGETFILE="${BUNDLE}/`echo "${i}" | sed "s|.*${BUNDLE}/||"`"
-SIZE=`pagestuff "$i" -p | tail -2 | grep size | sed 's/[^0-9]*//g'`
+SIZE=$(pagestuff "$i" -p | tail -2 | grep size | sed 's/[^0-9]*//g')
-OFFSET=`pagestuff "$i" -p | tail -2 | grep offset | sed 's/[^0-9]*//g'`
+OFFSET=$(pagestuff "$i" -p | tail -2 | grep offset | sed 's/[^0-9]*//g')
 SIGNFILE="${TEMPDIR}/${OUTROOT}/${TARGETFILE}.sign"
-DIRNAME="`dirname "${SIGNFILE}"`"
+DIRNAME="$(dirname "${SIGNFILE}")"
 mkdir -p "${DIRNAME}"
 echo "Adding detached signature for: ${TARGETFILE}. Size: ${SIZE}. Offset: ${OFFSET}"
 dd if="$i" of="${SIGNFILE}" bs=1 skip=${OFFSET} count=${SIZE} 2>/dev/null
 done
 grep CodeResources < "${TEMPLIST}" | while read i; do
-TARGETFILE="${BUNDLE}/`echo "${i}" | sed "s|.*${BUNDLE}/||"`"
+TARGETFILE="${BUNDLE}/$(echo "${i}" | sed "s|.*${BUNDLE}/||")"
 RESOURCE="${TEMPDIR}/${OUTROOT}/${TARGETFILE}"
-DIRNAME="`dirname "${RESOURCE}"`"
+DIRNAME="$(dirname "${RESOURCE}")"
 mkdir -p "${DIRNAME}"
 echo "Adding resource for: \"${TARGETFILE}\""
 cp "${i}" "${RESOURCE}"


@@ -36,7 +36,7 @@ insert into coins(name, symbol, symbol2, algo, enable, auto_ready, rpcuser, rpcp
 values('Local LBRY Instance', 'LBC', 'LBC', 'lbry', 1, 1, 'ruser', 'rpswd', '127.0.0.1', 19245, 1, 'utf-8', 0, 1, 0, 0, 0);
 exit
 ```
-Use port 19245 for testnet, port 9245 for main.
+Use port 19245 for testnet, port 9245 for main. Set usesegwit to 1 after the segwit fork is enabled on December 11, 2019.
 #### 3. Run the stratum server:
 ```
 docker run --network host -d lbry/yiimp_stratum
@@ -47,10 +47,11 @@ docker run --network host -it lbry/yiimp_stratum bash
 cat config/lbry.conf
 ./stratum config/lbry
 ```
+When testing with an ASIC you may need to modify the TCP server address in said lbry.conf file to be an external IP address.
 #### 4. Connect sgminer to it:
 ```
 sgminer -k lbry -o stratum+tcp://127.0.0.1:3334/ -D -T -O mn824Su1wX7ip8WcNYzXwwWqvBvkeWGRo6:x
 ```
 The username there is the account to receive payments from the pool. The password is unused. Tested with https://github.com/lbryio/sgminer-gm.
 You can use whatever miner you prefer.


@@ -23,7 +23,7 @@ TIMESERVER=http://timestamp.comodoca.com
 CERTFILE="win-codesign.cert"
 mkdir -p "${OUTSUBDIR}"
-basename -a `ls -1 "${SRCDIR}"/*-unsigned.exe` | while read UNSIGNED; do
+basename -a $(ls -1 "${SRCDIR}"/*-unsigned.exe) | while read UNSIGNED; do
 echo Signing "${UNSIGNED}"
 "${OSSLSIGNCODE}" sign -certs "${CERTFILE}" -t "${TIMESERVER}" -in "${SRCDIR}/${UNSIGNED}" -out "${WORKDIR}/${UNSIGNED}" "$@"
 "${OSSLSIGNCODE}" extract-signature -pem -in "${WORKDIR}/${UNSIGNED}" -out "${OUTSUBDIR}/${UNSIGNED}.pem" && rm "${WORKDIR}/${UNSIGNED}"


@@ -131,7 +131,7 @@ $(host_prefix)/share/config.site : config.site.in $(host_prefix)/.stamp_$(final_
 -e 's|@build_os@|$(build_os)|' \
 -e 's|@host_os@|$(host_os)|' \
 -e 's|@CFLAGS@|$(strip $(host_CFLAGS) $(host_$(release_type)_CFLAGS))|' \
--e 's|@CXXFLAGS@|$(strip $(host_CXXFLAGS) $(host_$(release_type)_CXXFLAGS))|' \
+-e 's|@CXXFLAGS@|$(strip -pipe $(host_$(release_type)_CXXFLAGS))|' \
 -e 's|@CPPFLAGS@|$(strip $(host_CPPFLAGS) $(host_$(release_type)_CPPFLAGS))|' \
 -e 's|@LDFLAGS@|$(strip $(host_LDFLAGS) $(host_$(release_type)_LDFLAGS))|' \
 -e 's|@allow_host_packages@|$(ALLOW_HOST_PACKAGES)|' \

File diff suppressed because it is too large


@@ -7,12 +7,13 @@ darwin_CC=clang -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) -isysroo
 darwin_CXX=clang++ -target $(host) -mmacosx-version-min=$(OSX_MIN_VERSION) -isysroot $(OSX_SDK) -mlinker-version=$(LD64_VERSION) -stdlib=libc++ -B $(host_prefix)/native/bin
 darwin_CFLAGS=-pipe
-darwin_CXXFLAGS=$(darwin_CFLAGS)
+darwin_CXXFLAGS=$(darwin_CFLAGS) -std=c++11
-darwin_release_CFLAGS=-O2
+darwin_release_CFLAGS=-O2 -g
 darwin_release_CXXFLAGS=$(darwin_release_CFLAGS)
-darwin_debug_CFLAGS=-Og
+darwin_debug_CFLAGS=-Og -g
-darwin_debug_CXXFLAGS=$(darwin_debug_CFLAGS)
+darwin_debug_CXXFLAGS=-O0 -g
 darwin_native_toolchain=native_cctools


@@ -1,11 +1,14 @@
 linux_CFLAGS=-pipe
-linux_CXXFLAGS=$(linux_CFLAGS)
+linux_CXXFLAGS=$(linux_CFLAGS) -std=c++11
-linux_release_CFLAGS=-O2
+linux_release_CFLAGS=-O3 -g
+ifeq (1,$(shell ldd --version | head -1 | awk '{print $$NF < 2.28}'))
+linux_release_CFLAGS+= -include $(BASEDIR)/glibc_version_header/force_link_glibc_2.19.h
+endif
 linux_release_CXXFLAGS=$(linux_release_CFLAGS)
-linux_debug_CFLAGS=-Og
+linux_debug_CFLAGS=-O1 -g
-linux_debug_CXXFLAGS=$(linux_debug_CFLAGS)
+linux_debug_CXXFLAGS=-O0 -g
 linux_debug_CPPFLAGS=-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC


@@ -1,10 +1,11 @@
 mingw32_CFLAGS=-pipe
-mingw32_CXXFLAGS=$(mingw32_CFLAGS)
+mingw32_CXXFLAGS=$(mingw32_CFLAGS) -std=c++11
-mingw32_release_CFLAGS=-O2
+mingw32_release_CFLAGS=-O2 -g
 mingw32_release_CXXFLAGS=$(mingw32_release_CFLAGS)
-mingw32_debug_CFLAGS=-O1
+mingw32_debug_CFLAGS=-O1 -g
-mingw32_debug_CXXFLAGS=$(mingw32_debug_CFLAGS)
+mingw32_debug_CXXFLAGS=-O0 -g
 mingw32_debug_CPPFLAGS=-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC


@@ -1,6 +1,6 @@
 package=bdb
 $(package)_version=4.8.30
-$(package)_download_path=http://download.oracle.com/berkeley-db
+$(package)_download_path=https://download.oracle.com/berkeley-db
 $(package)_file_name=db-$($(package)_version).NC.tar.gz
 $(package)_sha256_hash=12edc0df75bf9abd7f82f821795bcee50f42cb2e5f76a6a281b85732798364ef
 $(package)_build_subdir=build_unix
@@ -9,7 +9,7 @@ define $(package)_set_vars
 $(package)_config_opts=--disable-shared --enable-cxx --disable-replication
 $(package)_config_opts_mingw32=--enable-mingw
 $(package)_config_opts_linux=--with-pic
-$(package)_cxxflags=-std=c++11
+$(package)_cppflags_mingw32=-DUNICODE -D_UNICODE
 endef
 define $(package)_preprocess_cmds
@@ -29,3 +29,4 @@ endef
 define $(package)_stage_cmds
 $(MAKE) DESTDIR=$($(package)_staging_dir) install_lib install_include
 endef


@@ -1,6 +1,6 @@
 package=boost
 $(package)_version=1_69_0
-$(package)_download_path=https://dl.bintray.com/boostorg/release/1.69.0/source/
+$(package)_download_path=https://boostorg.jfrog.io/artifactory/main/release/1.69.0/source/
 $(package)_file_name=$(package)_$($(package)_version).tar.bz2
 $(package)_sha256_hash=8f32d4617390d1c2d16f26a27ab60d97807b35440d45891fa340fc2648b04406
 $(package)_dependencies=icu


@@ -22,7 +22,6 @@ define $(package)_preprocess_cmds
 PKG_CONFIG_SYSROOT_DIR=/ \
 PKG_CONFIG_LIBDIR=$(host_prefix)/lib/pkgconfig \
 PKG_CONFIG_PATH=$(host_prefix)/share/pkgconfig \
-sed -i.old 's/^GEN_DEPS.cc.*/& $(CXXFLAGS)/' source/config/mh-mingw* && \
 mkdir -p build && cd build && \
 ../source/runConfigureICU Linux $($(package)_standard_opts) CXXFLAGS=-std=c++11 && \
 $(MAKE) && cd ..
@@ -32,6 +31,8 @@ define $(package)_config_cmds
 PKG_CONFIG_SYSROOT_DIR=/ \
 PKG_CONFIG_LIBDIR=$(host_prefix)/lib/pkgconfig \
 PKG_CONFIG_PATH=$(host_prefix)/share/pkgconfig \
+sed -i.old 's|^GEN_DEPS.c=.*|& $($(package)_cflags)|' config/mh-mingw* && \
+sed -i.old 's|^GEN_DEPS.cc=.*|& $($(package)_cxxflags)|' config/mh-mingw* && \
 $($(package)_autoconf)
 endef


@@ -27,4 +27,5 @@ define $(package)_stage_cmds
 endef
 define $(package)_postprocess_cmds
+rm lib/*.la
 endef


@@ -1,6 +1,6 @@
 package=miniupnpc
 $(package)_version=2.0.20180203
-$(package)_download_path=http://miniupnp.free.fr/files
+$(package)_download_path=https://miniupnp.tuxfamily.org/files/
 $(package)_file_name=$(package)-$($(package)_version).tar.gz
 $(package)_sha256_hash=90dda8c7563ca6cd4a83e23b3c66dbbea89603a1675bfdb852897c2c9cc220b7


@@ -5,7 +5,7 @@ $(package)_file_name=$(package)-$($(package)_version).tar.gz
 $(package)_sha256_hash=8f9faeaebad088e772f4ef5e38252d472be4d878c6b3a2718c10a4fcebe7a41c
 define $(package)_set_vars
-$(package)_config_env=AR="$($(package)_ar)" RANLIB="$($(package)_ranlib)" CC="$($(package)_cc)"
+$(package)_config_env=AR="$($(package)_ar)" RANLIB="$($(package)_ranlib)" CC="$($(package)_cc) $($(package)_cflags) $($(package)_cppflags)"
 $(package)_config_opts=--prefix=$(host_prefix) --openssldir=$(host_prefix)/etc/openssl
 $(package)_config_opts+=no-camellia
 $(package)_config_opts+=no-capieng
@@ -42,7 +42,6 @@ $(package)_config_opts+=no-weak-ssl-ciphers
 $(package)_config_opts+=no-whirlpool
 $(package)_config_opts+=no-zlib
 $(package)_config_opts+=no-zlib-dynamic
-$(package)_config_opts+=$($(package)_cflags) $($(package)_cppflags)
 $(package)_config_opts_linux=-fPIC -Wa,--noexecstack
 $(package)_config_opts_x86_64_linux=linux-x86_64
 $(package)_config_opts_i686_linux=linux-generic32


@@ -4,7 +4,6 @@ $(package)_download_path=$(native_$(package)_download_path)
 $(package)_file_name=$(native_$(package)_file_name)
 $(package)_sha256_hash=$(native_$(package)_sha256_hash)
 $(package)_dependencies=native_$(package)
-$(package)_cxxflags=-std=c++11
 define $(package)_set_vars
 $(package)_config_opts=--disable-shared --with-protoc=$(build_prefix)/bin/protoc


@@ -6,9 +6,10 @@ $(package)_sha256_hash=bcbabe1e2c7d0eec4ed612e10b94b112dd5f06fcefa994a0c79a45d83
 $(package)_patches=0001-fix-build-with-older-mingw64.patch 0002-disable-pthread_set_name_np.patch
 define $(package)_set_vars
-$(package)_config_opts=--without-docs --disable-shared --without-libsodium --disable-curve --disable-curve-keygen --disable-perf --disable-Werror
+$(package)_config_opts=--without-docs --disable-shared --without-libsodium --disable-curve --disable-curve-keygen --disable-perf --disable-Werror --disable-drafts
+$(package)_config_opts += --without-libsodium --without-libgssapi_krb5 --without-pgm --without-norm --without-vmci
+$(package)_config_opts += --disable-libunwind --disable-radix-tree --without-gcov
 $(package)_config_opts_linux=--with-pic
-$(package)_cxxflags=-std=c++11
 endef
 define $(package)_preprocess_cmds
@@ -31,5 +32,5 @@ endef
 define $(package)_postprocess_cmds
 sed -i.old "s/ -lstdc++//" lib/pkgconfig/libzmq.pc && \
-rm -rf bin share
+rm -rf bin share lib/*.la
 endef


@@ -30,13 +30,13 @@ if which ccache >/dev/null; then
 fi
 pushd depends
-make -j`getconf _NPROCESSORS_ONLN` HOST=x86_64-apple-darwin14 NO_QT=1 V=1
+make -j$(getconf _NPROCESSORS_ONLN) HOST=x86_64-apple-darwin14 NO_QT=1 V=1
 popd
 ./autogen.sh
-DEPS_DIR=`pwd`/depends/x86_64-apple-darwin14
+DEPS_DIR=$(pwd)/depends/x86_64-apple-darwin14
 CONFIG_SITE=${DEPS_DIR}/share/config.site ./configure --enable-reduce-exports --without-gui --with-icu="${DEPS_DIR}" --enable-static --disable-shared
-make -j`getconf _NPROCESSORS_ONLN`
+make -j$(getconf _NPROCESSORS_ONLN)
 ${DEPS_DIR}/native/bin/x86_64-apple-darwin14-strip src/lbrycrdd src/lbrycrd-cli src/lbrycrd-tx
 if which ccache >/dev/null; then


@@ -20,13 +20,13 @@ export CXXFLAGS="${CXXFLAGS:--frecord-gcc-switches}"
 echo "CXXFLAGS set to $CXXFLAGS"
 cd depends
-make -j`getconf _NPROCESSORS_ONLN` HOST=x86_64-pc-linux-gnu NO_QT=1 V=1
+make -j$(getconf _NPROCESSORS_ONLN) HOST=x86_64-pc-linux-gnu NO_QT=1 V=1
 cd ..
 ./autogen.sh
-DEPS_DIR=`pwd`/depends/x86_64-pc-linux-gnu
+DEPS_DIR=$(pwd)/depends/x86_64-pc-linux-gnu
 CONFIG_SITE=${DEPS_DIR}/share/config.site ./configure --enable-static --disable-shared --with-pic --without-gui
-make -j`getconf _NPROCESSORS_ONLN`
+make -j$(getconf _NPROCESSORS_ONLN)
 strip src/lbrycrdd src/lbrycrd-cli src/lbrycrd-tx
 if which ccache >/dev/null; then
@@ -34,4 +34,4 @@ if which ccache >/dev/null; then
 ccache -s
 fi
 echo "Linux 64bit build is complete"


@@ -21,13 +21,13 @@ if which ccache >/dev/null; then
 fi
 pushd depends
-make -j`getconf _NPROCESSORS_ONLN` HOST=i686-w64-mingw32 NO_QT=1 V=1
+make -j$(getconf _NPROCESSORS_ONLN) HOST=i686-w64-mingw32 NO_QT=1 V=1
 popd
 ./autogen.sh
-DEPS_DIR=`pwd`/depends/i686-w64-mingw32
+DEPS_DIR=$(pwd)/depends/i686-w64-mingw32
 CONFIG_SITE=${DEPS_DIR}/share/config.site ./configure --prefix=/ --without-gui --with-icu="$DEPS_DIR" --enable-static --disable-shared
-make -j`getconf _NPROCESSORS_ONLN`
+make -j$(getconf _NPROCESSORS_ONLN)
 i686-w64-mingw32-strip src/lbrycrdd.exe src/lbrycrd-cli.exe src/lbrycrd-tx.exe
 if which ccache >/dev/null; then


@@ -20,13 +20,13 @@ if which ccache >/dev/null; then
 fi
 pushd depends
-make -j`getconf _NPROCESSORS_ONLN` HOST=x86_64-w64-mingw32 NO_QT=1 V=1
+make -j$(getconf _NPROCESSORS_ONLN) HOST=x86_64-w64-mingw32 NO_QT=1 V=1
 popd
 ./autogen.sh
-DEPS_DIR=`pwd`/depends/x86_64-w64-mingw32
+DEPS_DIR=$(pwd)/depends/x86_64-w64-mingw32
 CONFIG_SITE=${DEPS_DIR}/share/config.site ./configure --prefix=/ --without-gui --with-icu="$DEPS_DIR" --enable-static --disable-shared
-make -j`getconf _NPROCESSORS_ONLN`
+make -j$(getconf _NPROCESSORS_ONLN)
 x86_64-w64-mingw32-strip src/lbrycrdd.exe src/lbrycrd-cli.exe src/lbrycrd-tx.exe
 if which ccache >/dev/null; then


@@ -166,10 +166,10 @@ public:
 consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout = 1548288000; // Jan 24, 2019
 // The best chain should have at least this much work.
-consensus.nMinimumChainWork = uint256S("00000000000000000000000000000000000000000000024108e3204a44a57a5a"); //621000
+consensus.nMinimumChainWork = uint256S("000000000000000000000000000000000000000000000499ed6684d1bf6f6fd3"); //946000
 // By default assume that the signatures in ancestors of this block are valid.
-consensus.defaultAssumeValid = uint256S("7899464514d0d8854919e87eb234fd5f0c35d06418bd5fd3c1a8f7092b2a9317"); //620000
+consensus.defaultAssumeValid = uint256S("0d3b537afe49820e1c6efc555463f955251b1293c6e5130137e1e25744431172"); //946000
 /**
 * The message start string is designed to be unlikely to occur in normal data.
@@ -195,9 +195,11 @@ public:
 vSeeds.clear();
 vFixedSeeds.clear();
-vSeeds.emplace_back("dnsseed1.lbry.io"); // lbry.io
+vSeeds.emplace_back("dnsseed1.lbry.io"); // LBRY Inc
-vSeeds.emplace_back("dnsseed2.lbry.io"); // lbry.io
+vSeeds.emplace_back("dnsseed2.lbry.io"); // LBRY Inc
-vSeeds.emplace_back("dnsseed3.lbry.io"); // lbry.io
+vSeeds.emplace_back("dnsseed3.lbry.io"); // LBRY Inc
+vSeeds.emplace_back("seed.lbry.grin.io"); // Grin
+vSeeds.emplace_back("seed.allaboutlbc.com"); // Madiator2011
 base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1, 0x55);
 base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1, 0x7a);
@@ -205,8 +207,6 @@ public:
 base58Prefixes[EXT_PUBLIC_KEY] = {0x04, 0x88, 0xB2, 0x1E};
 base58Prefixes[EXT_SECRET_KEY] = {0x04, 0x88, 0xAD, 0xE4};
-vFixedSeeds = std::vector<SeedSpec6>(pnSeed6_main, pnSeed6_main + ARRAYLEN(pnSeed6_main));
 bech32_hrp = "lbc";
 vFixedSeeds = std::vector<SeedSpec6>(pnSeed6_main, pnSeed6_main + ARRAYLEN(pnSeed6_main));


@@ -935,10 +935,14 @@ bool CClaimTrieCacheBase::remove(T& value, const std::string& name, const COutPo
 nValidAtHeight = nHeight + getDelayForName(name);
 std::string adjusted = adjustNameForValidHeight(name, nValidAtHeight);
-if (removeFromQueue(adjusted, outPoint, value) || removeFromCache(name, outPoint, value, fCheckTakeover)) {
+auto rfq = removeFromQueue(adjusted, outPoint, value);
+if (rfq || removeFromCache(name, outPoint, value, fCheckTakeover)) {
 int expirationHeight = value.nHeight + expirationTime();
-if (auto itQueueRow = getExpirationQueueCacheRow<T>(expirationHeight, false))
+if (auto itQueueRow = getExpirationQueueCacheRow<T>(expirationHeight, false)) {
 eraseOutPoint(*itQueueRow, CNameOutPointType{adjusted, outPoint});
+if (adjusted != name) // workaround for an off-by-1 error in normalization block (wherein we might get both):
+eraseOutPoint(*itQueueRow, CNameOutPointType{name, outPoint});
+}
 nValidAtHeight = value.nValidAtHeight;
 return true;
 }
@@ -1097,9 +1101,12 @@ void CClaimTrieCacheBase::undoIncrement(insertUndoType& insertUndo, std::vector<
 if (auto itExpirationRow = getExpirationQueueCacheRow<T>(nNextHeight, false)) {
 for (const auto& itEntry : *itExpirationRow) {
 T value;
-assert(removeFromCache(itEntry.name, itEntry.outPoint, value, true));
+if (removeFromCache(itEntry.name, itEntry.outPoint, value, true)) {
 expireUndo.emplace_back(itEntry.name, value);
 addTo(deleted, value);
+}
+else // a bug in the normalization stuff allows some of these to stick around after the claims were spent; don't crash
+LogPrintf("Warning: missing expired entry %s, %s:%d\n", itEntry.name, itEntry.outPoint.hash.GetHex(), itEntry.outPoint.n);
 }
 itExpirationRow->clear();
 }


@@ -486,7 +486,6 @@ void SetupServerArgs()
 CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MAXFEE)), false, OptionsCategory::DEBUG_TEST);
 gArgs.AddArg("-printpriority", strprintf("Log transaction fee per kB when mining blocks (default: %u)", DEFAULT_PRINTPRIORITY), true, OptionsCategory::DEBUG_TEST);
 gArgs.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -daemon. To disable logging to file, set -nodebuglogfile)", false, OptionsCategory::DEBUG_TEST);
-gArgs.AddArg("-shrinkdebugfile", "Shrink debug.log file on client startup (default: 1 when no -debug)", false, OptionsCategory::DEBUG_TEST);
 gArgs.AddArg("-uacomment=<cmt>", "Append comment to the user agent string", false, OptionsCategory::DEBUG_TEST);
 SetupChainParamsBaseOptions();
@@ -1236,11 +1235,6 @@ bool AppInitMain()
 CreatePidFile(GetPidFile(), getpid());
 #endif
 if (g_logger->m_print_to_file) {
-if (gArgs.GetBoolArg("-shrinkdebugfile", g_logger->DefaultShrinkDebugFile())) {
-// Do this first since it both loads a bunch of debug.log into memory,
-// and because this needs to happen before any other debug.log printing
-g_logger->ShrinkDebugFile();
-}
 if (!g_logger->OpenDebugLog()) {
 return InitError(strprintf("Could not open debug log file %s",
 g_logger->m_file_path.string()));


@@ -37,6 +37,12 @@ bool BCLog::Logger::OpenDebugLog()
 assert(m_fileout == nullptr);
 assert(!m_file_path.empty());
+if (fs::exists(m_file_path)) {
+fs::path old_file_path(m_file_path);
+old_file_path += ".old";
+fs::rename(m_file_path, old_file_path);
+}
 m_fileout = fsbridge::fopen(m_file_path, "a");
 if (!m_fileout) {
 return false;
@@ -83,11 +89,6 @@ bool BCLog::Logger::WillLogCategory(BCLog::LogFlags category) const
 return (m_categories.load(std::memory_order_relaxed) & category) != 0;
 }
-bool BCLog::Logger::DefaultShrinkDebugFile() const
-{
-return m_categories == BCLog::NONE;
-}
 struct CLogCategoryDesc
 {
 BCLog::LogFlags flag;
@@ -231,44 +232,3 @@ void BCLog::Logger::LogPrintStr(const std::string &str)
 }
 }
 }
-void BCLog::Logger::ShrinkDebugFile()
-{
-// Amount of debug.log to save at end when shrinking (must fit in memory)
-constexpr size_t RECENT_DEBUG_HISTORY_SIZE = 10 * 1000000;
-assert(!m_file_path.empty());
-// Scroll debug.log if it's getting too big
-FILE* file = fsbridge::fopen(m_file_path, "r");
-// Special files (e.g. device nodes) may not have a size.
-size_t log_size = 0;
-try {
-log_size = fs::file_size(m_file_path);
-} catch (boost::filesystem::filesystem_error &) {}
-// If debug.log file is more than 10% bigger the RECENT_DEBUG_HISTORY_SIZE
-// trim it down by saving only the last RECENT_DEBUG_HISTORY_SIZE bytes
-if (file && log_size > 11 * (RECENT_DEBUG_HISTORY_SIZE / 10))
-{
-// Restart the file with some of the end
-std::vector<char> vch(RECENT_DEBUG_HISTORY_SIZE, 0);
-if (fseek(file, -((long)vch.size()), SEEK_END)) {
-LogPrintf("Failed to shrink debug log file: fseek(...) failed\n");
-fclose(file);
-return;
-}
-int nBytes = fread(vch.data(), 1, vch.size(), file);
-fclose(file);
-file = fsbridge::fopen(m_file_path, "w");
-if (file)
-{
-fwrite(vch.data(), 1, nBytes, file);
-fclose(file);
-}
-}
-else if (file != nullptr)
-fclose(file);
-}


@@ -93,7 +93,6 @@ namespace BCLog {
 bool Enabled() const { return m_print_to_console || m_print_to_file; }
 bool OpenDebugLog();
-void ShrinkDebugFile();
 uint32_t GetCategoryMask() const { return m_categories.load(); }
@@ -103,8 +102,6 @@ namespace BCLog {
 bool DisableCategory(const std::string& str);
 bool WillLogCategory(LogFlags category) const;
-bool DefaultShrinkDebugFile() const;
 };
 } // namespace BCLog


@@ -451,7 +451,7 @@ static UniValue getblocktemplate(const JSONRPCRequest& request)
     if (strMode != "template")
         throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode");
 
-    if (Params().NetworkIDString() != CBaseChainParams::REGTEST) // who should own this constant?
+    if (Params().NetworkIDString() == CBaseChainParams::MAIN)
     {
         if (!g_connman)
             throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
@@ -567,6 +567,8 @@ static UniValue getblocktemplate(const JSONRPCRequest& request)
     // NOTE: If at some point we support pre-segwit miners post-segwit-activation, this needs to take segwit support into consideration
    const bool fPreSegWit = (ThresholdState::ACTIVE != VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT, versionbitscache));
 
+    if (!fPreSegWit && !fSupportsSegwit)
+        throw JSONRPCError(RPC_INVALID_PARAMETER, "Segwit support is now required. Please include \"segwit\" in the client's rules.");
+
     UniValue aCaps(UniValue::VARR); aCaps.push_back("proposal");
 
     UniValue result(UniValue::VOBJ);
@@ -621,7 +623,6 @@ static UniValue getblocktemplate(const JSONRPCRequest& request)
     aMutable.push_back("time");
     aMutable.push_back("transactions");
     aMutable.push_back("prevblock");
-    aMutable.push_back("submit/coinbase");
 
     result.pushKV("capabilities", aCaps);


@@ -64,8 +64,20 @@ void CScheduler::serviceQueue()
             // Explicitly use a template here to avoid hitting that overload.
             while (!shouldStop() && !taskQueue.empty()) {
                 boost::chrono::system_clock::time_point timeToWaitFor = taskQueue.begin()->first;
-                if (newTaskScheduled.wait_until<>(lock, timeToWaitFor) == boost::cv_status::timeout)
-                    break; // Exit loop after timeout, it means we reached the time of the event
+                try {
+                    if (newTaskScheduled.wait_until<>(lock, timeToWaitFor) == boost::cv_status::timeout) {
+                        break; // Exit loop after timeout, it means we reached the time of the event
+                    }
+                } catch (boost::thread_interrupted) {
+                    // We need to make sure we don't ignore this, or the thread won't end
+                    throw;
+                } catch (...) {
+                    // Some boost versions have a bug that can cause a time prior to system boot (or wake from sleep) to throw an exception instead of return timeout
+                    // See https://github.com/boostorg/thread/issues/308
+                    // Check if the time has passed and, if so, break gracefully
+                    if (timeToWaitFor <= boost::chrono::system_clock::now()) break;
+                    throw;
+                }
             }
 #endif
             // If there are multiple threads, the queue can empty while we're waiting (another
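
The same defensive pattern can be written against the standard library, which may be easier to read than the boost-flavoured diff above. This is an illustrative sketch, not the project's scheduler: if wait_until throws for an awkward time point, an already-elapsed deadline is treated as a timeout and anything else is rethrown.

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // Returns true if the deadline was reached, false if the condition variable was notified.
    template <typename Clock, typename Duration>
    bool wait_or_timeout(std::condition_variable& cv, std::unique_lock<std::mutex>& lock,
                         const std::chrono::time_point<Clock, Duration>& deadline)
    {
        try {
            return cv.wait_until(lock, deadline) == std::cv_status::timeout;
        } catch (...) {
            if (deadline <= Clock::now()) return true; // deadline already passed: behave like a timeout
            throw;                                     // anything else is a real error
        }
    }
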


@@ -26,7 +26,7 @@ struct CDiskTxPos;
 //! No need to periodic flush if at least this much space still available.
 static constexpr int MAX_BLOCK_COINSDB_USAGE = 10;
 //! -dbcache default (MiB)
-static const int64_t nDefaultDbCache = 500;
+static const int64_t nDefaultDbCache = 560;
 //! -dbbatchsize default (bytes)
 static const int64_t nDefaultDbBatchSize = 16 << 20;
 //! max. -dbcache (MiB)
@@ -34,13 +34,13 @@ static const int64_t nMaxDbCache = sizeof(void*) > 4 ? 16384 : 1024;
 //! min. -dbcache (MiB)
 static const int64_t nMinDbCache = 4;
 //! Max memory allocated to block tree DB specific cache, if no -txindex (MiB)
-static const int64_t nMaxBlockDBCache = 4;
+static const int64_t nMaxBlockDBCache = 16;
 //! Max memory allocated to block tree DB specific cache, if -txindex (MiB)
 // Unlike for the UTXO database, for the txindex scenario the leveldb cache make
 // a meaningful difference: https://github.com/bitcoin/bitcoin/pull/8273#issuecomment-229601991
 static const int64_t nMaxTxIndexCache = 1024;
 //! Max memory allocated to coin DB specific cache (MiB)
-static const int64_t nMaxCoinsDBCache = 8;
+static const int64_t nMaxCoinsDBCache = 32;
 
 /** CCoinsView backed by the coin database (chainstate/) */
 class CCoinsViewDB final : public CCoinsView
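
For context on how such per-database caps are typically applied (the actual split lives in init.cpp and may differ in detail), here is a simplified, assumed sketch: the -dbcache value is clamped to the allowed range and the block tree DB receives a bounded slice of it, so raising nMaxBlockDBCache only matters once the total budget is large enough.

    #include <algorithm>
    #include <cstdint>

    // Simplified sketch (assumed, not a copy of init.cpp) of how the constants above
    // are commonly used in Bitcoin-derived nodes.
    int64_t clamp_dbcache_mib(int64_t requested_mib)
    {
        const int64_t nMinDbCache = 4;
        const int64_t nMaxDbCache = sizeof(void*) > 4 ? 16384 : 1024;
        return std::max(nMinDbCache, std::min(requested_mib, nMaxDbCache));
    }

    int64_t block_tree_cache_mib(int64_t total_mib)
    {
        const int64_t nMaxBlockDBCache = 16;              // raised from 4 in this change
        return std::min(total_mib / 8, nMaxBlockDBCache); // one-eighth of the budget, capped
    }
    // With the new default of 560 MiB, block_tree_cache_mib(560) == 16 rather than 4.
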


@@ -2,7 +2,7 @@
 set -e
 srcdir="$(dirname $0)"
 cd "$srcdir"
-if [ -z ${LIBTOOLIZE} ] && GLIBTOOLIZE="`which glibtoolize 2>/dev/null`"; then
+if [ -z ${LIBTOOLIZE} ] && GLIBTOOLIZE="$(which glibtoolize 2>/dev/null)"; then
   LIBTOOLIZE="${GLIBTOOLIZE}"
   export LIBTOOLIZE
 fi


@@ -2772,13 +2772,15 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams&
         if (ShutdownRequested())
             break;
     } while (pindexNewTip != pindexMostWork);
-    CheckBlockIndex(chainparams.GetConsensus());
+
+    auto& consensus = chainparams.GetConsensus();
+    CheckBlockIndex(consensus);
 
     auto flushMode = FlushStateMode::PERIODIC;
-    if (pindexNewTip && pindexNewTip->nTime + chainparams.GetConsensus().nPowTargetSpacing > GetAdjustedTime()) {
+    if (pindexNewTip && chainparams.NetworkIDString() != CBaseChainParams::REGTEST
+        && pindexNewTip->nTime + consensus.nPowTargetSpacing > GetAdjustedTime()) {
         // trying to ensure that we flush to disk after new blocks when we're caught up to the chain
         // they're technically allowed to be two hours late, but experience says one minute is more likely
+        // LogPrintf("Added tip with time %d but it is now %ll\n", pindexNewTip->nTime, GetAdjustedTime());
         flushMode = FlushStateMode::ALWAYS;
     }
     return FlushStateToDisk(chainparams, state, flushMode);
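
Distilled, the flush policy now depends on two conditions: the node is not on regtest, and the new tip's timestamp is within one target block spacing of adjusted time (i.e. the node looks caught up). The helper below is a hypothetical restatement of that decision with the inputs passed explicitly; it is not the project's code.

    #include <cstdint>

    enum class FlushMode { PERIODIC, ALWAYS };

    // Flush aggressively only when we appear caught up and we are not on regtest,
    // where constant flushing would just slow tests down.
    FlushMode choose_flush_mode(bool have_new_tip, bool is_regtest,
                                int64_t tip_time, int64_t adjusted_now, int64_t pow_target_spacing)
    {
        if (have_new_tip && !is_regtest && tip_time + pow_target_spacing > adjusted_now)
            return FlushMode::ALWAYS;
        return FlushMode::PERIODIC;
    }
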


@@ -57,7 +57,7 @@ static const bool DEFAULT_WHITELISTFORCERELAY = true;
 /** Default for -minrelaytxfee, minimum relay fee for transactions */
 static const unsigned int DEFAULT_MIN_RELAY_TX_FEE = 1000;
 //! -maxtxfee default
-static const CAmount DEFAULT_TRANSACTION_MAXFEE = 0.1 * COIN;
+static const CAmount DEFAULT_TRANSACTION_MAXFEE = 0.5 * COIN;
 //! Discourage users to set fees higher than this amount (in satoshis) per kB
 static const CAmount HIGH_TX_FEE_PER_KB = 0.01 * COIN;
 //! -maxtxfee will warn if called with a higher fee than this amount (in satoshis)
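
Since COIN is 100,000,000 satoshis in Bitcoin-derived code, this raises the default -maxtxfee ceiling from 10,000,000 to 50,000,000 satoshis. A quick check of the arithmetic, using a local COIN constant rather than the project's amount.h:

    #include <cstdint>

    using CAmount = int64_t;
    static constexpr CAmount COIN = 100000000; // satoshis per coin

    static constexpr CAmount OLD_DEFAULT_TRANSACTION_MAXFEE = 0.1 * COIN; // 10,000,000 satoshis
    static constexpr CAmount NEW_DEFAULT_TRANSACTION_MAXFEE = 0.5 * COIN; // 50,000,000 satoshis

    static_assert(NEW_DEFAULT_TRANSACTION_MAXFEE == 5 * OLD_DEFAULT_TRANSACTION_MAXFEE,
                  "the default cap is raised five-fold");
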


@@ -469,10 +469,10 @@ bool BerkeleyEnvironment::Salvage(const std::string& strFile, bool fAggressive,
 }
 
-void BerkeleyEnvironment::CheckpointLSN(const std::string& strFile)
+void BerkeleyEnvironment::CheckpointLSN(const std::string& strFile, bool lsnReset)
 {
     dbenv->txn_checkpoint(0, 0, 0);
-    if (fMockDb)
+    if (fMockDb || !lsnReset)
         return;
     dbenv->lsn_reset(strFile.c_str(), 0);
 }
@@ -799,7 +799,7 @@ bool BerkeleyBatch::PeriodicFlush(BerkeleyDatabase& database)
                 // Flush wallet file so it's self contained
                 env->CloseDb(strFile);
-                env->CheckpointLSN(strFile);
+                env->CheckpointLSN(strFile, false); // too expensive to reset LSN periodically (and it's triggered on flush or backup
                 env->mapFileUseCount.erase(mi++);
 
                 LogPrint(BCLog::DB, "Flushed %s %dms\n", strFile, GetTimeMillis() - nStart);
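
The new default argument lets routine periodic flushes skip the relatively expensive lsn_reset, while shutdown and backup paths keep the old behaviour by relying on the default. The stub below is purely illustrative (Env and its methods are hypothetical); it only shows the calling pattern the extra parameter enables.

    #include <string>

    struct Env {
        void txn_checkpoint() { /* write a checkpoint */ }
        void lsn_reset(const std::string& file) { /* rewrite log sequence numbers */ }

        void CheckpointLSN(const std::string& file, bool lsnReset = true)
        {
            txn_checkpoint();
            if (!lsnReset) return;   // periodic flush: checkpoint only
            lsn_reset(file);         // flush/backup: full reset so the file is portable
        }
    };

    // Usage: env.CheckpointLSN("wallet.dat", false);  // periodic flush
    //        env.CheckpointLSN("wallet.dat");         // backup/shutdown (default true)
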


@@ -83,7 +83,7 @@ public:
     bool Open(bool retry);
     void Close();
     void Flush(bool fShutdown);
-    void CheckpointLSN(const std::string& strFile);
+    void CheckpointLSN(const std::string& strFile, bool lsnReset = true);
     void CloseDb(const std::string& strFile);
     void ReloadDbEnv();


@@ -18,12 +18,12 @@ if test "x$1" = "x"; then
 fi
 
 RET=0
-PREV_BRANCH=`git name-rev --name-only HEAD`
-PREV_HEAD=`git rev-parse HEAD`
-for i in `git rev-list --reverse $1`; do
+PREV_BRANCH=$(git name-rev --name-only HEAD)
+PREV_HEAD=$(git rev-parse HEAD)
+for i in $(git rev-list --reverse $1); do
     if git rev-list -n 1 --pretty="%s" $i | grep -q "^scripted-diff:"; then
         git checkout --quiet $i^ || exit
-        SCRIPT="`git rev-list --format=%b -n1 $i | sed '/^-BEGIN VERIFY SCRIPT-$/,/^-END VERIFY SCRIPT-$/{//!b};d'`"
+        SCRIPT="$(git rev-list --format=%b -n1 $i | sed '/^-BEGIN VERIFY SCRIPT-$/,/^-END VERIFY SCRIPT-$/{//!b};d')"
         if test "x$SCRIPT" = "x"; then
             echo "Error: missing script for: $i"
             echo "Failed"