diff --git a/contrib/k8s-lbry/.gitignore b/contrib/k8s-lbry/.gitignore index ebf0455..e6a67b5 100644 --- a/contrib/k8s-lbry/.gitignore +++ b/contrib/k8s-lbry/.gitignore @@ -1,4 +1,6 @@ index.yaml *.tgz -requirements-dev.yaml values-dev.yaml +values-staging.yaml +values-production.yaml +completion.bash.inc diff --git a/contrib/k8s-lbry/Chart.yaml b/contrib/k8s-lbry/Chart.yaml index 9c84aec..aba2d66 100644 --- a/contrib/k8s-lbry/Chart.yaml +++ b/contrib/k8s-lbry/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: "1.0" description: LBRY on Kubernetes with Helm name: k8s-lbry -version: 0.1.1 +version: 0.1.4 maintainers: - email: ryan@enigmacurry.com name: EnigmaCurry diff --git a/contrib/k8s-lbry/README.md b/contrib/k8s-lbry/README.md index a7c99c3..5f354fa 100644 --- a/contrib/k8s-lbry/README.md +++ b/contrib/k8s-lbry/README.md @@ -2,12 +2,12 @@ Contributing Author: [EnigmaCurry](https://www.enigmacurry.com) -Last Update: May 6 2019 +Last Update: May 30 2019 Deploy lbrycrd, lbrynet, chainquery, mysql, and spee.ch on your Kubernetes cluster. -[![asciicast](https://asciinema.org/a/fkVzPW05vKFEjBXdDp6I81odA.svg)](https://asciinema.org/a/fkVzPW05vKFEjBXdDp6I81odA) +[![asciicast](https://enigmacurry.github.io/kick-ascii/cast/k8s-lbry.png)](https://enigmacurry.github.io/kick-ascii/?cast=k8s-lbry&bg=lbry.png) @@ -15,54 +15,77 @@ cluster. - [Requirements](#requirements) -- [Helm Charts](#helm-charts) - * [Tiller](#tiller) - * [nginx-ingress](#nginx-ingress) - * [cert-manager](#cert-manager) - * [k8s-lbry](#k8s-lbry) +- [Security Notice](#security-notice) +- [Installation](#installation) + * [Create a project directory](#create-a-project-directory) + * [Setup alias and tab completion](#setup-alias-and-tab-completion) + * [k8s-lbry setup](#k8s-lbry-setup) + * [k8s-lbry install-nginx-ingress](#k8s-lbry-install-nginx-ingress) + * [k8s-lbry install-cert-manager](#k8s-lbry-install-cert-manager) + * [k8s-lbry install](#k8s-lbry-install) + * [k8s-lbry upgrade](#k8s-lbry-upgrade) +- [Services](#services) * [lbrycrd](#lbrycrd) * [chainquery](#chainquery) + [MySQL for chainquery](#mysql-for-chainquery) + [Start chainquery](#start-chainquery) + [Startup chainquery with a database snapshot](#startup-chainquery-with-a-database-snapshot) - * [lbrynet](#lbrynet) + * [lbrynet API service (not for spee.ch)](#lbrynet-api-service-not-for-speech) + [IMPORTANT - Backup your cluster wallet](#important---backup-your-cluster-wallet) - * [spee.ch](#speech) - + [MySQL for speech](#mysql-for-speech) - + [Configure Speech](#configure-speech) -- [TLS Support](#tls-support) - * [Assign DNS name(s) to your Load Balancer](#assign-dns-names-to-your-load-balancer) - * [Enable TLS](#enable-tls) -- [Improvements](#improvements) + * [spee.ch (and lbrynet sidecar and mysql)](#speech-and-lbrynet-sidecar-and-mysql) + + [IMPORTANT - Backup your speech wallet](#important---backup-your-speech-wallet) + + [Fund your speech wallet](#fund-your-speech-wallet) + + [Create a thumbnail channel](#create-a-thumbnail-channel) + + [Finish speech setup](#finish-speech-setup) +- [Extra commands that k8s-lbry (run.sh) provides](#extra-commands-that-k8s-lbry-runsh-provides) + * [k8s-lbry helm](#k8s-lbry-helm) + * [k8s-lbry kubectl](#k8s-lbry-kubectl) + * [k8s-lbry logs](#k8s-lbry-logs) + * [k8s-lbry shell](#k8s-lbry-shell) + * [k8s-lbry shell-pvc](#k8s-lbry-shell-pvc) + * [k8s-lbry restart](#k8s-lbry-restart) + * [k8s-lbry lbrynet](#k8s-lbry-lbrynet) + * [k8s-lbry chainquery-mysql-client](#k8s-lbry-chainquery-mysql-client) + * [k8s-lbry 
speech-mysql-client](#k8s-lbry-speech-mysql-client)
+  * [k8s-lbry lbrynet-copy-wallet](#k8s-lbry-lbrynet-copy-wallet-)
+  * [k8s-lbry package](#k8s-lbry-package)
+- [TLS / SSL / HTTPS](#tls--ssl--https)
+- [Cloud specific notes](#cloud-specific-notes)
+  * [AWS](#aws)
+  * [minikube](#minikube)
+- [Uninstall](#uninstall)

 ## Requirements

- * A Kubernetes cluster with role-based access control (RBAC) enabled.
-   * This tutorial was tested on a fresh DigitalOcean managed cluster on nodes
-     with 8GB of RAM, on kubernetes 1.13.5.
- * [kubectl command line
-   tool](https://kubernetes.io/docs/tasks/tools/install-kubectl/) installed on
-   your local development machine.
-   * Tested with kubectl v1.14.0
- * [Helm command line tool](https://github.com/helm/helm/releases) installed on
-   your local development machine.
-   * Tested with helm v2.13.1
+ * A Kubernetes cluster.
+   * Tested on a DigitalOcean managed Kubernetes cluster on nodes with 8GB of
+     RAM, on kubernetes 1.14.1.
+   * Tested on AWS with [Charmed Kubernetes
+     Distribution](https://www.ubuntu.com/kubernetes/docs/quickstart) - See
+     [AWS specific notes](#aws).
+   * Tested on
+     [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) for a
+     self-contained virtual machine running kubernetes in VirtualBox - See
+     [minikube specific notes](#minikube).
+ * Local development machine dependencies:
+   * [GNU Bash](https://www.gnu.org/software/bash/) and friends. If you're on
+     Linux or Mac, you should be good to go.
+   * [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+     * Tested with kubectl v1.14.0
+   * [helm](https://github.com/helm/helm/releases)
+     * Tested with helm v2.13.1
+ * Optional: for TLS / HTTPs support, you will also need an internet domain
+   name, and the ability to update its DNS.

-Your cloud provider should have instructions for setting up kubectl to talk to
+Your cloud provider should have instructions for setting up `kubectl` to talk to
 your cluster. This usually involves downloading a config file and putting it in
 `$HOME/.kube/config`. (The file has to be renamed `config` and put in the
 `$HOME/.kube` directory.)

-Note: If you want to download the cluster config to a location other than
-`$HOME/.kube/config`, you can set the `KUBECONFIG` environment variable to the
-full path of your config file, or create a symlink from your config file to
-`$HOME/.kube/config`, or you can use the `--kubeconfig` parameter to both
-`kubectl` and `helm` commands every time you use them.
-
-Test that your kubectl can talk to your cluster, by querying for a list of running
-nodes:
+Test that your `kubectl` can talk to your cluster, by querying for a list of
+running nodes:

 ```
 kubectl get nodes
@@ -71,553 +94,769 @@ kubectl get nodes
 If everything is working, you should see a list of one or more nodes running
 and showing `STATUS=Ready`

-## Helm Charts
+## Security Notice
+
+Any cryptocurrency wallet that is online is a security concern. For any
+real-world production deployment, you will need to review this architecture
+closely to see if it fits with your chosen platform and network environment.
+
+This system is currently designed for a kubernetes cluster that has a single
+administrator (or possibly a small team of trusted users). It will not support
+untrusted multi-tenancy out of the box.
+
+All of the services are created in their own namespace, but no [Security
+Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) have
+been applied to the pods.
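+
+As one example of the hardening you might add yourself, a minimal
+[Pod Security Policy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/)
+can forbid privileged containers. The sketch below is illustrative only: the
+policy name is hypothetical, it is not created by this chart, and it only takes
+effect once RBAC grants the pods' service accounts `use` permission on it:
+
+```
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  # Hypothetical name, not part of this chart:
+  name: k8s-lbry-restricted
+spec:
+  privileged: false
+  allowPrivilegeEscalation: false
+  # No extra restrictions on SELinux, users, or groups:
+  seLinux:
+    rule: RunAsAny
+  runAsUser:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: RunAsAny
+  fsGroup:
+    rule: RunAsAny
+  # Common volume types used by charts like this one:
+  volumes:
+    - configMap
+    - secret
+    - persistentVolumeClaim
+    - emptyDir
+```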
+ +The Helm configuration file contains *all* of the configuration for the system, +*including passwords* in plain text. + +The lbrynet SDK wallets are individually stored unencrypted in their own +persistent volumes. + +## Installation This system is installed via [Helm](https://helm.sh/docs/), the package manager for Kubernetes. [Helm Charts](https://helm.sh/docs/developing_charts/#charts) are the basis for packages in Helm. This directory is a Helm chart itself. -### Tiller +All of the helm and kubectl commands necessary to install, upgrade, and maintain +your deployments, are wrapped in the included [`run.sh`](run.sh) script. For +debugging purposes, this wrapper also prints to stdout the full underlying +commands (helm, kubectl, etc) as they are run. -Tiller is the cluster-side component of helm, and needs to be installed before -you can use helm with your cluster. Run the following to install tiller to your -cluster: +### Create a project directory + +Create a new directory someplace to store your deployment configuration. For the +rest of this tutorial, you will work from this directory: ``` -kubectl -n kube-system create serviceaccount tiller +mkdir $HOME/k8s-lbry-test -kubectl create clusterrolebinding tiller --clusterrole cluster-admin \ - --serviceaccount=kube-system:tiller - -helm init --service-account tiller -helm repo update +cd $HOME/k8s-lbry-test ``` -Now you can use helm locally to install things to your remote cluster. +Download `run.sh` to this same directory: -### nginx-ingress +``` +curl -Lo run.sh https://raw.githubusercontent.com/EnigmaCurry/lbry-docker/k8s-lbry/contrib/k8s-lbry/run.sh + +chmod a+x run.sh +``` + +### Setup alias and tab completion + +`run.sh` can be run directly without any setup. However, without adding it to +your `PATH`, you need to specify the full path to the script each time. Setting +a bash alias for `run.sh` is the quickest way of setting up to run from +anywhere, as well as activating support for bash tab completion. + +One time setup to install alias to `$HOME/.bashrc`: + +``` +./run.sh setup-alias +``` + +It should prompt you if it is OK for the script to edit `$HOME/.bashrc`. Once +you confirm, close your terminal session, then reopen it. + +Verify the new `k8s-lbry` alias to `run.sh` is working: + +``` +k8s-lbry kubectl get nodes +``` + +Notice that tab completion should work throughout typing the above command. + +### k8s-lbry setup + +Setup will check for dependencies, update helm repositories, and create an +initial config file (`values-dev.yaml`). + +``` +k8s-lbry setup +``` + +### k8s-lbry install-nginx-ingress An Ingress Controller ([nginx-ingress](https://github.com/helm/charts/tree/master/stable/nginx-ingress)) will help you to route outside internet traffic into your cluster. nginx-ingress will also help terminate TLS connections (SSL) so that your containers don't -need to worry about encryption. +need to worry about encryption of traffic. -Install nginx-ingress, with HTTPs turned off initially: +Install nginx-ingress into the `k8s-lbry` namespace: ``` -helm install stable/nginx-ingress --name nginx-ingress \ - --set nginx-ingress.controller.service.enableHttps=false +k8s-lbry install-nginx-ingress ``` -### cert-manager +### k8s-lbry install-cert-manager [cert-manager](https://docs.cert-manager.io/en/latest/index.html) will provide TLS certificates (SSL) for your cluster, using [Let's Encrypt](https://letsencrypt.org/). 
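+
+For background, cert-manager issues certificates according to `Issuer` or
+`ClusterIssuer` resources. A Let's Encrypt ClusterIssuer, in the spirit of what
+the chart's `cert-manager-issuer` settings drive, looks roughly like this
+sketch (cert-manager v0.7 API; the exact resource created for you may differ):
+
+```
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: ClusterIssuer
+metadata:
+  name: letsencrypt-prod
+spec:
+  acme:
+    # Let's Encrypt production ACME endpoint:
+    server: https://acme-v02.api.letsencrypt.org/directory
+    # Your address, set as cert-manager-issuer.email in the values file:
+    email: fred@example.com
+    # Secret that stores the ACME account private key:
+    privateKeySecretRef:
+      name: letsencrypt-prod
+    # Answer HTTP-01 challenges through the ingress controller:
+    http01: {}
+```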
-Install cert-manager: +Install cert-manager into the `cert-manager` namespace: ``` -kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.7/deploy/manifests/00-crds.yaml - -helm repo add jetstack https://charts.jetstack.io -helm repo update - -helm install --name cert-manager --namespace cert-manager jetstack/cert-manager --version v0.7.1 - -kubectl label namespace cert-manager certmanager.k8s.io/disable-validation="true" +k8s-lbry install-cert-manager ``` -### k8s-lbry +### k8s-lbry install -The k8s-lbry helm chart installs lbrycrd, chainquery, lbrynet, and mysql. +Once nginx-ingress and cert-manager are installed, the main helm chart can be +installed. This installs lbrycrd, chainquery, lbrynet, spee.ch, and mysql, +depending on what you enable in `values-dev.yaml`. -Wait for the Load Balancer to show an External IP: +Find the External IP address for your load balancer: ``` -kubectl get svc -l app=nginx-ingress,component=controller -w +k8s-lbry kubectl get svc nginx-ingress-controller -o wide ``` -Press Ctrl-C to quit once you see the External IP listed (and not ``). +If you find a hostname instead of an IP address, this means your load balancer +has multiple IP addresses. In this case, you will need to resolve the domain +name to find the IP addresses. If this affects you, [paste the hostname into +this tool](https://toolbox.googleapps.com/apps/dig/). Look for the `;ANSWER` +section and you should see two or more IP addresses listed. Since lbrycrd will +only advertise one IP address, pick just one of the IP addresses to use for the +purposes of this tutorial. -Add the `k8s-lbry` helm repository: +You must edit your own `values-dev.yaml`. (The setup procedure created an +initial configuration in the same directory as `run.sh`.) To use a different +config file, export the `VALUES` environment variable before subsequent +commands, specifying the full path to your values file. + +Edit `values-dev.yaml`. You only need to change one thing right now: + + * Change `lbrycrd.configurationFile.lbrycrd.conf` at the bottom of this section + find `externalip=` and set it equal to the External IP address of the Load + Balancer obtained above. (Example: `externalip=123.123.123.123`) + +Save `values-dev.yaml`. + +Now run the install script to create the new release: ``` -helm repo add k8s-lbry https://k8s-lbry.sfo2.digitaloceanspaces.com +k8s-lbry install ``` -Create a directory to store your configuration file for `k8s-lbry`. You can -download the default configuration file for the helm chart -([values.yaml](values.yaml)): +### k8s-lbry upgrade + +For helm, `upgrade` does not necessarily mean you are upgrading to a new version +of any particular software, `upgrade` just means to apply your configuration +file to the cluster. If you edit `values-dev.yaml`, you then need to apply your +changes with `k8s-lbry upgrade`. + +You can make changes to `values-dev.yaml` at any time. You can apply your +configuration to your cluster by upgrading the release: ``` -VALUES=https://raw.githubusercontent.com/lbryio/lbry-docker/master/contrib/k8s-lbry/values.yaml - -curl -Lo values.yaml $VALUES +k8s-lbry upgrade ``` -`values.yaml` is your own configuration file for `k8s-lbry`. You will need it -everytime you need to update your deployment. Commit the file to a git -repository, or save it someplace safe. 
- -Edit `values.yaml`, change the following things: - - * Change `lbrycrd.configurationFile.lbrycrd.conf` at the bottom find - `externalip=` and set it equal to the External IP address of the Load - Balancer obtained above. - * Change `cert-manager-issuer.email` to your email address to receive notices - from Let's Encrypt. (Only used if you choose to enable TLS.) - * Change `echo-http-server.hostname` to any domain name you choose. (It must be - a real internet domain that you control, if you choose to enable TLS.) - -Save `values.yaml`. - -Now install `k8s-lbry`: +You can upgrade as often as you want. Each time you upgrade the release, helm +increases the `REVISION` number: ``` -helm install -n k8s-lbry k8s-lbry/k8s-lbry -f values.yaml +k8s-lbry helm ls ``` -This will create a new helm release for your cluster called `k8s-lbry`, from the -helm repository called `k8s-lbry`, using the package named `k8s-lbry`, using the -local configuration file called `values.yaml`. +## Services ### lbrycrd -Find the lbrycrd pod to ensure it has started correctly: +After running the installation above, you should now have a running lbrycrd pod. +Verify this by listing the pods for the `k8s-lbry` namespace: ``` -kubectl get pods -l app=lbrycrd +k8s-lbry kubectl get pods ``` -Tail the logs (Press Ctrl-C to quit): +You should see a pod listed with a name that starts with `lbrycrd`. + +Check the lbrycrd logs: ``` -kubectl logs -f -l app=lbrycrd +k8s-lbry logs lbrycrd ``` -You can use lbrycrd-cli from the running pod: +Press Ctrl-C to stop viewing the log. + +It is advisable to wait for lbrycrd to synchronize with the full blockchain +before starting other services, so watch the logs until synchronization +completes (`progress=1.0`). + +You can utilize `lbrycrd-cli` as well: ``` -POD=`kubectl get pod -l app=lbrycrd -o name | sed s+pod/++` && \ - kubectl exec $POD -- lbrycrd-cli -rpcuser=lbry -rpcpassword=lbry getinfo +k8s-lbry lbrycrd-cli --help ``` -Upgrade the nginx-ingress release to allow forwarding port 9246 to lbrycrd: - -``` -helm upgrade nginx-ingress stable/nginx-ingress \ - --set tcp.9246="default/k8s-lbry-lbrycrd:9246" -``` - -Verify the port is now open (9246 listed under PORTS): - -``` -kubectl get svc nginx-ingress-controller -``` - -After your lbrycrd service has been online for awhile, check back with the -`lbrcrd-cli getinfo` command from above. You will know that nginx-ingress is -properly connected to lbrycrd if you see that the number of connections listed -is a number greater than 8. - ### chainquery #### MySQL for chainquery [MySQL](https://github.com/helm/charts/tree/master/stable/mysql) is used as the database chainquery talks to. -Edit `values.yaml` and set `chainquery-mysql.enabled` to `true`. +Edit `values-dev.yaml` and set `chainquery-mysql.enabled` to `true`. Upgrade the release to turn on mysql for chainquery: ``` -helm upgrade k8s-lbry k8s-lbry/k8s-lbry -f values.yaml +k8s-lbry upgrade ``` -You can try logging into the mysql shell if you like (default password is -`chainquery`): +You can try logging into the mysql shell if you like: ``` -POD=`kubectl get pod -l app=k8s-lbry-chainquery-mysql -o name | sed s+pod/++` && \ - kubectl exec -it $POD -- mysql -u chainquery -p +k8s-lbry chainquery-mysql-client ``` You can view the mysql logs: ``` -kubectl logs -l app=k8s-lbry-chainquery-mysql -f +k8s-lbry logs chainquery-mysql ``` +Press Ctrl-C to stop viewing the log. + #### Start chainquery -Edit `values.yaml` and set `chainquery.enabled` to `true`. 
+Edit `values-dev.yaml` and set `chainquery.enabled` to `true`. Upgrade the release to turn on chainquery: ``` -helm upgrade k8s-lbry k8s-lbry/k8s-lbry -f values.yaml +k8s-lbry upgrade ``` You can view the chainquery logs: ``` -kubectl logs -l app=chainquery -f +k8s-lbry logs chainquery ``` #### Startup chainquery with a database snapshot If chainquery is starting with a blank MySQL database, it will take several days -to synchronize with the full lbycrd blockchain. If this is OK, you can just +to synchronize with the full lbrycrd blockchain. If this is OK, you can just watch the chainquery logs and wait for it to get to the [current block height](https://explorer.lbry.io/). -If you cannot wait that long, you may start from a database snapshot to speed up -this process. - -Delete the chainquery and mysql deployments: +If you cannot wait that long, you can scrap your existing chainquery database +and restart from a more recent database snapshot: ``` -kubectl delete deployments k8s-lbry-chainquery k8s-lbry-chainquery-mysql +k8s-lbry chainquery-override-snapshot ``` -The pods will automatically terminate. +This will prompt if you really wish to destroy the current chainquery database. +If you confirm, the existing chainquery and chainquery-mysql deployments will be +deleted, and pods will be terminated, ***and the contents of the Persistent +Volume Claim (PVC) for chainquery-mysql will be deleted.*** The snapshot will be +downloaded and restored in its place. -The mysql data still exists in a PersistentVolumeClaim, `k8s-lbry-chainquery-mysql`. Check -that it still exists: +Once the snapshot is restored, upgrade the release to restore the chainquery and +chainquery-mysql deployments, and restart pods: ``` -kubectl get pvc -``` - -There's an included script to start a utility container with a PersistentVolume -attached. Download the script: - -``` -SCRIPT=https://raw.githubusercontent.com/lbryio/lbry-docker/master/contrib/k8s-lbry/scripts/kubectl-run-with-pvc.sh - -curl -Lo kubectl-run-with-pvc.sh $SCRIPT && chmod a+x kubectl-run-with-pvc.sh -``` - -Run the `kubectl-run-with-pvc` script, attaching the mysql PVC: - -``` -./kubectl-run-with-pvc.sh k8s-lbry-chainquery-mysql -``` - -Wait a second for the container to start, and you should then be placed into a -container shell, indicated by the shell prompt changing to the container's -prompt. - -In the container shell, delete any existing mysql data from the volume: - -``` -rm /pvcs/k8s-lbry-chainquery-mysql/* -rf -``` - -Still in the container shell, download the backup and extract it to the volume: - -``` -apt update && apt install -y curl - -BACKUP_URL=https://lbry-chainquery-mysql-dump.sfo2.digitaloceanspaces.com/chainquery_height_560900.mysql-backup.tar.gz -curl $BACKUP_URL | tar xvz -C /pvcs/k8s-lbry-chainquery-mysql/ -``` - -Once the download and extraction completes, exit the container (or just press -Ctrl-D): - -``` -exit -``` - -Now back on your local shell, upgrade the release to re-create the mysql and -chainquery deployments: - -``` -helm upgrade k8s-lbry k8s-lbry/k8s-lbry -f values.yaml +k8s-lbry upgrade ``` You can verify that the database now has data up to the height of the database -snapshot. Login to the mysql shell (password: `chainquery`): +snapshot. 
Login to the mysql shell: ``` -POD=`kubectl get pod -l app=k8s-lbry-chainquery-mysql -o name | sed s+pod/++` && \ - kubectl exec -it $POD -- mysql -u chainquery -p +k8s-lbry chainquery-mysql-client ``` -Then query for the number of blocks: +Then query for the latest block height: ``` -mysql> select count(*) from chainquery.block; -+----------+ -| count(*) | -+----------+ -| 561034 | -+----------+ -1 row in set (15.00 sec) +mysql> select height from chainquery.block order by id desc limit 1; ++--------+ +| height | ++--------+ +| 561080 | ++--------+ +1 row in set (0.00 sec) ``` Also verify that chainquery is again happy. View the chainquery logs: ``` -kubectl logs -l app=chainquery -f +k8s-lbry logs chainquery ``` -### lbrynet +Press Ctrl-C to quit viewing the logs. -Edit `values.yaml` and set `lbrynet.enabled` to `true`. +### lbrynet API service (not for spee.ch) -Update the release to turn on lbrynet: +This is for a standalone lbrynet API service inside your cluster. Blob storage +goes to its own persistent volume, but is configured with `save_files=false`. +There is no outside access to the Downloads directory provided. You can stream +blobs from lbrynet via `http://lbrynet:5279/get/CLAIM_NAME/CLAIM_ID`. + +This particular lbrynet configuration won't work for spee.ch (v0.5.12). spee.ch +needs direct access to the Downloads directory of lbrynet. **If you are wanting +lbrynet for spee.ch, skip this section, and head directly to the [spee.ch +section](#speech-and-lbrynet-sidecar-and-mysql), which implements its own +lbrynet sidecar.** + +Edit `values-dev.yaml` and set `lbrynet.enabled` to `true`. + +Upgrade the release to turn on lbrynet: ``` -helm upgrade k8s-lbry k8s-lbry/k8s-lbry -f values.yaml +k8s-lbry upgrade ``` You can view the lbrynet logs: ``` -kubectl logs -l app=lbrynet -f +k8s-lbry logs lbrynet ``` #### IMPORTANT - Backup your cluster wallet -The wallet is stored inside the `k8s-lbry-lbrynet` persistent volume. +The wallet is created inside the `lbrynet` persistent volume. Copy the wallet in case the volume gets destroyed: ``` -WALLET=/home/lbrynet/.local/share/lbry/lbryum/wallets/default_wallet \ -POD=`kubectl get pod -l app=lbrynet -o name | sed s+pod/++` && \ - kubectl cp $POD:$WALLET /tmp/k8s-lbry-lbrynet-wallet-backup.json +k8s-lbry lbrynet-copy-wallet /tmp/k8s-lbry-lbrynet-wallet-backup.json ``` Check the contents of `/tmp/k8s-lbry-lbrynet-wallet-backup.json` and move the -file to a safe place for backup (and delete this temporary file.) +file to a safe place for backup (make sure to delete the temporary file.) Once your wallet is backed up, you can generate a receiving address in order to deposit LBC: ``` -POD=`kubectl get pod -l app=lbrynet -o name | sed s+pod/++` && \ - kubectl exec $POD -- lbrynet address unused +k8s-lbry lbrynet address unused ``` -### spee.ch +### spee.ch (and lbrynet sidecar and mysql) -Note: Throughout this deployment, the unstylized name `speech` is used. +*Note: Throughout this deployment, the unstylized name `speech` is used.* -#### MySQL for speech -[MySQL](https://github.com/helm/charts/tree/master/stable/mysql) is used as -the database speech talks to. +Speech needs three containers, running in two pods: -Edit `values.yaml` and set `speech-mysql.enabled` to `true`. 
- -Upgrade the release to turn on mysql for speech: - -``` -helm upgrade k8s-lbry k8s-lbry/k8s-lbry -f values.yaml -``` - -You can try logging into the mysql shell if you like (default password is -`speech`): - -``` -POD=`kubectl get pod -l app=k8s-lbry-speech-mysql -o name | sed s+pod/++` && \ - kubectl exec -it $POD -- mysql -u speech -p -``` - -You can view the mysql logs: - -``` -kubectl logs -l app=k8s-lbry-speech-mysql -f -``` - -#### Configure Speech - -Before you can fully configure speech, you must fund your lbrynet wallet in the -`k8s-lbry-lbrynet` deployment. Check the lbrynet section for details on -generating a receiving address for your wallet, as well as backing up your -wallet. - -Speech has a large configuration, all of which is found in `values.yaml`. The -most important settings to configure yourself are: - - * `speech.enabled` - turns on/off the the speech deployment. - * `speech.service.hostname` - The external hostname for speech. - * `speech.persistence.size` - How large of a data directory for speech. - * `speech.auth.masterPassword` - * `speech.details` - * `speech.publishing.primaryClaimAddress` - - * This can be retrieved from the lbrynet pod: - -``` -POD=`kubectl get pod -l app=lbrynet -o name | sed s+pod/++` && \ - kubectl exec $POD -- lbrynet address list -``` - - * Copy the first address from the list. This is your `primaryClaimAddress`. - - * `speech.publishing.publishOnlyApproved` - * `speech.publishing.approvedChannels` - * `speech.publishing.thumbnailChannel` + * `speech` pod: - * In order to publish thumbnails, you must create a channel. There are many options in creation. See the help from the lbrynet command to list them all: - -``` -POD=`kubectl get pod -l app=lbrynet -o name | sed s+pod/++` && \ - kubectl exec $POD -- lbrynet channel create --help -``` - - * For example, this will create the channel named `YourChannel`, bidding 1 LBC for the name: + * speech, the nodejs server container. -``` -POD=`kubectl get pod -l app=lbrynet -o name | sed s+pod/++` && \ - kubectl exec $POD -- lbrynet channel create --name @YourChannel --bid 1.0 -``` - - * Make sure that when you copy the channel name to `values.yaml` that you use double quotes surrounding the value for thumbnailChannel. This is because in YAML, the `@` symbol cannot be used without quotes. ie: `thumbnailChannel: "@YourChannel"` - - * `speech.publishing.thumbnailChannelId` + * lbrynet, running in the same pod as speech, so as to share one downloads + directory. (This is called a 'sidecar' container, which is guaranteed to + run on the same kubernetes node as the spee.ch container.) + + * `speech-mysql` pod: - * When you create the channel, listed in the `outputs` section, you will find -`claim_id`; this is the `thumbnailChannelId`. You can also retrieve this -information again by running `channel list`: + * mysql for storing the speech database. + +Edit `values-dev.yaml`. + + * Set `speech-mysql.enabled` to `true`. + * Set `speech.enabled` to `true`. + * Set `speech.service.hostname` to your subdomain name for speech. + * Set `speech.site.details.host` to your subdomain name for speech. + * Set `speech.site.details.ipAddress` to your Load Balancer external IP address. 
+ * Set `speech.site.details.title` + +Upgrade the release to turn on `speech`, `speech-lbrynet`, and `speech-mysql`: ``` -POD=`kubectl get pod -l app=lbrynet -o name | sed s+pod/++` && \ - kubectl exec $POD -- lbrynet channel list +k8s-lbry upgrade ``` -Once you've configured speech in `values.yaml`, upgrade the helm release to -apply the changes: +Speech will not work yet! Continue on through the next sections. + +#### IMPORTANT - Backup your speech wallet + +The wallet for speech is created inside the `speech-lbrynet` persistent volume. + +Copy the wallet in case the volume gets destroyed: ``` -helm upgrade k8s-lbry k8s-lbry/k8s-lbry -f values.yaml +k8s-lbry speech-lbrynet-copy-wallet /tmp/k8s-lbry-speech-lbrynet-wallet-backup.json ``` -Open your browser to the hostname specified in `speech.service.hostname` and -demo the site. +Check the contents of `/tmp/k8s-lbry-speech-lbrynet-wallet-backup.json` and move +the file to a safe place for backup (make sure to delete the temporary file.) -## TLS Support +#### Fund your speech wallet -Enabling TLS (SSL) for your cluster is optional, but it is useful if you are -going to expose any HTTP services externally. - -### Assign DNS name(s) to your Load Balancer - -The k8s-lbry chart started a Load Balancer as part of the Ingress Controller. -You can assign a DNS name to the Load Balancer External IP address. - -Get the External IP of the Load Balancer: +Once your wallet is backed up, you can generate a receiving address in order to +deposit LBC: ``` -kubectl get svc -l app=nginx-ingress,component=controller +k8s-lbry speech-lbrynet address unused ``` -Copy the External IP address shown. Update your DNS provider for your domain -accordingly, with a subdomain of your choice to point to the External IP address. +Now send at least 5 LBC to your new speech wallet address. -Edit `values.yaml` and set `echo-service.enabled` to `true`. Set -`echo-service.hostname` to the domain name you configued in your DNS. - -Upgrade the release to turn on the echo-http-server: +Verify your speech wallet balance: ``` -helm upgrade k8s-lbry k8s-lbry/k8s-lbry -f values.yaml +k8s-lbry speech-lbrynet account balance ``` -Verify that the DNS is setup correctly by using curl to the echo-http-server on -port 80: +#### Create a thumbnail channel + +Create the LBRY channel for hosting speech thumbnails. Replace `@YOUR_NAME_HERE` +with your chosen (unique) channel name to create. Amount is how much LBC to +reserve for the claim: ``` -curl http://echo.example.com +k8s-lbry speech-lbrynet channel new @YOUR_NAME_HERE --amount=1.0 ``` -(Replace `echo.example.com` with the domain you used in `values.yaml`.) - -You should see the word `echo` returned. - - -### Enable TLS - -Once you've verified that DNS for your domain correctly routes to the -echo-http-server, upgrade the nginx-ingress release with HTTPs now turned on: +Get the claim id for the channel: ``` -helm upgrade nginx-ingress stable/nginx-ingress \ - --set nginx-ingress.controller.service.enableHttps=true +k8s-lbry speech-lbrynet channel list ``` -Upgrade the k8s-lbry release, turning on HTTPs for the echo-http-server: +The `claim_id` field is your `thumbnailChannelId` used in the next section. + +#### Finish speech setup + +Edit `values-dev.yaml` again: + + * Set `speech.site.publishing.primaryClaimAddress` The fresh wallet address + generated above. 
+ * Set `speech.site.publishing.thumbnailChannel` The name of the channel to + publish thumbnails + * Set `speech.site.publishing.thumbnailChannelId` The claim id of the channel + to publish thumbnails. (see `k8s-lbry speech-lbrynet channel list`) + * Set `speech.site.publishing.serviceOnlyApproved` if you want to limit the + channels served. + * Set `speech.site.publishing.approvedChannels` if you want to limit the + channels served. + * Set `speech.site.analytics.googleId` + +See the [speech settings docs for more +info](https://github.com/lbryio/spee.ch/blob/master/docs/settings.md) + +Upgrade the release to apply the new settings: ``` -helm upgrade k8s-lbry k8s-lbry/k8s-lbry -f values.yaml --set echo-http-server.enableHttps=true +k8s-lbry upgrade ``` -Check that HTTPs connection to the echo service is working: +Restart the speech pod: ``` -curl https://echo.example.com +k8s-lbry restart speech ``` -(Replace `echo.example.com` with the domain you used in `values.yaml`.) +## Extra commands that k8s-lbry (run.sh) provides -You should see the word `echo` returned. However, it may take up to 5 minutes -for it to start to work. +You can run `k8s-lbry` without any arguments, and it will provide you some help. -Watch the cert-manager log: +### k8s-lbry helm + +This script encapsulates helm so that it can run it's own local instance of +tiller through [helm-tiller](https://github.com/rimusz/helm-tiller). As a +convenience function, run.sh can start tiller locally, pass through any helm +commands to your cluster, and then shutdown tiller: + +Example: ``` -kubectl logs --namespace cert-manager -l app=cert-manager -f +k8s-lbry helm ls ``` -A successful certificate message would look like: + +If you try to run `helm` without the `run.sh helm` wrapper, you should expect to +see this error: ``` -Certificate "echo-tls" for ingress "echo" is up to date +Error: could not find tiller ``` -Retry the curl command until you get an `echo` response. +By design, [tiller is not running on your +cluster](https://rimusz.net/tillerless-helm), it just runs locally for the +duration that `run.sh` needs it, then shuts down. -## Improvements +### k8s-lbry kubectl -Beyond this point, there are several things one could do to improve this -configuration and harden for production. +This script encapsulates kubectl so that you do not have to keep typing +`--namespace k8s-lbry` all the time. All kubectl commands will default to +`k8s-lbry` or the `NAMESPACE` environment variable if set. - * Secrets +Example: - * At this stage, all your configuration resides in `values.yaml`, including -passwords. You can seperate these secrets out of your config and put them into a -[Kubernetes Secret](https://kubernetes.io/docs/concepts/configuration/secret/). +``` +k8s-lbry kubectl get pods +``` - * [Sealed Secrets](https://github.com/bitnami-labs/sealed-secrets) +### k8s-lbry logs - * [Helm Secrets](https://github.com/futuresimple/helm-secrets) +Stream the logs for a pod into your terminal, given the helm app name. If the +pod contains more than one container you must specify it as the third argument. - * Namespaces +Examples: - * If you are using the cluster for things other than lbry, you should install - k8s-lbry into its own namespace. This will allow pods within the same - namespace to talk to eachother, but not to pods in other namespaces. +``` +k8s-lbry logs lbrycrd - * Using a namespace in the introductory docs above, would have complicated - the (already complex) helm and kubectl commands, so they were omitted. 
+k8s-lbry logs speech speech-lbrynet +``` - * Both helm and kubectl support the `--namespace` argument. You can translate - all the commands above, adding the `--namespace` argument. +Press Ctrl-C to stop streaming the logs. If the logs seemingly hang forever, +press Ctrl-C and try the command again. - For example, to install the k8s-lbry chart in its own `k8s-lbry` namespace: - ``` - ## helm install RELEASE REPO/CHART --namespace NAMESPACE -f VALUES - helm install k8s-lbry k8s-lbry/k8s-lbry --namespace k8s-lbry -f values.yaml - ``` +### k8s-lbry shell - And to look at pods in the `k8s-lbry` namespace: +When doing maintainance tasks, it is often useful to be able to attach a shell +to a running pod. This is a convenience wrapper that uses the helm app name to +connect to the correct pod. + +This will connect to the pod running the `lbrynet` service. + +Example: + +``` +k8s-lbry shell lbrynet +``` + +Once in the shell, do whatever maintaince is necessary, and press `Ctrl-D` or +type `exit` when done. + +### k8s-lbry shell-pvc + +When doing maintainance tasks, it is often useful to be able to run a utility +container that mounts the persistent volume +([PVC](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)) of +another container. This is especially useful in scenarios where the pod will not +start, and therefore cannot use the `run.sh shell` command in the previous +section. + +This will run a shell in a new utility container, mounting the lbrynet PVC to +`/pvcs/lbrynet`. + +Example: + +``` +k8s-lbry shell-pvc lbrynet +``` + +Once in the shell, do whatever maintaince is necessary, and press `Ctrl-D` or +type `exit` when done. + + +### k8s-lbry restart + +Delete a pod for a given app name. The existing deployment will immediately +restart a new pod. + +Example: + +``` +k8s-lbry restart speech +``` + +### k8s-lbry lbrynet +You can run the `lbrynet` client from within the running pod, redirecting output +to your local console. + +Example: + +``` +k8s-lbry lbrynet --help +``` + +### k8s-lbry chainquery-mysql-client + +Run the mysql shell for the chainquery database. + +### k8s-lbry speech-mysql-client + +Run the mysql shell for the speech database. + +### k8s-lbry lbrynet-copy-wallet + +Backup the lbrynet wallet to a local path. + +Example: + +``` +k8s-lbry lbrynet-copy-wallet /tmp/k8s-lbry-lbrynet-wallet-backup.json +``` +### k8s-lbry package + +This is for the developer of this package to build and maintain the helm package +releases, and upload to the S3 package repository. Requires `s3cmd` installed. + +Example: + +``` +k8s-lbry package 0.1.1 +``` + + +## TLS / SSL / HTTPS + +You have already installed cert-manager for your cluster, but HTTPs is not +turned on out of the box. Setup is easy: + + * You need to create a DNS A record for your domain pointing to the External IP + address of the nginx-ingress Load Balancer. (Preferably create a wildcard + record for an entire subdomain [`*.example.com` or `*.lbry.example.com`], + that way you only have to set this up once, no matter how many sub-domains + you end up needing.) Refer to the [install + section](https://github.com/EnigmaCurry/lbry-docker/tree/k8s-lbry/contrib/k8s-lbry#k8s-lbry-install) + for how to retrieve the IP address. + + * Edit `values-dev.yaml` + + * Change `cert-manager-issuer.email` from the example email address to your + own. [Let's Encrypt](https://letsencrypt.org/) is a free TLS certificate + issuer, and they will send you important emails about your domain and + certificate expirations. 
+ + * You can turn on the echo service to test with: + + * Change `echo-http-server.enabled` to `true` + + * Change `echo-http-server.hostname` to a hostname you've configured the DNS + for. + +Upgrade nginx-ingress, turning on HTTPs support: + +``` +NGINX_ENABLE_HTTPS=true k8s-lbry upgrade-nginx-ingress +``` + +And Upgrade `k8s-lbry`: + +``` +k8s-lbry upgrade +``` + +If you turned on the echo service, try it out with curl: + +``` +curl -L https://echo.example.com +``` + +It should return the name of the service: `echo-http-server`. + +If you get any certificate validation errors, then you may need to wait for up +to 20 minutes for the certificate to be issued, and then retry. + +If you run into problems with certificates, check out the cert-manager logs: + +``` +kubectl -n cert-manager logs -l app=cert-manager -f +``` + +Also check the certificate resources: + +``` +k8s-lbry kubectl get certificates +``` + +You should see the `echo-http-server-tls` certificate resource listed. The +`READY` status is the indicator as to whether the certificate has been issued +yet or not. + +## Cloud specific notes + +### AWS + +Deployment on AWS requires these modifications: + +Following the [CDK on +AWS](https://www.ubuntu.com/kubernetes/docs/aws-integration) docs, install the +StorageClass for EBS: + +``` +kubectl create -f - < /dev/null; then + echo "Error: You must install helm" + echo "On Ubuntu you can run: sudo snap install --classic helm" + echo "For other platforms, see https://github.com/helm/helm/releases/latest" + exit 1 + fi + if ! which kubectl > /dev/null; then + echo "Error: You must install kubectl" + echo "On Ubuntu you can run: sudo snap install --classic kubectl" + echo "For other platforms, see https://kubernetes.io/docs/tasks/tools/install-kubectl/" + exit 1 + fi + if ! which git > /dev/null; then + echo "Error: You must install git" + echo "On Ubuntu you can run: sudo apt install -y git" + echo "For other platforms, see https://git-scm.com/downloads" + exit 1 + fi + + ### Initialize helm locally, but do not install tiller to the cluster: + HELM=$(which helm) + if [ ! -f "$HOME"/.helm/repository/repositories.yaml ]; then + exe "$HELM" init --client-only + fi + + ### Add the stable helm chart repository: + if [ "$CHART" != "$BASEDIR" ]; then + exe "$HELM" repo add lbry "$HELM_REPO" + exe "$HELM" repo update + fi + + ### Install helm-tiller plugin, so that no tiller needs to be installed to the cluster: + exe "$HELM" plugin install https://github.com/rimusz/helm-tiller || true + + ### Setup the values.yaml for the chart, using the VALUES environment variable or script default + ### If no values file exists, interactively ask if a default config should be created in its place. + if [ ! -f "$VALUES" ]; then + echo "" + echo "Values file does not exist: $VALUES" + read -p "Would you like to create a default config file here? (y/N)" choice + echo "" + case "$choice" in + y|Y ) curl "$DEFAULT_VALUES_URL" -Lo "$VALUES" + echo "Default configuration created: $VALUES" + ;; + * ) echo "You must create your own values file: $VALUES (using values.yaml as a template.)" + echo "Or set VALUES=/path/to/values.yaml before subsequent commands." 
+ exit 1 + ;; + esac + else + echo "Configuration found: $VALUES" + fi + echo "Edit this config file to suit your own environment before install/upgrade" + +} + +helm() { + ## Redefine all helm commands to run through local tiller instance + ## https://rimusz.net/tillerless-helm + HELM=$(which helm) + exe "$HELM" tiller run "$NAMESPACE" -- helm "$*" +} + +kubectl() { + ## kubectl wrapper that defaults to k8s-lbry namespace, so you don't have to + ## type as much, but still passes all the provided arguments on to kubectl. + ## So you can still specify a different namespace, because the client args + ## are applied last. + KUBECTL=$(which kubectl) + exe "$KUBECTL" --namespace "$NAMESPACE" "$*" +} + +install-nginx-ingress() { + ### Install nginx-ingress from stable helm repository + ### See https://github.com/helm/charts/tree/master/stable/nginx-ingress + helm install stable/nginx-ingress --namespace "$NAMESPACE" --name nginx-ingress --set nginx-ingress.controller.service.enableHttps="$NGINX_ENABLE_HTTPS" --set tcp.9246="$NAMESPACE/lbrycrd:9246" +} + +upgrade-nginx-ingress() { + ### Upgrade nginx-ingress + helm upgrade nginx-ingress stable/nginx-ingress --namespace "$NAMESPACE" --set nginx-ingress.controller.service.enableHttps="$NGINX_ENABLE_HTTPS" --set tcp.9246="$NAMESPACE/lbrycrd:9246" +} + +install-cert-manager() { + ### Install cert-manager from jetstack helm repository + ### See https://docs.cert-manager.io/en/latest/index.html + kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.7/deploy/manifests/00-crds.yaml + + helm repo add jetstack https://charts.jetstack.io + helm repo update + + helm install --name cert-manager --namespace "$CERTMANAGER_NAMESPACE" jetstack/cert-manager --version $CERTMANAGER_VERSION + + kubectl label namespace "$CERTMANAGER_NAMESPACE" certmanager.k8s.io/disable-validation="true" +} + +upgrade-cert-manager() { + ### Upgrade cert-manager + helm upgrade cert-manager jetstack/cert-manager --namespace "$CERTMANAGER_NAMESPACE" --version $CERTMANAGER_VERSION +} + +install() { + ### Install the k8s-lbry helm chart + if [ ! -f "$VALUES" ]; then + echo "Could not find chart values file: $VALUES" + exit 1 + fi + helm install "$CHART" --name "$RELEASE" --namespace="$NAMESPACE" -f "$VALUES" +} + +upgrade() { + ### Upgrade the k8s-lbry helm chart + if [ ! -f "$VALUES" ]; then + echo "Could not find chart values file: $VALUES" + exit 1 + fi + helm upgrade "$RELEASE" "$CHART" --namespace="$NAMESPACE" -f "$VALUES" +} + +shell() { + ### Execute a shell in the running container with helm app name provided by first argument + ( + if [ "$#" -eq 1 ]; then + KUBECTL=$(which kubectl) + POD=$($KUBECTL get --namespace "$NAMESPACE" pod -l app="$1" -o name | sed s+pod/++) + exe kubectl exec -it "$POD" /bin/bash + else + echo "Required arg: helm app name of pod to shell into" + fi + ) +} + +shell-pvc() { + ### Start a utility container shell with an attached Persistent Volume Claim. + ( + # From https://gist.github.com/yuanying/3aa7d59dcce65470804ab43def646ab6 + + IMAGE="ubuntu:18.04" + COMMAND="/bin/bash" + SUFFIX=$(date +%s | shasum | base64 | fold -w 10 | head -1 | tr '[:upper:]' '[:lower:]') + + usage_exit() { + echo "Usage: $0 [-c command] [-i image] PVC ..." 1>&2 + exit 1 + } + + if [ "$#" -ne 1 ]; then + usage_exit + fi + + while getopts i:h OPT + do + case $OPT in + i) IMAGE=$OPTARG + ;; + c) COMMAND=$OPTARG + ;; + h) usage_exit + ;; + \?) 
usage_exit + ;; + esac + done + shift $(($OPTIND - 1)) + + VOL_MOUNTS="" + VOLS="" + COMMA="" + + for i in $@ + do + VOL_MOUNTS="${VOL_MOUNTS}${COMMA}{\"name\": \"${i}\",\"mountPath\": \"/pvcs/${i}\"}" + VOLS="${VOLS}${COMMA}{\"name\": \"${i}\",\"persistentVolumeClaim\": {\"claimName\": \"${i}\"}}" + COMMA="," + done + + $(which kubectl) --namespace "$NAMESPACE" run -it --rm --restart=Never --image="${IMAGE}" pvc-mounter-"${SUFFIX}" --overrides " +{ + \"spec\": { + \"hostNetwork\": true, + \"containers\":[ + { + \"args\": [\"${COMMAND}\"], + \"stdin\": true, + \"tty\": true, + \"name\": \"pvc\", + \"image\": \"${IMAGE}\", + \"volumeMounts\": [ + ${VOL_MOUNTS} + ] + } + ], + \"volumes\": [ + ${VOLS} + ] + } +} +" -- "${COMMAND}" + + ) +} + +restart() { + ### Restart the pod given by a helm app name + ( + if [ "$#" -eq 1 ]; then + KUBECTL=$(which kubectl) + POD=$($KUBECTL get --namespace "$NAMESPACE" pod -l app="$1" -o name | sed s+pod/++) + exe kubectl delete pod "$POD" + else + echo "Required arg: helm app name of pod to restart" + fi + ) +} + + +package() { + ### Create a packaged helm release and upload to the S3 repository + ( + cd $BASEDIR + set -e + if [ "$#" -eq 1 ]; then + if ! grep "version: $1" Chart.yaml; then + echo "Chart.yaml version does not match intended package version ($1)." + exit 1 + fi + else + echo "required argument: package version" + exit 1 + fi + + PACKAGE="k8s-lbry-$1.tgz" + + ## Build Helm package repository and upload to s3 + if which s3cmd > /dev/null; then + if s3cmd info $PACKAGE_BUCKET > /dev/null; then + # Download all remote releases, to re-include in new index.yaml + exe s3cmd sync $PACKAGE_BUCKET . + + # Check if release already exists + s3_url="$PACKAGE_BUCKET/$PACKAGE" + if s3cmd info "$s3_url"; then + echo "$s3_url already exists. Aborting." + exit 1 + fi + + # Package release and rebuild repository + exe helm dependency update + exe helm package . + exe helm repo index . + + # Publish packages to s3 + exe s3cmd put --acl-public index.yaml "$PACKAGE" $PACKAGE_BUCKET + exe s3cmd put --acl-public charts/*.tgz $PACKAGE_BUCKET/charts/ + else + echo "s3cmd is not setup, run s3cmd --configure" + exit 1 + fi + else + echo "s3cmd is not installed" + exit 1 + fi + ) +} + +chainquery-mysql-client() { + ### Access the mysql shell for chainquery + KUBECTL=$(which kubectl) + POD=$($KUBECTL -n "$NAMESPACE" get pod -l app=chainquery-mysql -o name | sed s+pod/++) + if [ ${#POD} -gt 0 ]; then + kubectl exec -it "$POD" -- mysql -u chainquery -pchainquery + else + echo "chainquery-mysql pod not found" + fi +} + +speech-mysql-client() { + ### Access the mysql shell for speech + KUBECTL=$(which kubectl) + POD=$($KUBECTL -n "$NAMESPACE" get pod -l app=speech-mysql -o name | sed s+pod/++) + if [ ${#POD} -gt 0 ]; then + kubectl exec -it "$POD" -- mysql -u speech -pspeech + else + echo "speech-mysql pod not found" + fi +} + +chainquery-override-snapshot() { + ### Delete the existing chainquery database and download a snapshot to restore + read -p "Would you like to DESTROY the existing chainquery database, +and restore from a fresh snapshot? (y/N) " destroy_chainquery + case "$destroy_chainquery" in + y|Y ) + kubectl delete deployments chainquery chainquery-mysql || true + echo "Please wait.." 
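            ### Run a throwaway utility pod that mounts the chainquery-mysql
            ### PVC, deletes the old database files, and unpacks the snapshot
            ### tarball in their place: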
+ IMAGE="ubuntu:18.04" + SUFFIX=$(date +%s | shasum | base64 | fold -w 10 | head -1 | tr '[:upper:]' '[:lower:]') + VOL_MOUNTS="{\"name\": \"chainquery-mysql\",\"mountPath\": \"/pvcs/chainquery-mysql\"}" + VOLS="{\"name\": \"chainquery-mysql\",\"persistentVolumeClaim\": {\"claimName\": \"chainquery-mysql\"}}" + COMMAND="rm -rf /pvcs/chainquery-mysql/* && apt-get update && apt-get install -y curl && curl -s ${CHAINQUERY_SNAPSHOT_URL} | tar xvz -C /pvcs/chainquery-mysql/" + $(which kubectl) --namespace "$NAMESPACE" run -it --rm --restart=Never --image=${IMAGE} pvc-mounter-"${SUFFIX}" --overrides " +{ + \"spec\": { + \"hostNetwork\": true, + \"containers\":[ + { + \"args\": [\"bin/bash\", \"-c\", \"${COMMAND}\"], + \"stdin\": true, + \"tty\": true, + \"name\": \"pvc\", + \"image\": \"${IMAGE}\", + \"volumeMounts\": [ + ${VOL_MOUNTS} + ] + } + ], + \"volumes\": [ + ${VOLS} + ] + } +} +" + echo "Extraction complete" + ;; + * ) echo "Aborted." + ;; + esac +} + +lbrycrd-override-snapshot() { + ### Delete the existing lbrycrd data and download a snapshot to restore + read -p "Would you like to DESTROY the existing lbrycrd data, +and restore from a fresh snapshot? (y/N) " destroy_lbrycrd + case "$destroy_lbrycrd" in + y|Y ) + kubectl delete deployments lbrycrd || true + echo "Please wait.." + IMAGE="ubuntu:18.04" + SUFFIX=$(date +%s | shasum | base64 | fold -w 10 | head -1 | tr '[:upper:]' '[:lower:]') + VOL_MOUNTS="{\"name\": \"lbrycrd\",\"mountPath\": \"/pvcs/lbrycrd\"}" + VOLS="{\"name\": \"lbrycrd\",\"persistentVolumeClaim\": {\"claimName\": \"lbrycrd\"}}" + COMMAND="rm -rf /pvcs/lbrycrd/* && apt-get update && apt-get install -y curl && curl -s ${LBRYCRD_SNAPSHOT_URL} | tar xvz -C /pvcs/lbrycrd/" + $(which kubectl) --namespace "$NAMESPACE" run -it --rm --restart=Never --image=${IMAGE} pvc-mounter-"${SUFFIX}" --overrides " +{ + \"spec\": { + \"hostNetwork\": true, + \"containers\":[ + { + \"args\": [\"bin/bash\", \"-c\", \"${COMMAND}\"], + \"stdin\": true, + \"tty\": true, + \"name\": \"pvc\", + \"image\": \"${IMAGE}\", + \"volumeMounts\": [ + ${VOL_MOUNTS} + ] + } + ], + \"volumes\": [ + ${VOLS} + ] + } +} +" + echo "Extraction complete" + ;; + * ) echo "Aborted." 
+ ;; + esac +} + +logs() { + ### Watch the logs of a pod by helm app name + ( + set -e + if [ "$#" -eq 1 ]; then + kubectl logs -l app="$1" -f + elif [ "$#" -eq 2 ]; then + KUBECTL=$(which kubectl) + POD=$($KUBECTL get --namespace "$NAMESPACE" pod -l app="$1" -o name | sed s+pod/++) + kubectl logs "$POD" "$2" -f + else + echo "Required arg: app_name" + fi + ) +} + +lbrynet-copy-wallet() { + ### Copy the lbrynet wallet to a local path for backup + ( + set -e + if [ "$#" -eq 1 ]; then + WALLET=/home/lbrynet/.local/share/lbry/lbryum/wallets/default_wallet + KUBECTL=$(which kubectl) + POD=$($KUBECTL -n "$NAMESPACE" get pod -l app=lbrynet -o name | sed s+pod/++) + kubectl cp "$POD":$WALLET "$1" + chmod 600 "$1" + echo "lbrynet wallet copied to $1" + else + echo "Required arg: path of backup location for wallet" + fi + ) +} + +speech-lbrynet-copy-wallet() { + ### Copy the speech-lbrynet wallet to a local path for backup + ( + set -e + if [ "$#" -eq 1 ]; then + WALLET=/home/lbrynet/.local/share/lbry/lbryum/wallets/default_wallet + KUBECTL=$(which kubectl) + POD=$($KUBECTL -n "$NAMESPACE" get pod -l app=speech -o name | sed s+pod/++) + kubectl cp "$POD":$WALLET "$1" -c speech-lbrynet + chmod 600 "$1" + echo "lbrynet wallet copied to $1" + else + echo "Required arg: path of backup location for wallet" + fi + ) +} + +lbrycrd-cli() { + ## Run lbrycrd-cli client from inside the running pod outputting to your local terminal + KUBECTL=$(which kubectl) + POD=$($KUBECTL -n "$NAMESPACE" get pod -l app=lbrycrd -o name | sed s+pod/++) + if [ ${#POD} -gt 0 ]; then + kubectl exec "$POD" -- lbrycrd-cli -rpcuser="$LBRYCRD_RPC_USER" -rpcpassword="$LBRYCRD_RPC_PASSWORD" "$*" + else + echo "lbrycrd pod not found" + fi +} + +lbrynet() { + ## Run lbrynet client from inside the running pod outputting to your local terminal + KUBECTL=$(which kubectl) + POD=$($KUBECTL -n "$NAMESPACE" get pod -l app=lbrynet -o name | sed s+pod/++) + if [ ${#POD} -gt 0 ]; then + kubectl exec "$POD" -- lbrynet "$*" + else + echo "lbrynet pod not found" + fi +} + +speech-lbrynet() { + ## Run lbrynet client from inside the running pod outputting to your local terminal + KUBECTL=$(which kubectl) + POD=$($KUBECTL -n "$NAMESPACE" get pod -l app=speech -o name | sed s+pod/++) + if [ ${#POD} -gt 0 ]; then + kubectl exec "$POD" -c speech-lbrynet -- lbrynet "$*" + else + echo "lbrynet pod not found" + fi +} + +SUBCOMMANDS_NO_ARGS=(setup install install-nginx-ingress install-cert-manager upgrade + upgrade-nginx-ingress upgrade-cert-manager chainquery-mysql-client + speech-mysql-client chainquery-override-snapshot lbrycrd-override-snapshot + setup-alias) + +SUBCOMMANDS_PASS_ARGS=(helm kubectl shell shell-pvc restart package logs lbrynet-copy-wallet lbrynet speech-lbrynet-copy-wallet speech-lbrynet lbrycrd-cli completion) + +completion() { + if [ "$#" -eq 1 ] && [ "$1" == "bash" ]; then + cat < "$BASEDIR"/completion.bash.inc + $(which helm) completion bash >> "$BASEDIR"/completion.bash.inc + completion bash >> "$BASEDIR"/completion.bash.inc + + if [[ -z $K8S_LBRY_HOME ]] && ! grep "K8S_LBRY_HOME" "$HOME"/.bashrc > /dev/null; then + echo "K8S_LBRY_HOME not set." + read -p "Would you this script to edit $HOME/.bashrc to add tab completion support? 
(y/N) " choice
        case "$choice" in
            y|Y )
                cat <<EOF >> "$HOME"/.bashrc

## Enable bash completion
if [ -f /etc/bash_completion ]; then
    source /etc/bash_completion
fi

## k8s-lbry alias and tab completion
K8S_LBRY_HOME=$BASEDIR
alias $RUN_ALIAS=\$K8S_LBRY_HOME/run.sh
if [ -f \$K8S_LBRY_HOME/completion.bash.inc ]; then
    source \$K8S_LBRY_HOME/completion.bash.inc
fi
EOF
                echo "Created new alias: $RUN_ALIAS"
                echo "To use the new alias, run \"source ~/.bashrc\" or just close your terminal session and restart it."
                ;;
            * ) echo "Aborting" && exit 1;;
        esac
    else
        echo "K8S_LBRY_HOME environment already setup. Nothing left to do."
    fi
}

if printf '%s\n' ${SUBCOMMANDS_NO_ARGS[@]} | grep -q -P "^$1$"; then
    ## Subcommands that take no arguments:
    (
        set -e
        if [ "$#" -eq 1 ]; then
            $*
        else
            echo "$1 does not take any additional arguments"
        fi
    )
elif printf '%s\n' ${SUBCOMMANDS_PASS_ARGS[@]} | grep -q -P "^$1$"; then
    ## Subcommands that pass all arguments:
    (
        set -e
        $*
    )
else
    if [[ $# -gt 0 ]]; then
        echo "## Invalid command: $1"
    else
        echo "## Must specify a command:"
    fi
    echo ""
    echo "## $0 setup"
    echo "## - Setup dependencies"
    echo ""
    echo "## $0 install-nginx-ingress"
    echo "## - Deploy nginx-ingress chart"
    echo ""
    echo "## $0 install-cert-manager"
    echo "## - Deploy cert-manager chart"
    echo ""
    echo "## $0 install"
    echo "## - Deploy main k8s-lbry chart"
    echo ""
    echo "## $0 upgrade"
    echo "## - Upgrade an existing release"
    echo ""
    echo "## $0 shell <app_name>"
    echo "## - execute shell into running helm application pod"
    echo ""
    echo "## $0 shell-pvc [-c command] [-i image] PVC"
    echo "## - run a utility shell with the named PVC mounted in /pvcs"
    echo ""
    echo "## $0 helm [...] 
" + echo "## - run any helm command (through helm-tiller wrapper)" + echo "" + echo "## $0 kubectl [...]" + echo "## - run any kubectl command (defaulting to configured namespace)" + echo "" + echo "## $0 chainquery-mysql-client" + echo "## - run mysql shell for chainquery database" + echo "" + echo "## $0 speech-mysql-client" + echo "## - run mysql shell for speech database" + echo "" + echo "## $0 chainquery-override-snapshot" + echo "## - Restore chainquery database from snapshot" + echo "" + echo "## $0 lbrycrd-override-snapshot" + echo "## - Restore lbrycrd database from snapshot" + echo "" + echo "## $0 logs [container]" + echo "## - Stream the logs for the pod running the helm app name provided" + echo "## (specify which container if the pod has more than one.)" + echo "" + echo "## $0 lbrynet-copy-wallet " + echo "## - Backup the lbrynet wallet file to a local path" + echo "" + echo "## $0 lbrynet " + echo "## - Run lbrynet client inside running lbrynet pod" + echo "" + echo "## $0 speech-lbrynet-copy-wallet " + echo "## - Backup the speech-lbrynet wallet file to a local path" + echo "" + echo "## $0 speech-lbrynet " + echo "## - Run speech-lbrynet client inside running speech pod" + echo "" + echo "## $0 setup-alias" + echo "## - Setup bash alias and tab completion for run.sh" + echo "" + exit 1 +fi diff --git a/contrib/k8s-lbry/scripts/kubectl-run-with-pvc.sh b/contrib/k8s-lbry/scripts/kubectl-run-with-pvc.sh deleted file mode 100755 index c8ce68b..0000000 --- a/contrib/k8s-lbry/scripts/kubectl-run-with-pvc.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/bash - -# From https://gist.github.com/yuanying/3aa7d59dcce65470804ab43def646ab6 - -IMAGE="ubuntu:18.04" -COMMAND="/bin/bash" -SUFFIX=$(date +%s | shasum | base64 | fold -w 10 | head -1 | tr '[:upper:]' '[:lower:]') - -usage_exit() { - echo "Usage: $0 [-c command] [-i image] PVC ..." 1>&2 - exit 1 -} - -if [ "$#" -ne 1 ]; then - usage_exit -fi - -while getopts i:h OPT -do - case $OPT in - i) IMAGE=$OPTARG - ;; - c) COMMAND=$OPTARG - ;; - h) usage_exit - ;; - \?) usage_exit - ;; - esac -done -shift $(($OPTIND - 1)) - -VOL_MOUNTS="" -VOLS="" -COMMA="" - -for i in $@ -do - VOL_MOUNTS="${VOL_MOUNTS}${COMMA}{\"name\": \"${i}\",\"mountPath\": \"/pvcs/${i}\"}" - VOLS="${VOLS}${COMMA}{\"name\": \"${i}\",\"persistentVolumeClaim\": {\"claimName\": \"${i}\"}}" - COMMA="," -done - -kubectl run -it --rm --restart=Never --image=${IMAGE} pvc-mounter-${SUFFIX} --overrides " -{ - \"spec\": { - \"hostNetwork\": true, - \"containers\":[ - { - \"args\": [\"${COMMAND}\"], - \"stdin\": true, - \"tty\": true, - \"name\": \"pvc\", - \"image\": \"${IMAGE}\", - \"volumeMounts\": [ - ${VOL_MOUNTS} - ] - } - ], - \"volumes\": [ - ${VOLS} - ] - } -} -" -- ${COMMAND} diff --git a/contrib/k8s-lbry/scripts/package.sh b/contrib/k8s-lbry/scripts/package.sh deleted file mode 100755 index f6b3cbc..0000000 --- a/contrib/k8s-lbry/scripts/package.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -## Build Helm package and upload to s3 repository - -exe() { echo "\$ $@" ; "$@" ; } - -# Work from the parent directory to this script: -cd `dirname "$0"` && cd .. - -if s3cmd info s3://k8s-lbry > /dev/null; then - exe helm dependency update - exe helm package . - exe helm repo index . 
- - exe s3cmd put --acl-public index.yaml k8s-lbry-*.tgz s3://k8s-lbry/ - exe s3cmd put --acl-public charts/*.tgz s3://k8s-lbry/charts/ -else - echo "s3cmd is not setup, run s3cmd --configure" - exit 1 -fi - diff --git a/contrib/k8s-lbry/values.yaml b/contrib/k8s-lbry/values.yaml index c7a476e..a1c5642 100644 --- a/contrib/k8s-lbry/values.yaml +++ b/contrib/k8s-lbry/values.yaml @@ -2,19 +2,8 @@ cert-manager-issuer: # Enter your email address to receive important notices from Let's Encrypt: email: "fred@example.com" -echo-http-server: - enabled: false - # Enter your domain name for the echo test service: - hostname: "echo.example.com" - service: echo-http-server - enableHttps: true - certificateIssuer: letsencrypt-prod - -echo-socket-server: - enabled: false - service: echo-socket-server - lbrycrd: + fullnameOverride: lbrycrd enabled: true image: repository: lbry/lbrycrd @@ -28,7 +17,9 @@ lbrycrd: accessMode: ReadWriteOnce size: 50Gi annotations: - "helm.sh/resource-policy": keep + "helm.sh/resource-policy": keep + # If on AWS: + # storageClass: "ebs-gp2" configurationFile: lbrycrd.conf: |- datadir=/data @@ -36,14 +27,14 @@ lbrycrd: rpcpassword=lbry regtest=0 txindex=1 - rpcallowip=10.244.0.0/16 + rpcallowip=10.0.0.0/8 server=1 listen=1 daemon=0 externalip= chainquery-mysql: - nameOverride: chainquery-mysql + fullnameOverride: chainquery-mysql enabled: false mysqlUser: chainquery mysqlPassword: chainquery @@ -54,8 +45,11 @@ chainquery-mysql: size: 100Gi annotations: "helm.sh/resource-policy": keep + # If on AWS: + # storageClass: "ebs-gp2" chainquery: + fullnameOverride: chainquery enabled: false image: repository: lbry/chainquery @@ -65,17 +59,16 @@ chainquery: port: 6300 configurationFile: chainqueryconfig.toml: |- - lbrycrdurl="rpc://lbry:lbry@k8s-lbry-lbrycrd:9245" - mysqldsn="chainquery:chainquery@tcp(k8s-lbry-chainquery-mysql:3306)/chainquery" - apimysqldsn="chainquery:chainquery@tcp(k8s-lbry-chainquery-mysql:3306)/chainquery" + lbrycrdurl="rpc://lbry:lbry@lbrycrd:9245" + mysqldsn="chainquery:chainquery@tcp(chainquery-mysql:3306)/chainquery" + apimysqldsn="chainquery:chainquery@tcp(chainquery-mysql:3306)/chainquery" lbrynet: + fullnameOverride: lbrynet enabled: false image: - # repository: lbry/lbrynet - # tag: linux-x86_64-production - repository: enigmacurry/dump - tag: lbrynet + repository: lbry/lbrynet + tag: linux-x86_64-production pullPolicy: Always service: rpcPort: 5279 @@ -85,15 +78,20 @@ lbrynet: size: 10Gi annotations: "helm.sh/resource-policy": keep + # If on AWS: + # storageClass: "ebs-gp2" daemon_settings: api: 0.0.0.0:5279 use_upnp: false auto_re_reflect_interval: 0 max_key_fee: {amount: 0, currency: LBC} run_reflector_server: false + save_files: false speech-mysql: - nameOverride: speech-mysql + fullnameOverride: speech-mysql + labels: + app: speech-mysql enabled: false mysqlUser: speech mysqlPassword: speech @@ -104,11 +102,15 @@ speech-mysql: size: 100Gi annotations: "helm.sh/resource-policy": keep + # If on AWS: + # storageClass: "ebs-gp2" + speech: + fullnameOverride: speech enabled: false service: - name: k8s-lbry-speech + name: speech hostname: "speech.example.com" port: 3000 enableHttps: true @@ -123,25 +125,48 @@ speech: size: 20Gi annotations: "helm.sh/resource-policy": keep + # If on AWS: + # storageClass: "ebs-gp2" auth: masterPassword: speech - sessionKey: mysecretkeyword mysql: - host: k8s-lbry-speech-mysql + host: speech-mysql database: speech username: speech password: speech chainquery: - host: k8s-lbry-chainquery-mysql + host: chainquery-mysql port: 
3306 timeout: 30 database: chainquery username: chainquery password: chainquery lbrynet: - apiHost: k8s-lbry-lbrynet + enabled: true + apiHost: 127.0.0.1 apiPort: 5279 getTimeout: 30 + image: + repository: enigmacurry/dump + tag: lbrynet-v0.33.0 + pullPolicy: Always + service: + rpcPort: 5279 + daemon_settings: + api: 0.0.0.0:5279 + use_upnp: false + auto_re_reflect_interval: 0 + max_key_fee: {amount: 0, currency: LBC} + run_reflector_server: false + download_dir: /data/Downloads + persistence: + enabled: true + accessMode: ReadWriteOnce + size: 10Gi + annotations: + "helm.sh/resource-policy": keep + # If on AWS: + # storageClass: "ebs-gp2" logger: logLevel: verbose slack: @@ -167,11 +192,11 @@ speech: primaryClaimAddress: additionalClaimAddresses: [] approvedChannels: [] - channelClaimBidAmount: 0.1 + channelClaimBidAmount: "0.1" closedRegistration: false disabled: false disabledMessage: 'Default publishing disabled message' - fileClaimBidAmount: 0.01 + fileClaimBidAmount: "0.01" fileSizeLimits: application: 50000000 audio: 50000000 @@ -243,21 +268,14 @@ speech: performChecks: true performUpdates: true - -percona: - #### Prefer mysql over percona for now: +echo-http-server: enabled: false - # mysqlUser: chainquery - # mysqlPassword: chainquery - # mysqlDatabase: chainquery - # persistence: - # enabled: true - # accessMode: ReadWriteOnce - # size: 100Gi - # resources: - # requests: - # memory: 1Gi - # cpu: 1 + # Enter your domain name for the echo test service: + hostname: "echo.example.com" + service: echo-http-server + enableHttps: true + certificateIssuer: letsencrypt-prod - - +echo-socket-server: + enabled: false + service: echo-socket-server