2018-01-31 02:15:21 +01:00
|
|
|
package db
|
|
|
|
|
|
|
|
import (
|
2018-06-15 04:30:37 +02:00
|
|
|
"context"
|
2018-01-31 02:15:21 +01:00
|
|
|
"database/sql"
|
2019-06-25 15:13:25 +02:00
|
|
|
"time"
|
2018-01-31 02:15:21 +01:00
|
|
|
|
2019-01-09 23:52:30 +01:00
|
|
|
"github.com/lbryio/lbry.go/dht/bits"
|
|
|
|
"github.com/lbryio/lbry.go/extras/errors"
|
|
|
|
qt "github.com/lbryio/lbry.go/extras/query"
|
2018-08-07 22:51:02 +02:00
|
|
|
|
2019-06-26 20:36:05 +02:00
|
|
|
"github.com/go-sql-driver/mysql"
|
|
|
|
_ "github.com/go-sql-driver/mysql" // blank import for db driver ensures its imported even if its not used
|
2018-01-31 02:15:21 +01:00
|
|
|
log "github.com/sirupsen/logrus"
|
|
|
|
)
|
|
|
|
|
2018-08-07 22:51:02 +02:00
|
|
|
// SdBlob is a special blob that contains information on the rest of the blobs in the stream
type SdBlob struct {
	// StreamName is the name of the stream this SD blob describes.
	StreamName string `json:"stream_name"`
	// Blobs lists every content blob in the stream, in order.
	Blobs []struct {
		// Length is the blob size in bytes.
		Length int `json:"length"`
		// BlobNum is the blob's position within the stream.
		BlobNum int `json:"blob_num"`
		// BlobHash is empty for the stream's null terminator blob
		// (see AddSDBlob, which skips blobs with an empty hash).
		BlobHash string `json:"blob_hash,omitempty"`
		// IV is the initialization vector for this blob.
		IV string `json:"iv"`
	} `json:"blobs"`
	// StreamType identifies the kind of stream.
	StreamType string `json:"stream_type"`
	// Key is the stream's encryption key.
	Key string `json:"key"`
	// SuggestedFileName is the file name suggested for the downloaded stream.
	SuggestedFileName string `json:"suggested_file_name"`
	// StreamHash is the unique hash of the stream itself (distinct from the SD blob's own hash).
	StreamHash string `json:"stream_hash"`
}
|
|
|
|
|
2018-08-07 22:51:02 +02:00
|
|
|
// SQL implements the DB interface
type SQL struct {
	// conn is the underlying database handle; nil until Connect succeeds.
	conn *sql.DB
}
|
|
|
|
|
|
|
|
func logQuery(query string, args ...interface{}) {
|
2019-01-09 23:52:30 +01:00
|
|
|
s, err := qt.InterpolateParams(query, args...)
|
2018-01-31 02:15:21 +01:00
|
|
|
if err != nil {
|
|
|
|
log.Errorln(err)
|
|
|
|
} else {
|
|
|
|
log.Debugln(s)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-30 03:38:55 +02:00
|
|
|
// Connect will create a connection to the database
|
2018-01-31 02:15:21 +01:00
|
|
|
func (s *SQL) Connect(dsn string) error {
|
|
|
|
var err error
|
2018-08-07 22:51:02 +02:00
|
|
|
// interpolateParams is necessary. otherwise uploading a stream with thousands of blobs
|
|
|
|
// will hit MySQL's max_prepared_stmt_count limit because the prepared statements are all
|
|
|
|
// opened inside a transaction. closing them manually doesn't seem to help
|
|
|
|
dsn += "?parseTime=1&collation=utf8mb4_unicode_ci&interpolateParams=1"
|
2018-01-31 02:15:21 +01:00
|
|
|
s.conn, err = sql.Open("mysql", dsn)
|
|
|
|
if err != nil {
|
|
|
|
return errors.Err(err)
|
|
|
|
}
|
|
|
|
|
2019-06-26 17:02:37 +02:00
|
|
|
s.conn.SetMaxIdleConns(12)
|
|
|
|
|
2018-01-31 02:15:21 +01:00
|
|
|
return errors.Err(s.conn.Ping())
|
|
|
|
}
|
|
|
|
|
2018-08-07 22:51:02 +02:00
|
|
|
// AddBlob adds a blob to the database.
|
2018-06-19 19:47:13 +02:00
|
|
|
func (s *SQL) AddBlob(hash string, length int, isStored bool) error {
|
2018-01-31 02:15:21 +01:00
|
|
|
if s.conn == nil {
|
|
|
|
return errors.Err("not connected")
|
|
|
|
}
|
|
|
|
|
2019-06-27 21:30:38 +02:00
|
|
|
return addBlob(s.conn, hash, length, isStored)
|
2018-03-01 22:12:53 +01:00
|
|
|
}
|
|
|
|
|
2019-06-27 21:30:38 +02:00
|
|
|
func addBlob(e Executor, hash string, length int, isStored bool) error {
|
2018-01-31 02:15:21 +01:00
|
|
|
if length <= 0 {
|
|
|
|
return errors.Err("length must be positive")
|
|
|
|
}
|
|
|
|
|
2019-06-27 21:30:38 +02:00
|
|
|
err := exec(e,
|
2018-08-07 22:51:02 +02:00
|
|
|
"INSERT INTO blob_ (hash, is_stored, length) VALUES (?,?,?) ON DUPLICATE KEY UPDATE is_stored = (is_stored or VALUES(is_stored))",
|
2018-09-20 20:24:30 +02:00
|
|
|
hash, isStored, length,
|
2018-08-07 22:51:02 +02:00
|
|
|
)
|
2018-01-31 02:15:21 +01:00
|
|
|
if err != nil {
|
|
|
|
return errors.Err(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-05-30 03:38:55 +02:00
|
|
|
// HasBlob checks if the database contains the blob information.
|
2018-01-31 02:15:21 +01:00
|
|
|
func (s *SQL) HasBlob(hash string) (bool, error) {
|
2018-09-20 17:24:36 +02:00
|
|
|
exists, err := s.HasBlobs([]string{hash})
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
2018-01-31 02:15:21 +01:00
|
|
|
}
|
2018-09-20 17:24:36 +02:00
|
|
|
return exists[hash], nil
|
2018-01-31 02:15:21 +01:00
|
|
|
}
|
2018-02-02 22:49:20 +01:00
|
|
|
|
2018-05-30 03:38:55 +02:00
|
|
|
// HasBlobs checks if the database contains the set of blobs and returns a bool map.
// Hashes that are not stored simply do not appear in the map (the zero value
// false stands in for "missing"). Lookups are done in batches so very large
// inputs don't produce an unbounded IN (...) clause.
func (s *SQL) HasBlobs(hashes []string) (map[string]bool, error) {
	if s.conn == nil {
		return nil, errors.Err("not connected")
	}

	var hash string
	exists := make(map[string]bool)
	maxBatchSize := 10000
	doneIndex := 0

	for len(hashes) > doneIndex {
		// clamp the batch to the remaining hashes
		sliceEnd := doneIndex + maxBatchSize
		if sliceEnd > len(hashes) {
			sliceEnd = len(hashes)
		}
		log.Debugf("getting hashes[%d:%d] of %d", doneIndex, sliceEnd, len(hashes))
		batch := hashes[doneIndex:sliceEnd]

		// NOTE: `&&` is MySQL's AND operator; qt.Qs generates the ?,?,... placeholders
		query := "SELECT hash FROM blob_ WHERE is_stored = ? && hash IN (" + qt.Qs(len(batch)) + ")"
		args := make([]interface{}, len(batch)+1)
		args[0] = true
		for i := range batch {
			args[i+1] = batch[i]
		}

		logQuery(query, args...)

		// the per-batch work runs in a closure so that `defer closeRows(rows)`
		// fires at the end of each batch rather than accumulating until the
		// whole function returns
		err := func() error {
			startTime := time.Now()
			rows, err := s.conn.Query(query, args...)
			log.Debugf("hashes query took %s", time.Since(startTime))
			if err != nil {
				return errors.Err(err)
			}
			defer closeRows(rows)

			// every returned row is a stored hash
			for rows.Next() {
				err := rows.Scan(&hash)
				if err != nil {
					return errors.Err(err)
				}
				exists[hash] = true
			}

			err = rows.Err()
			if err != nil {
				return errors.Err(err)
			}

			// only advance past this batch once it completed successfully
			doneIndex += len(batch)
			return nil
		}()
		if err != nil {
			return nil, err
		}
	}

	return exists, nil
}
|
|
|
|
|
2018-09-11 13:41:29 +02:00
|
|
|
// Delete will remove the blob from the db
|
|
|
|
func (s *SQL) Delete(hash string) error {
|
2019-06-27 21:30:38 +02:00
|
|
|
err := exec(s.conn, "DELETE FROM stream WHERE sd_hash = ?", hash)
|
|
|
|
if err != nil {
|
2018-09-11 13:41:29 +02:00
|
|
|
return errors.Err(err)
|
2019-06-27 21:30:38 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
err = exec(s.conn, "DELETE FROM blob_ WHERE hash = ?", hash)
|
|
|
|
return errors.Err(err)
|
2018-09-11 13:41:29 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Block will mark a blob as blocked
|
|
|
|
func (s *SQL) Block(hash string) error {
|
|
|
|
query := "INSERT IGNORE INTO blocked SET hash = ?"
|
|
|
|
args := []interface{}{hash}
|
|
|
|
logQuery(query, args...)
|
|
|
|
_, err := s.conn.Exec(query, args...)
|
|
|
|
return errors.Err(err)
|
|
|
|
}
|
|
|
|
|
2018-09-20 17:24:36 +02:00
|
|
|
// GetBlocked will return a list of blocked hashes
|
|
|
|
func (s *SQL) GetBlocked() (map[string]bool, error) {
|
|
|
|
query := "SELECT hash FROM blocked"
|
|
|
|
logQuery(query)
|
|
|
|
rows, err := s.conn.Query(query)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Err(err)
|
|
|
|
}
|
|
|
|
defer closeRows(rows)
|
|
|
|
|
|
|
|
blocked := make(map[string]bool)
|
|
|
|
|
|
|
|
var hash string
|
|
|
|
for rows.Next() {
|
|
|
|
err := rows.Scan(&hash)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Err(err)
|
|
|
|
}
|
|
|
|
blocked[hash] = true
|
|
|
|
}
|
|
|
|
|
|
|
|
err = rows.Err()
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Err(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return blocked, nil
|
|
|
|
}
|
|
|
|
|
2018-08-16 02:17:02 +02:00
|
|
|
// MissingBlobsForKnownStream returns missing blobs for an existing stream
|
|
|
|
// WARNING: if the stream does NOT exist, no blob hashes will be returned, which looks
|
|
|
|
// like no blobs are missing
|
|
|
|
func (s *SQL) MissingBlobsForKnownStream(sdHash string) ([]string, error) {
|
2018-07-26 16:25:47 +02:00
|
|
|
if s.conn == nil {
|
2018-08-16 02:17:02 +02:00
|
|
|
return nil, errors.Err("not connected")
|
2018-07-26 16:25:47 +02:00
|
|
|
}
|
|
|
|
|
2018-08-16 02:17:02 +02:00
|
|
|
query := `
|
|
|
|
SELECT b.hash FROM blob_ b
|
|
|
|
INNER JOIN stream_blob sb ON b.hash = sb.blob_hash
|
|
|
|
INNER JOIN stream s ON s.hash = sb.stream_hash AND s.sd_hash = ?
|
|
|
|
WHERE b.is_stored = 0
|
|
|
|
`
|
2018-07-26 16:25:47 +02:00
|
|
|
args := []interface{}{sdHash}
|
|
|
|
|
|
|
|
logQuery(query, args...)
|
|
|
|
|
2018-08-16 02:17:02 +02:00
|
|
|
rows, err := s.conn.Query(query, args...)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Err(err)
|
|
|
|
}
|
|
|
|
defer closeRows(rows)
|
2018-07-26 16:25:47 +02:00
|
|
|
|
2018-08-16 02:17:02 +02:00
|
|
|
var missingBlobs []string
|
|
|
|
var hash string
|
2018-07-26 16:25:47 +02:00
|
|
|
|
2018-08-16 02:17:02 +02:00
|
|
|
for rows.Next() {
|
|
|
|
err := rows.Scan(&hash)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Err(err)
|
|
|
|
}
|
|
|
|
missingBlobs = append(missingBlobs, hash)
|
|
|
|
}
|
|
|
|
|
|
|
|
err = rows.Err()
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Err(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return missingBlobs, errors.Err(err)
|
2018-07-26 16:25:47 +02:00
|
|
|
}
|
|
|
|
|
2018-05-30 03:38:55 +02:00
|
|
|
// AddSDBlob inserts the SD blob record, its stream record, and the records for
// all of the stream's content blobs.
// NOTE: despite what earlier docs claimed, these inserts are NOT wrapped in a
// single transaction — each statement runs directly on the connection, so a
// failure partway through can leave a partially-inserted stream.
func (s *SQL) AddSDBlob(sdHash string, sdBlobLength int, sdBlob SdBlob) error {
	if s.conn == nil {
		return errors.Err("not connected")
	}

	// insert sd blob (marked stored, since we have it in hand)
	err := addBlob(s.conn, sdHash, sdBlobLength, true)
	if err != nil {
		return err
	}

	// insert stream
	err = exec(s.conn,
		"INSERT IGNORE INTO stream (hash, sd_hash) VALUES (?,?)",
		sdBlob.StreamHash, sdHash,
	)
	if err != nil {
		return errors.Err(err)
	}

	// insert content blobs and connect them to stream
	for _, contentBlob := range sdBlob.Blobs {
		if contentBlob.BlobHash == "" {
			// null terminator blob
			continue
		}

		// content blobs start out not-stored; only the SD blob itself is stored here
		err := addBlob(s.conn, contentBlob.BlobHash, contentBlob.Length, false)
		if err != nil {
			return err
		}

		err = exec(s.conn,
			"INSERT IGNORE INTO stream_blob (stream_hash, blob_hash, num) VALUES (?,?,?)",
			sdBlob.StreamHash, contentBlob.BlobHash, contentBlob.BlobNum,
		)
		if err != nil {
			return errors.Err(err)
		}
	}
	return nil
}
|
|
|
|
|
2018-06-19 19:47:13 +02:00
|
|
|
// GetHashRange gets the smallest and biggest hashes in the db
|
|
|
|
func (s *SQL) GetHashRange() (string, string, error) {
|
|
|
|
var min string
|
|
|
|
var max string
|
|
|
|
|
|
|
|
if s.conn == nil {
|
|
|
|
return "", "", errors.Err("not connected")
|
|
|
|
}
|
|
|
|
|
|
|
|
query := "SELECT MIN(hash), MAX(hash) from blob_"
|
|
|
|
|
|
|
|
logQuery(query)
|
|
|
|
|
|
|
|
err := s.conn.QueryRow(query).Scan(&min, &max)
|
|
|
|
return min, max, err
|
|
|
|
}
|
|
|
|
|
2018-06-22 15:30:16 +02:00
|
|
|
// GetStoredHashesInRange gets stored blobs with hashes in a given range, and sends the hashes into a channel
|
|
|
|
func (s *SQL) GetStoredHashesInRange(ctx context.Context, start, end bits.Bitmap) (ch chan bits.Bitmap, ech chan error) {
|
2018-06-15 04:30:37 +02:00
|
|
|
ch = make(chan bits.Bitmap)
|
|
|
|
ech = make(chan error)
|
|
|
|
|
|
|
|
// TODO: needs waitgroup?
|
|
|
|
go func() {
|
|
|
|
defer close(ch)
|
|
|
|
defer close(ech)
|
|
|
|
|
|
|
|
if s.conn == nil {
|
|
|
|
ech <- errors.Err("not connected")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-08-08 02:19:04 +02:00
|
|
|
query := "SELECT hash FROM blob_ WHERE hash >= ? AND hash <= ? AND is_stored = 1"
|
|
|
|
args := []interface{}{start.Hex(), end.Hex()}
|
2018-06-15 04:30:37 +02:00
|
|
|
|
|
|
|
logQuery(query, args...)
|
|
|
|
|
|
|
|
rows, err := s.conn.Query(query, args...)
|
|
|
|
defer closeRows(rows)
|
|
|
|
if err != nil {
|
|
|
|
ech <- err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
var hash string
|
2018-08-07 17:38:55 +02:00
|
|
|
ScanLoop:
|
2018-06-15 04:30:37 +02:00
|
|
|
for rows.Next() {
|
|
|
|
err := rows.Scan(&hash)
|
|
|
|
if err != nil {
|
|
|
|
ech <- err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
2018-08-07 17:38:55 +02:00
|
|
|
break ScanLoop
|
2018-06-15 04:30:37 +02:00
|
|
|
case ch <- bits.FromHexP(hash):
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
err = rows.Err()
|
|
|
|
if err != nil {
|
|
|
|
ech <- err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-03-01 22:12:53 +01:00
|
|
|
// txFunc is a function that can be wrapped in a transaction
|
|
|
|
type txFunc func(tx *sql.Tx) error
|
|
|
|
|
|
|
|
// withTx wraps a function in an sql transaction. the transaction is committed if there's no error, or rolled back if there is one.
|
|
|
|
// if dbOrTx is an sql.DB, a new transaction is started
|
|
|
|
func withTx(dbOrTx interface{}, f txFunc) (err error) {
|
|
|
|
var tx *sql.Tx
|
|
|
|
|
|
|
|
switch t := dbOrTx.(type) {
|
|
|
|
case *sql.Tx:
|
|
|
|
tx = t
|
|
|
|
case *sql.DB:
|
|
|
|
tx, err = t.Begin()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
if p := recover(); p != nil {
|
2018-05-30 03:38:55 +02:00
|
|
|
if rollBackError := tx.Rollback(); rollBackError != nil {
|
|
|
|
log.Error("failed to rollback tx on panic - ", rollBackError)
|
|
|
|
}
|
2018-03-01 22:12:53 +01:00
|
|
|
panic(p)
|
|
|
|
} else if err != nil {
|
2018-05-30 03:38:55 +02:00
|
|
|
if rollBackError := tx.Rollback(); rollBackError != nil {
|
|
|
|
log.Error("failed to rollback tx on panic - ", rollBackError)
|
|
|
|
}
|
2018-03-01 22:12:53 +01:00
|
|
|
} else {
|
|
|
|
err = errors.Err(tx.Commit())
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
default:
|
|
|
|
return errors.Err("db or tx required")
|
2018-02-02 22:49:20 +01:00
|
|
|
}
|
|
|
|
|
2018-03-01 22:12:53 +01:00
|
|
|
return f(tx)
|
2018-02-02 22:49:20 +01:00
|
|
|
}
|
|
|
|
|
2018-05-30 03:38:55 +02:00
|
|
|
func closeRows(rows *sql.Rows) {
|
2018-06-15 04:30:37 +02:00
|
|
|
if rows != nil {
|
|
|
|
err := rows.Close()
|
|
|
|
if err != nil {
|
|
|
|
log.Error("error closing rows: ", err)
|
|
|
|
}
|
2018-05-30 03:38:55 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-27 21:30:38 +02:00
|
|
|
type Executor interface {
|
|
|
|
Exec(query string, args ...interface{}) (sql.Result, error)
|
|
|
|
}
|
|
|
|
|
|
|
|
func exec(e Executor, query string, args ...interface{}) error {
|
2018-08-07 22:51:02 +02:00
|
|
|
logQuery(query, args...)
|
2019-06-26 20:36:05 +02:00
|
|
|
attempt, maxAttempts := 0, 3
|
|
|
|
var err error
|
|
|
|
Retry:
|
|
|
|
attempt++
|
2019-06-27 21:30:38 +02:00
|
|
|
_, err = e.Exec(query, args...)
|
2019-06-26 20:36:05 +02:00
|
|
|
if e, ok := err.(*mysql.MySQLError); ok && attempt <= maxAttempts && e.Number == 1205 {
|
|
|
|
//Error 1205: Lock wait timeout exceeded; try restarting transaction
|
|
|
|
goto Retry
|
|
|
|
}
|
2018-08-07 22:51:02 +02:00
|
|
|
return errors.Err(err)
|
|
|
|
}
|
|
|
|
|
2018-06-15 04:30:37 +02:00
|
|
|
/* SQL schema
|
|
|
|
|
2018-09-26 22:00:19 +02:00
|
|
|
in prod, set tx_isolation to READ-COMMITTED to improve db performance
|
|
|
|
|
2018-02-02 22:49:20 +01:00
|
|
|
CREATE TABLE blob_ (
|
|
|
|
hash char(96) NOT NULL,
|
2018-06-19 19:47:13 +02:00
|
|
|
is_stored TINYINT(1) NOT NULL DEFAULT 0,
|
2018-02-02 22:49:20 +01:00
|
|
|
length bigint(20) unsigned DEFAULT NULL,
|
2018-07-26 22:05:27 +02:00
|
|
|
PRIMARY KEY (hash)
|
2018-06-15 04:30:37 +02:00
|
|
|
);
|
2018-02-02 22:49:20 +01:00
|
|
|
|
|
|
|
CREATE TABLE stream (
|
|
|
|
hash char(96) NOT NULL,
|
|
|
|
sd_hash char(96) NOT NULL,
|
|
|
|
PRIMARY KEY (hash),
|
|
|
|
KEY sd_hash_idx (sd_hash),
|
2018-02-07 21:21:20 +01:00
|
|
|
FOREIGN KEY (sd_hash) REFERENCES blob_ (hash) ON DELETE RESTRICT ON UPDATE CASCADE
|
2018-06-15 04:30:37 +02:00
|
|
|
);
|
2018-02-02 22:49:20 +01:00
|
|
|
|
|
|
|
CREATE TABLE stream_blob (
|
|
|
|
stream_hash char(96) NOT NULL,
|
|
|
|
blob_hash char(96) NOT NULL,
|
|
|
|
num int NOT NULL,
|
|
|
|
PRIMARY KEY (stream_hash, blob_hash),
|
2018-02-07 21:21:20 +01:00
|
|
|
FOREIGN KEY (stream_hash) REFERENCES stream (hash) ON DELETE CASCADE ON UPDATE CASCADE,
|
|
|
|
FOREIGN KEY (blob_hash) REFERENCES blob_ (hash) ON DELETE CASCADE ON UPDATE CASCADE
|
2018-06-15 04:30:37 +02:00
|
|
|
);
|
|
|
|
|
2018-09-11 13:41:29 +02:00
|
|
|
CREATE TABLE blocked (
|
|
|
|
hash char(96) NOT NULL,
|
|
|
|
PRIMARY KEY (hash)
|
|
|
|
);
|
|
|
|
|
2018-06-15 04:30:37 +02:00
|
|
|
could add UNIQUE KEY (stream_hash, num) to stream_blob ...
|
|
|
|
|
2018-05-30 03:38:55 +02:00
|
|
|
*/
|