From 6661ca7e1b0de43aad0ea263a5e3bc0ff350e984 Mon Sep 17 00:00:00 2001
From: Axel Gembe
Date: Tue, 10 Nov 2020 16:30:53 +0700
Subject: [PATCH] Reduce memory allocations when generating the merkle root

When generating the merkle root, the previous code called `malloc` once
for every transaction, which is not ideal for performance. The new code
allocates a 32 KiB buffer once and reuses it for all transactions. If a
transaction is bigger than the current buffer, the buffer is freed and
reallocated to twice that transaction's size.
---
 cpu-miner.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/cpu-miner.c b/cpu-miner.c
index 394978b..5fcb3f9 100644
--- a/cpu-miner.c
+++ b/cpu-miner.c
@@ -351,6 +351,7 @@ static bool gbt_work_decode(const json_t *val, struct work *work)
 	uint32_t target[8];
 	int cbtx_size;
 	unsigned char *cbtx = NULL;
+	unsigned char *tx = NULL;
 	int tx_count, tx_size;
 	unsigned char txc_vi[9];
 	unsigned char (*merkle_tree)[32] = NULL;
@@ -581,6 +582,8 @@ static bool gbt_work_decode(const json_t *val, struct work *work)
 
 	/* generate merkle root */
 	merkle_tree = malloc(32 * ((1 + tx_count + 1) & ~1));
+	size_t tx_buf_size = 32 * 1024;
+	tx = malloc(tx_buf_size);
 	sha256d(merkle_tree[0], cbtx, cbtx_size);
 	for (i = 0; i < tx_count; i++) {
 		tmp = json_array_get(txa, i);
@@ -594,18 +597,21 @@ static bool gbt_work_decode(const json_t *val, struct work *work)
 			}
 			memrev(merkle_tree[1 + i], 32);
 		} else {
-			unsigned char *tx = malloc(tx_size);
+			if (tx_size > tx_buf_size) {
+				free(tx);
+				tx_buf_size = tx_size * 2;
+				tx = malloc(tx_buf_size);
+			}
 			if (!tx_hex || !hex2bin(tx, tx_hex, tx_size)) {
 				applog(LOG_ERR, "JSON invalid transactions");
-				free(tx);
 				goto out;
 			}
 			sha256d(merkle_tree[1 + i], tx, tx_size);
-			free(tx);
 		}
 		if (!submit_coinbase)
 			strcat(work->txs, tx_hex);
 	}
+	free(tx); tx = NULL;
 	n = 1 + tx_count;
 	while (n > 1) {
 		if (n % 2) {
@@ -662,6 +668,7 @@ static bool gbt_work_decode(const json_t *val, struct work *work)
 	rc = true;
 
 out:
+	free(tx);
 	free(cbtx);
 	free(merkle_tree);
 	return rc;
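
For reference, here is a minimal standalone sketch of the reuse-and-grow buffer pattern the patch applies: allocate one scratch buffer up front, reuse it for every item, and only reallocate (doubling past the required size) when an item does not fit. The item sizes, `process_item` helper, and `main` driver are illustrative placeholders, not code from cpu-miner.c.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Placeholder for the real per-item work (hex2bin + sha256d in the patch). */
static void process_item(const unsigned char *buf, size_t len)
{
	printf("processed %zu bytes\n", len);
}

int main(void)
{
	size_t item_sizes[] = { 250, 1200, 40000, 900, 70000 };
	size_t buf_size = 32 * 1024;            /* initial 32 KiB scratch buffer */
	unsigned char *buf = malloc(buf_size);
	if (!buf)
		return 1;

	for (size_t i = 0; i < sizeof(item_sizes) / sizeof(item_sizes[0]); i++) {
		size_t need = item_sizes[i];
		if (need > buf_size) {
			/* Grow to twice the required size so later items of a
			 * similar size do not trigger another allocation. The
			 * old contents are not needed, so free + malloc is
			 * enough; no realloc/copy required. */
			free(buf);
			buf_size = need * 2;
			buf = malloc(buf_size);
			if (!buf)
				return 1;
		}
		memset(buf, 0, need);           /* stand-in for decoding the item */
		process_item(buf, need);
	}

	free(buf);
	return 0;
}
```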