Make the hashmeter thread-safe
parent 80db6655c6
commit 9ba5e94d3d
1 changed file with 9 additions and 6 deletions
cpu-miner.c
@@ -291,13 +291,12 @@ static bool submit_upstream_work(CURL *curl, const struct work *work)
 	res = json_object_get(val, "result");
 
-	pthread_mutex_lock(&stats_lock);
-	json_is_true(res) ? accepted_count++ : rejected_count++;
-	pthread_mutex_unlock(&stats_lock);
-
 	hashrate = 0.;
+	pthread_mutex_lock(&stats_lock);
 	for (i = 0; i < opt_n_threads; i++)
 		hashrate += thr_hashrates[i];
+	json_is_true(res) ? accepted_count++ : rejected_count++;
+	pthread_mutex_unlock(&stats_lock);
 
 	applog(LOG_INFO, "accepted: %lu/%lu (%.2f%%), %.2f khash/s %s",
 	       accepted_count,
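
For context, a standalone sketch of the reporting pattern this hunk converges on: the per-thread hashrate sum and the accepted/rejected counter update happen inside a single stats_lock critical section. The identifiers stats_lock, thr_hashrates, opt_n_threads, accepted_count and rejected_count are taken from cpu-miner.c, but report_share() and the rest of the scaffolding here are hypothetical, illustrative only, and not the miner's code.

/* Sketch only: names mirror cpu-miner.c, the program itself is illustrative. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_THREADS 4

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static int opt_n_threads = MAX_THREADS;
static double thr_hashrates[MAX_THREADS];   /* hashes per second, per thread */
static unsigned long accepted_count, rejected_count;

/* Submit-path reporting: sum the per-thread rates and bump the share
 * counters while holding the same lock the miner threads use to write. */
static void report_share(bool accepted)
{
	double hashrate = 0.;
	int i;

	pthread_mutex_lock(&stats_lock);
	for (i = 0; i < opt_n_threads; i++)
		hashrate += thr_hashrates[i];
	accepted ? accepted_count++ : rejected_count++;
	pthread_mutex_unlock(&stats_lock);

	printf("accepted: %lu/%lu, %.2f khash/s\n",
	       accepted_count, accepted_count + rejected_count,
	       1e-3 * hashrate);
}

int main(void)
{
	thr_hashrates[0] = 250000.0;   /* pretend thread 0 reported 250 kh/s */
	report_share(true);
	return 0;
}

Compiled with cc -pthread, this prints a line in the same style as the applog() call in the hunk above.
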
@@ -586,8 +585,12 @@ static void *miner_thread(void *userdata)
 		/* record scanhash elapsed time */
 		gettimeofday(&tv_end, NULL);
 		timeval_subtract(&diff, &tv_end, &tv_start);
-		thr_hashrates[thr_id] =
-			hashes_done / (diff.tv_sec + 1e-6 * diff.tv_usec);
+		if (diff.tv_usec || diff.tv_sec) {
+			pthread_mutex_lock(&stats_lock);
+			thr_hashrates[thr_id] =
+				hashes_done / (diff.tv_sec + 1e-6 * diff.tv_usec);
+			pthread_mutex_unlock(&stats_lock);
+		}
 		if (!opt_quiet)
 			applog(LOG_INFO, "thread %d: %lu hashes, %.2f khash/s",
 				thr_id, hashes_done, 1e-3 * thr_hashrates[thr_id]);
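
And a matching sketch of the writer side this hunk adds: each worker computes its own rate and stores it in its thr_hashrates[] slot only while holding stats_lock, so readers such as the summing loop in the first hunk never see a half-updated value. Again stats_lock and thr_hashrates mirror cpu-miner.c; the busy-loop workload, thread count, and simplified timing are made up for illustration (the real code uses its own timeval_subtract() helper and keeps the threads mining).

/* Sketch only: a thread-safe hashmeter in miniature, not cpu-miner.c itself. */
#include <pthread.h>
#include <stdio.h>
#include <sys/time.h>

#define N_THREADS 2

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static double thr_hashrates[N_THREADS];   /* hashes per second, per thread */

static void *miner_thread(void *userdata)
{
	int thr_id = *(int *)userdata;
	unsigned long hashes_done = 0;
	struct timeval tv_start, tv_end;
	double secs;

	gettimeofday(&tv_start, NULL);
	for (unsigned long i = 0; i < 50000000UL; i++)
		hashes_done++;                /* stand-in for real scanhash work */
	gettimeofday(&tv_end, NULL);

	secs = (tv_end.tv_sec - tv_start.tv_sec) +
	       1e-6 * (tv_end.tv_usec - tv_start.tv_usec);

	/* Publish this thread's rate under the lock, as the hunk above does. */
	if (secs > 0) {
		pthread_mutex_lock(&stats_lock);
		thr_hashrates[thr_id] = hashes_done / secs;
		pthread_mutex_unlock(&stats_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t thr[N_THREADS];
	int ids[N_THREADS];
	double total = 0.;
	int i;

	for (i = 0; i < N_THREADS; i++) {
		ids[i] = i;
		pthread_create(&thr[i], NULL, miner_thread, &ids[i]);
	}
	for (i = 0; i < N_THREADS; i++)
		pthread_join(thr[i], NULL);

	/* Reader side: take the same lock before summing, as in the first hunk. */
	pthread_mutex_lock(&stats_lock);
	for (i = 0; i < N_THREADS; i++)
		total += thr_hashrates[i];
	pthread_mutex_unlock(&stats_lock);

	printf("total: %.2f khash/s\n", 1e-3 * total);
	return 0;
}

Built with cc -pthread. The join in main() makes the final read trivially safe here; the real miner keeps its threads running and re-reads thr_hashrates[] under stats_lock each time a share result comes back.
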