-rw-r--r--  src/pi_tests/pi_stress.c  |  140
1 file changed, 127 insertions(+), 13 deletions(-)
diff --git a/src/pi_tests/pi_stress.c b/src/pi_tests/pi_stress.c
index 89de656..eefdd15 100644
--- a/src/pi_tests/pi_stress.c
+++ b/src/pi_tests/pi_stress.c
@@ -196,6 +196,11 @@ struct group_parameters {
pthread_barrier_t elevate_barrier;
pthread_barrier_t finish_barrier;
+ /* Either everyone goes through the loop, or else no-one does */
+ pthread_barrier_t loop_barr;
+ pthread_mutex_t loop_mtx; /* Protect access to int loop */
+ int loop; /* boolean: keep looping; cleared on shutdown */
+
// state variables
volatile int high_has_run;
volatile int low_unlocked;
@@ -606,6 +611,9 @@ low_priority(void *arg)
int unbounded;
unsigned long count = 0;
struct group_parameters *p = (struct group_parameters *)arg;
+ pthread_barrier_t *loop_barr = &p->loop_barr;
+ pthread_mutex_t *loop_mtx = &p->loop_mtx;
+ int *loop = &p->loop;
allow_sigterm();
@@ -626,7 +634,38 @@ low_priority(void *arg)
debug("low_priority[%d]: starting inversion loop\n", p->id);
- while (!shutdown && (unbounded || (p->total < p->inversions))) {
+ for(;;) {
+ /* We can't set the 'loop' boolean here, because some threads
+ * may have already reached the loop_barr
+ */
+ if (!unbounded && (p->total >= p->inversions)) {
+ set_shutdown_flag();
+ }
+
+ /* Either all threads go through the loop_barr, or none do */
+ pthread_mutex_lock(loop_mtx);
+ if (*loop == 0) {
+ pthread_mutex_unlock(loop_mtx);
+ break;
+ }
+ pthread_mutex_unlock(loop_mtx);
+
+ status = pthread_barrier_wait(loop_barr);
+ if (status && status != PTHREAD_BARRIER_SERIAL_THREAD) {
+ error("%s[%d]: pthread_barrier_wait(loop): %x\n",
+ __func__, p->id, status);
+ return NULL;
+ }
+
+ /* Only one thread needs to check the shutdown status */
+ if (status == PTHREAD_BARRIER_SERIAL_THREAD) {
+ if (shutdown) {
+ pthread_mutex_lock(loop_mtx);
+ *loop = 0;
+ pthread_mutex_unlock(loop_mtx);
+ }
+ }
+
/* initial state */
debug("low_priority[%d]: entering start wait (%d)\n", p->id, count++);
status = pthread_barrier_wait(&p->start_barrier);
@@ -634,12 +673,11 @@ low_priority(void *arg)
error("low_priority[%d]: pthread_barrier_wait(start): %x\n", p->id, status);
return NULL;
}
- if (shutdown) continue;
+
debug("low_priority[%d]: claiming mutex\n", p->id);
pthread_mutex_lock(&p->mutex);
debug("low_priority[%d]: mutex locked\n", p->id);
- if (shutdown) continue;
debug("low_priority[%d]: entering locked wait\n", p->id);
status = pthread_barrier_wait(&p->locked_barrier);
if (status && status != PTHREAD_BARRIER_SERIAL_THREAD) {
@@ -647,7 +685,6 @@ low_priority(void *arg)
return NULL;
}
- if (shutdown) continue;
// wait for priority boost
debug("low_priority[%d]: entering elevated wait\n", p->id);
p->low_unlocked = 0; /* prevent race with med_priority */
@@ -663,7 +700,6 @@ low_priority(void *arg)
pthread_mutex_unlock(&p->mutex);
// finish state
- if (shutdown) continue;
debug("low_priority[%d]: entering finish wait\n", p->id);
status = pthread_barrier_wait(&p->finish_barrier);
if (status && status != PTHREAD_BARRIER_SERIAL_THREAD) {
@@ -690,6 +726,9 @@ med_priority(void *arg)
int unbounded;
unsigned long count = 0;
struct group_parameters *p = (struct group_parameters *)arg;
+ pthread_barrier_t *loop_barr = &p->loop_barr;
+ pthread_mutex_t *loop_mtx = &p->loop_mtx;
+ int *loop = &p->loop;
allow_sigterm();
@@ -709,7 +748,34 @@ med_priority(void *arg)
unbounded = (p->inversions < 0);
debug("med_priority[%d]: starting inversion loop\n", p->id);
- while (!shutdown && (unbounded || (p->total < p->inversions))) {
+ for(;;) {
+ if (!unbounded && (p->total >= p->inversions)) {
+ set_shutdown_flag();
+ }
+ /* Either all threads go through the loop_barr, or none do */
+ pthread_mutex_lock(loop_mtx);
+ if (*loop == 0) {
+ pthread_mutex_unlock(loop_mtx);
+ break;
+ }
+ pthread_mutex_unlock(loop_mtx);
+
+ status = pthread_barrier_wait(loop_barr);
+ if (status && status != PTHREAD_BARRIER_SERIAL_THREAD) {
+ error("%s[%d]: pthread_barrier_wait(loop): %x\n",
+ __func__, p->id, status);
+ return NULL;
+ }
+
+ /* Only one thread needs to check the shutdown status */
+ if (status == PTHREAD_BARRIER_SERIAL_THREAD) {
+ if (shutdown) {
+ pthread_mutex_lock(loop_mtx);
+ *loop = 0;
+ pthread_mutex_unlock(loop_mtx);
+ }
+ }
+
/* start state */
debug("med_priority[%d]: entering start state (%d)\n", p->id, count++);
status = pthread_barrier_wait(&p->start_barrier);
@@ -719,14 +785,12 @@ med_priority(void *arg)
}
debug("med_priority[%d]: entering elevate state\n", p->id);
do {
- if (shutdown) break;
status = pthread_barrier_wait(&p->elevate_barrier);
if (status && status != PTHREAD_BARRIER_SERIAL_THREAD) {
error("med_priority[%d]: pthread_barrier_wait(elevate): %x", p->id, status);
return NULL;
}
} while (!p->high_has_run && !p->low_unlocked);
- if (shutdown) continue;
debug("med_priority[%d]: entering finish state\n", p->id);
status = pthread_barrier_wait(&p->finish_barrier);
if (status && status != PTHREAD_BARRIER_SERIAL_THREAD) {
@@ -758,6 +822,9 @@ high_priority(void *arg)
int unbounded;
unsigned long count = 0;
struct group_parameters *p = (struct group_parameters *)arg;
+ pthread_barrier_t *loop_barr = &p->loop_barr;
+ pthread_mutex_t *loop_mtx = &p->loop_mtx;
+ int *loop = &p->loop;
allow_sigterm();
if (verify_cpu(p->cpu) != SUCCESS) {
@@ -770,13 +837,39 @@ high_priority(void *arg)
/* wait for all threads to be ready */
status = pthread_barrier_wait(&all_threads_ready);
if (status && status != PTHREAD_BARRIER_SERIAL_THREAD) {
- error("high_priority[%d]: pthread_barrier_wait(all_threads_ready): %x",
- p->id, status);
+ error("high_priority[%d]: pthread_barrier_wait(all_threads_ready): %x", p->id, status);
return NULL;
}
unbounded = (p->inversions < 0);
debug("high_priority[%d]: starting inversion loop\n", p->id);
- while (!shutdown && (unbounded || (p->total < p->inversions))) {
+ for(;;) {
+ if (!unbounded && (p->total >= p->inversions)) {
+ set_shutdown_flag();
+ }
+
+ /* Either all threads go through the loop_barr, or none do */
+ pthread_mutex_lock(loop_mtx);
+ if (*loop == 0) {
+ pthread_mutex_unlock(loop_mtx);
+ break;
+ }
+ pthread_mutex_unlock(loop_mtx);
+
+ status = pthread_barrier_wait(loop_barr);
+ if (status && status != PTHREAD_BARRIER_SERIAL_THREAD) {
+ error("%s[%d]: pthread_barrier_wait(loop): %x\n",
+ __func__, p->id, status);
+ return NULL;
+ }
+
+ /* Only one thread needs to check the shutdown status */
+ if (status == PTHREAD_BARRIER_SERIAL_THREAD) {
+ if (shutdown) {
+ pthread_mutex_lock(loop_mtx);
+ *loop = 0;
+ pthread_mutex_unlock(loop_mtx);
+ }
+ }
p->high_has_run = 0;
debug("high_priority[%d]: entering start state (%d)\n", p->id, count++);
status = pthread_barrier_wait(&p->start_barrier);
@@ -784,7 +877,7 @@ high_priority(void *arg)
error("high_priority[%d]: pthread_barrier_wait(start): %x", p->id, status);
return NULL;
}
- if (shutdown) continue;
+
debug("high_priority[%d]: entering running state\n", p->id);
status = pthread_barrier_wait(&p->locked_barrier);
if (status && status != PTHREAD_BARRIER_SERIAL_THREAD) {
@@ -798,7 +891,7 @@ high_priority(void *arg)
debug("high_priority[%d]: unlocking mutex\n", p->id);
pthread_mutex_unlock(&p->mutex);
debug("high_priority[%d]: entering finish state\n", p->id);
- if (shutdown) continue;
+
status = pthread_barrier_wait(&p->finish_barrier);
if (status && status != PTHREAD_BARRIER_SERIAL_THREAD) {
error("high_priority[%d]: pthread_barrier_wait(finish): %x", status);
@@ -986,6 +1079,27 @@ initialize_group(struct group_parameters *group)
if (barrier_init(&group->finish_barrier, NULL, NUM_TEST_THREADS, "finish_barrier"))
return FAILURE;
+ if (barrier_init(&group->loop_barr, NULL, NUM_TEST_THREADS,
+ "loop_barrier"))
+ return FAILURE;
+
+ if ((status = pthread_mutex_init(&group->loop_mtx, NULL)) != 0) {
+ error("pthread_mutex_init, status = %d\n", status);
+ return FAILURE;
+ }
+
+ if ((status = pthread_mutex_lock(&group->loop_mtx)) != 0) {
+ error("pthread_mutex_lock, status = %d\n", status);
+ return FAILURE;
+ }
+
+ group->loop = 1;
+
+ if ((status = pthread_mutex_unlock(&group->loop_mtx)) != 0) {
+ error("pthread_mutex_unlock, status = %d\n", status);
+ return FAILURE;
+ }
+
return SUCCESS;
}
// setup and create a group's threads
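
The pattern this patch introduces, a loop barrier plus a mutex-protected loop flag so that all three threads of a group either start another inversion or exit together, can be seen in isolation in the sketch below. This is a minimal standalone illustration, not code from the patch: NUM_WORKERS, MAX_ITERATIONS, work_barr, and shutdown_flag are hypothetical stand-ins for NUM_TEST_THREADS, p->inversions, the start/locked/elevate/finish barriers, and the global 'shutdown' flag. Build with gcc -pthread.

#include <pthread.h>
#include <stdio.h>

#define NUM_WORKERS    3   /* stand-in for NUM_TEST_THREADS */
#define MAX_ITERATIONS 5   /* stand-in for p->inversions */

static pthread_barrier_t loop_barr;   /* gate at the top of each iteration */
static pthread_barrier_t work_barr;   /* stand-in for the inversion barriers */
static pthread_mutex_t loop_mtx = PTHREAD_MUTEX_INITIALIZER;
static int loop = 1;                  /* boolean, protected by loop_mtx */
static volatile int shutdown_flag;    /* stand-in for the global 'shutdown' */

static void *worker(void *arg)
{
	long id = (long)arg;
	int iterations = 0;
	int status;

	for (;;) {
		/* Any thread may request shutdown, but must not act on it
		 * here: others may already be waiting at loop_barr. */
		if (++iterations >= MAX_ITERATIONS)
			shutdown_flag = 1;

		/* Either all threads pass loop_barr, or none do */
		pthread_mutex_lock(&loop_mtx);
		if (loop == 0) {
			pthread_mutex_unlock(&loop_mtx);
			break;
		}
		pthread_mutex_unlock(&loop_mtx);

		status = pthread_barrier_wait(&loop_barr);

		/* Exactly one waiter is told PTHREAD_BARRIER_SERIAL_THREAD;
		 * it alone turns the shutdown request into loop = 0. */
		if (status == PTHREAD_BARRIER_SERIAL_THREAD && shutdown_flag) {
			pthread_mutex_lock(&loop_mtx);
			loop = 0;
			pthread_mutex_unlock(&loop_mtx);
		}

		/* Iteration body. Because the serial thread clears 'loop'
		 * before reaching this barrier, every thread sees the same
		 * value at the next top-of-loop check, so they all exit on
		 * the same iteration. */
		pthread_barrier_wait(&work_barr);
		printf("worker %ld: iteration %d\n", id, iterations);
	}
	return NULL;
}

int main(void)
{
	pthread_t threads[NUM_WORKERS];
	long i;

	pthread_barrier_init(&loop_barr, NULL, NUM_WORKERS);
	pthread_barrier_init(&work_barr, NULL, NUM_WORKERS);
	for (i = 0; i < NUM_WORKERS; i++)
		pthread_create(&threads[i], NULL, worker, (void *)i);
	for (i = 0; i < NUM_WORKERS; i++)
		pthread_join(threads[i], NULL);
	pthread_barrier_destroy(&work_barr);
	pthread_barrier_destroy(&loop_barr);
	return 0;
}

The ordering that makes this safe mirrors the patch: the serial thread updates the flag between loop_barr and the first barrier of the iteration body, so no thread can re-check the flag before the update is visible. That is why the scattered per-step "if (shutdown) continue;" checks the patch removes are no longer needed.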