Mirror of https://gitlab.freedesktop.org/mesa/mesa.git
util/queue: add ability to kill a subset of threads
for ARB_parallel_shader_compile
parent d99cdc9d59
commit bb111559f2
2 changed files with 36 additions and 21 deletions
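The change makes queue->num_threads itself the termination signal: every worker re-checks its own thread_index against num_threads whenever it wakes up, so shrinking num_threads and broadcasting has_queued_cond makes exactly the workers with thread_index >= keep_num_threads leave their loop, and the caller joins only that tail. Below is a minimal standalone sketch of the same pattern, using plain C11 <threads.h> instead of Mesa's wrappers; all names and counts are invented for illustration and only the termination path is modeled, not the job ring buffer.

#include <stdint.h>
#include <threads.h>

#define MAX_WORKERS 8

static mtx_t lock;
static cnd_t wake;                   /* stands in for has_queued_cond */
static unsigned num_threads;         /* shrinking this terminates workers */
static thrd_t workers[MAX_WORKERS];

static int
worker(void *arg)
{
   unsigned thread_index = (unsigned)(uintptr_t)arg;

   mtx_lock(&lock);
   /* Sleep while this worker is still wanted; the "queue is empty" half of
    * the real predicate is left out because no jobs are modeled here. */
   while (thread_index < num_threads)
      cnd_wait(&wake, &lock);
   /* thread_index >= num_threads: this worker was asked to exit. */
   mtx_unlock(&lock);
   return 0;
}

/* Keep the first `keep` workers, terminate and join the rest. */
static void
kill_threads(unsigned keep)
{
   mtx_lock(&lock);
   unsigned old = num_threads;
   num_threads = keep;          /* the new count is the termination signal */
   cnd_broadcast(&wake);        /* wake everyone so the tail can notice it */
   mtx_unlock(&lock);

   for (unsigned i = keep; i < old; i++)
      thrd_join(workers[i], NULL);
}

int
main(void)
{
   mtx_init(&lock, mtx_plain);
   cnd_init(&wake);

   num_threads = MAX_WORKERS;
   for (unsigned i = 0; i < MAX_WORKERS; i++)
      thrd_create(&workers[i], worker, (void *)(uintptr_t)i);

   kill_threads(2);   /* e.g. the application asked for fewer compile threads */
   kill_threads(0);   /* shutdown: terminate the remaining workers */

   cnd_destroy(&wake);
   mtx_destroy(&lock);
   return 0;
}

In the patch itself the same broadcast doubles as the normal "work available" wake-up, which is why the exit check sits immediately after cnd_wait and before a job is dequeued.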
src/util/u_queue.c
@@ -33,7 +33,8 @@
 #include "util/u_thread.h"
 #include "u_process.h"
 
-static void util_queue_killall_and_wait(struct util_queue *queue);
+static void
+util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads);
 
 /****************************************************************************
  * Wait for all queues to assert idle when exit() is called.
@@ -54,7 +55,7 @@ atexit_handler(void)
    mtx_lock(&exit_mutex);
    /* Wait for all queues to assert idle. */
    LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
-      util_queue_killall_and_wait(iter);
+      util_queue_kill_threads(iter, 0);
    }
    mtx_unlock(&exit_mutex);
 }
@@ -266,10 +267,11 @@ util_queue_thread_func(void *input)
       assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
 
       /* wait if the queue is empty */
-      while (!queue->kill_threads && queue->num_queued == 0)
+      while (thread_index < queue->num_threads && queue->num_queued == 0)
         cnd_wait(&queue->has_queued_cond, &queue->lock);
 
-      if (queue->kill_threads) {
+      /* only kill threads that are above "num_threads" */
+      if (thread_index >= queue->num_threads) {
         mtx_unlock(&queue->lock);
         break;
      }
@@ -290,17 +292,19 @@ util_queue_thread_func(void *input)
       }
    }
 
-   /* signal remaining jobs before terminating */
+   /* signal remaining jobs if all threads are being terminated */
    mtx_lock(&queue->lock);
-   for (unsigned i = queue->read_idx; i != queue->write_idx;
-        i = (i + 1) % queue->max_jobs) {
-      if (queue->jobs[i].job) {
-         util_queue_fence_signal(queue->jobs[i].fence);
-         queue->jobs[i].job = NULL;
+   if (queue->num_threads == 0) {
+      for (unsigned i = queue->read_idx; i != queue->write_idx;
+           i = (i + 1) % queue->max_jobs) {
+         if (queue->jobs[i].job) {
+            util_queue_fence_signal(queue->jobs[i].fence);
+            queue->jobs[i].job = NULL;
+         }
       }
+      queue->read_idx = queue->write_idx;
+      queue->num_queued = 0;
    }
-   queue->read_idx = queue->write_idx;
-   queue->num_queued = 0;
    mtx_unlock(&queue->lock);
    return 0;
 }
@@ -425,25 +429,37 @@ fail:
 }
 
 static void
-util_queue_killall_and_wait(struct util_queue *queue)
+util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads)
 {
    unsigned i;
 
    /* Signal all threads to terminate. */
    mtx_lock(&queue->finish_lock);
+
+   if (keep_num_threads >= queue->num_threads) {
+      mtx_unlock(&queue->finish_lock);
+      return;
+   }
+
    mtx_lock(&queue->lock);
-   queue->kill_threads = 1;
+   unsigned old_num_threads = queue->num_threads;
+   /* Setting num_threads is what causes the threads to terminate.
+    * Then cnd_broadcast wakes them up and they will exit their function.
+    */
+   queue->num_threads = keep_num_threads;
    cnd_broadcast(&queue->has_queued_cond);
    mtx_unlock(&queue->lock);
 
-   for (i = 0; i < queue->num_threads; i++)
+   for (i = keep_num_threads; i < old_num_threads; i++)
       thrd_join(queue->threads[i], NULL);
-   queue->num_threads = 0;
+
    mtx_unlock(&queue->finish_lock);
 }
 
 void
 util_queue_destroy(struct util_queue *queue)
 {
-   util_queue_killall_and_wait(queue);
+   util_queue_kill_threads(queue, 0);
    remove_from_atexit_list(queue);
 
    cnd_destroy(&queue->has_space_cond);
@@ -464,7 +480,7 @@ util_queue_add_job(struct util_queue *queue,
    struct util_queue_job *ptr;
 
    mtx_lock(&queue->lock);
-   if (queue->kill_threads) {
+   if (queue->num_threads == 0) {
       mtx_unlock(&queue->lock);
       /* well no good option here, but any leaks will be
        * short-lived as things are shutting down..
src/util/u_queue.h
@@ -201,15 +201,14 @@ struct util_queue_job {
 /* Put this into your context. */
 struct util_queue {
    char name[14]; /* 13 characters = the thread name without the index */
-   mtx_t finish_lock; /* only for util_queue_finish */
+   mtx_t finish_lock; /* for util_queue_finish and protects threads/num_threads */
    mtx_t lock;
    cnd_t has_queued_cond;
    cnd_t has_space_cond;
    thrd_t *threads;
    unsigned flags;
    int num_queued;
-   unsigned num_threads;
-   int kill_threads;
+   unsigned num_threads; /* decreasing this number will terminate threads */
    int max_jobs;
    int write_idx, read_idx; /* ring buffer pointers */
    struct util_queue_job *jobs;
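util_queue_kill_threads() is still static after this patch, so wiring it up to ARB_parallel_shader_compile's MaxShaderCompilerThreadsARB hint would need a driver-facing entry point layered on top of it. The sketch below is only an assumption about what that glue could look like, not code from this commit: the wrapper name queue_adjust_num_threads and the spawn_worker() helper are invented, and util_queue_kill_threads() would first have to be exported (or wrapped) from u_queue.c.

#include "util/u_queue.h"

/* Invented helper: creates worker thread `index` for the queue. */
static void spawn_worker(struct util_queue *queue, unsigned index);

/* Hypothetical driver-facing glue, not part of this commit. */
void
queue_adjust_num_threads(struct util_queue *queue, unsigned num_threads)
{
   mtx_lock(&queue->finish_lock);
   unsigned old = queue->num_threads;

   if (num_threads < old) {
      mtx_unlock(&queue->finish_lock);
      /* Shrinking: reuse the new mechanism.  util_queue_kill_threads()
       * takes finish_lock itself, and only workers with
       * thread_index >= num_threads terminate and get joined. */
      util_queue_kill_threads(queue, num_threads);
      return;
   }

   /* Growing: raise num_threads first so new workers do not see
    * thread_index >= num_threads and exit immediately, then start them.
    * finish_lock now also guards threads/num_threads per the header change. */
   mtx_lock(&queue->lock);
   queue->num_threads = num_threads;
   mtx_unlock(&queue->lock);

   for (unsigned i = old; i < num_threads; i++)
      spawn_worker(queue, i);

   mtx_unlock(&queue->finish_lock);
}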