crypto: mcryptd - protect the per-CPU queue with a lock
mcryptd_enqueue_request() grabs the per-CPU queue struct and protects access to it with disabled preemption. Then it schedules a worker on the same CPU. The worker in mcryptd_queue_worker() guards access to the same per-CPU variable with disabled preemption.

If we take CPU hotplug into account, it is possible that between queue_work_on() and the actual invocation of the worker the CPU goes down and the worker will be scheduled on _another_ CPU. Here the preempt_disable() protection does not work anymore. The easiest fix is to add a spin_lock() to guard access to the list.

Another detail: mcryptd_queue_worker() does not process more than MCRYPTD_BATCH invocations in a row. If there are still items left, it invokes queue_work() to proceed with more later. *I* would suggest simply dropping that check, because it does not use a system workqueue and the workqueue is already marked as "CPU_INTENSIVE"; if preemption is required, the scheduler should do it. However, if queue_work() is used, the work item is marked as CPU-unbound. That means it will try to run on the local CPU but may run on another CPU as well, especially with CONFIG_DEBUG_WQ_FORCE_RR_CPU=y. Again, preempt_disable() won't work here, but the lock introduced above will help. In order to keep the work item on the local CPU (and avoid round-robin placement) I changed it to queue_work_on().

Cc: stable@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 11edb55596
commit 9abffc6f2e
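To make the locking change easier to follow, here is a minimal, self-contained sketch of the pattern the patch switches to. It is not the mcryptd code itself: the demo_* names, the item type, and the use of system_wq are made up for illustration. The point is that the per-CPU queue is guarded by a spinlock rather than by disabled preemption, and the worker is (re-)queued with queue_work_on() so it stays bound to the local CPU.

/* Illustrative sketch only: hypothetical names, not the mcryptd code. */
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_item {
	struct list_head node;
};

struct demo_cpu_queue {
	struct list_head list;
	spinlock_t q_lock;		/* protects @list */
	struct work_struct work;
};

static DEFINE_PER_CPU(struct demo_cpu_queue, demo_queues);

static void demo_worker(struct work_struct *work);

static void demo_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct demo_cpu_queue *q = per_cpu_ptr(&demo_queues, cpu);

		INIT_LIST_HEAD(&q->list);
		spin_lock_init(&q->q_lock);
		INIT_WORK(&q->work, demo_worker);
	}
}

static void demo_enqueue(struct demo_item *item)
{
	struct demo_cpu_queue *q;
	int cpu;

	/*
	 * The lock, not disabled preemption, serialises against the
	 * worker: even if the worker later runs on another CPU (e.g.
	 * after this CPU went offline), the list stays consistent.
	 */
	q = raw_cpu_ptr(&demo_queues);
	spin_lock_bh(&q->q_lock);
	cpu = smp_processor_id();
	list_add_tail(&item->node, &q->list);
	spin_unlock_bh(&q->q_lock);

	/*
	 * queue_work_on() binds the work item to this CPU, so it is not
	 * treated as CPU-unbound (and shuffled around with
	 * CONFIG_DEBUG_WQ_FORCE_RR_CPU=y).
	 */
	queue_work_on(cpu, system_wq, &q->work);
}

static void demo_worker(struct work_struct *work)
{
	struct demo_cpu_queue *q =
		container_of(work, struct demo_cpu_queue, work);
	struct demo_item *item;
	bool more;

	/* _bh variant: the enqueue side may run from softirq context. */
	spin_lock_bh(&q->q_lock);
	item = list_first_entry_or_null(&q->list, struct demo_item, node);
	if (item)
		list_del(&item->node);
	more = !list_empty(&q->list);
	spin_unlock_bh(&q->q_lock);

	kfree(item);	/* "process" the dequeued item */

	/* Re-queue explicitly on the local CPU if items remain. */
	if (more)
		queue_work_on(smp_processor_id(), system_wq, &q->work);
}

The lock is what keeps the queue consistent when a hotplug event causes an already-queued work item to execute on a different CPU; disabled preemption only ever protected against interleaving on the local CPU.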
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -81,6 +81,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue,
 		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
+		spin_lock_init(&cpu_queue->q_lock);
 	}
 	return 0;
 }
@@ -104,15 +105,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
 	int cpu, err;
 	struct mcryptd_cpu_queue *cpu_queue;
 
-	cpu = get_cpu();
-	cpu_queue = this_cpu_ptr(queue->cpu_queue);
-	rctx->tag.cpu = cpu;
+	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+	spin_lock(&cpu_queue->q_lock);
+	cpu = smp_processor_id();
+	rctx->tag.cpu = smp_processor_id();
 
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
 		 cpu, cpu_queue, request);
+	spin_unlock(&cpu_queue->q_lock);
 	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-	put_cpu();
 
 	return err;
 }
@@ -161,16 +163,11 @@ static void mcryptd_queue_worker(struct work_struct *work)
 	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
 	i = 0;
 	while (i < MCRYPTD_BATCH || single_task_running()) {
-		/*
-		 * preempt_disable/enable is used to prevent
-		 * being preempted by mcryptd_enqueue_request()
-		 */
-		local_bh_disable();
-		preempt_disable();
+
+		spin_lock_bh(&cpu_queue->q_lock);
 		backlog = crypto_get_backlog(&cpu_queue->queue);
 		req = crypto_dequeue_request(&cpu_queue->queue);
-		preempt_enable();
-		local_bh_enable();
+		spin_unlock_bh(&cpu_queue->q_lock);
 
 		if (!req) {
 			mcryptd_opportunistic_flush();
@@ -185,7 +182,7 @@ static void mcryptd_queue_worker(struct work_struct *work)
 		++i;
 	}
 	if (cpu_queue->queue.qlen)
-		queue_work(kcrypto_wq, &cpu_queue->work);
+		queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
 }
 
 void mcryptd_flusher(struct work_struct *__work)
--- a/include/crypto/mcryptd.h
+++ b/include/crypto/mcryptd.h
@@ -27,6 +27,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
 
 struct mcryptd_cpu_queue {
 	struct crypto_queue queue;
+	spinlock_t q_lock;
 	struct work_struct work;
 };
 