17a91051fe
There is an old problem with io-wq cancellation where requests should be
killed and are in io-wq but are not discoverable, e.g. they sit in the
@next_hashed or @linked vars of io_worker_handle_work(). It adds some
unreliability to individual request cancellation, but may also
potentially get __io_uring_cancel() stuck. For instance:

1) An __io_uring_cancel()'s cancellation round has not found any
   request, but there are some as described above.
2) __io_uring_cancel() goes to sleep.
3) Then workers wake up and try to execute those hidden requests,
   which happen to be unbound.

As we already cancel all requests of io-wq there, set IO_WQ_BIT_EXIT in
advance, preventing 3) from executing unbound requests. The workers
will initially break out of their loops because they get a signal, as
they are threads of the dying/exec()'ing user task.

Cc: stable@vger.kernel.org
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/abfcf8c54cb9e8f7bfbad7e9a0cc5433cc70bdc2.1621781238.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
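As a sketch, the intended ordering with this change (assuming the
__io_uring_cancel() call site; the surrounding code is elided):

	if (tctx->io_wq)
		io_wq_exit_start(tctx->io_wq);	/* set IO_WQ_BIT_EXIT up front */
	/* ... cancellation rounds, sleeping ... */
	io_wq_put_and_exit(tctx->io_wq);	/* eventual full teardown */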
#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/refcount.h>

struct io_wq;

enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HASHED	= 2,
	IO_WQ_WORK_UNBOUND	= 4,
	IO_WQ_WORK_CONCURRENT	= 16,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};

enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

static inline void wq_list_add_after(struct io_wq_work_node *node,
				     struct io_wq_work_node *pos,
				     struct io_wq_work_list *list)
{
	struct io_wq_work_node *next = pos->next;

	pos->next = node;
	node->next = next;
	if (!next)
		list->last = node;
}

static inline void wq_list_add_tail(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	if (!list->first) {
		list->last = node;
		WRITE_ONCE(list->first, node);
	} else {
		list->last->next = node;
		list->last = node;
	}
	node->next = NULL;
}

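/*
 * Usage sketch (illustrative, not part of the original header): build the
 * queue a -> mid -> b from an empty (zero-initialised) list, appending
 * with wq_list_add_tail() and splicing with wq_list_add_after().
 */
static inline void wq_list_example_build(struct io_wq_work_list *list,
					 struct io_wq_work_node *a,
					 struct io_wq_work_node *b,
					 struct io_wq_work_node *mid)
{
	wq_list_add_tail(a, list);		/* list: a */
	wq_list_add_tail(b, list);		/* list: a -> b */
	wq_list_add_after(mid, a, list);	/* list: a -> mid -> b */
}
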
static inline void wq_list_cut(struct io_wq_work_list *list,
			       struct io_wq_work_node *last,
			       struct io_wq_work_node *prev)
{
	/* first in the list, if prev==NULL */
	if (!prev)
		WRITE_ONCE(list->first, last->next);
	else
		prev->next = last->next;

	if (last == list->last)
		list->last = prev;
	last->next = NULL;
}

static inline void wq_list_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	wq_list_cut(list, node, prev);
}

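/*
 * Usage sketch (illustrative): pop the head node, if any. The head has no
 * predecessor, so @prev is NULL; callers are assumed to provide their own
 * locking, as io-wq does around these helpers.
 */
static inline struct io_wq_work_node *
wq_list_example_pop(struct io_wq_work_list *list)
{
	struct io_wq_work_node *node = list->first;

	if (node)
		wq_list_del(list, node, NULL);
	return node;
}
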
#define wq_list_for_each(pos, prv, head) \
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)
#define INIT_WQ_LIST(list)	do {		\
	(list)->first = NULL;			\
	(list)->last = NULL;			\
} while (0)

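/*
 * Usage sketch (illustrative): walk the list and count entries. @prv
 * trails one node behind @pos, which is exactly what wq_list_del() needs
 * if an entry is to be unlinked mid-walk.
 */
static inline int wq_list_example_count(struct io_wq_work_list *list)
{
	struct io_wq_work_node *pos, *prv;
	int nr = 0;

	wq_list_for_each(pos, prv, list)
		nr++;
	(void)prv;	/* only needed when deleting during the walk */
	return nr;
}
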
struct io_wq_work {
	struct io_wq_work_node list;
	const struct cred *creds;
	unsigned flags;
};

static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
{
	if (!work->list.next)
		return NULL;

	return container_of(work->list.next, struct io_wq_work, list);
}

typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
typedef void (io_wq_work_fn)(struct io_wq_work *);

struct io_wq_hash {
	refcount_t refs;
	unsigned long map;
	struct wait_queue_head wait;
};

static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
	if (refcount_dec_and_test(&hash->refs))
		kfree(hash);
}

struct io_wq_data {
	struct io_wq_hash *hash;
	struct task_struct *task;
	io_wq_work_fn *do_work;
	free_work_fn *free_work;
};

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);

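/*
 * Lifecycle sketch (illustrative, loosely mirroring io_uring's task
 * context setup; error handling elided). The caller owns a reference on
 * @hash, which io-wq uses to serialise hashed work, and the bounded
 * worker count below is an assumed placeholder.
 */
static inline struct io_wq *io_wq_example_start(struct io_wq_hash *hash,
						io_wq_work_fn *do_work,
						free_work_fn *free_work)
{
	struct io_wq_data data = {
		.hash		= hash,
		.task		= current,	/* workers act on behalf of this task */
		.do_work	= do_work,
		.free_work	= free_work,
	};

	/* "4 * CPUs" is an assumed bound; io_uring also clamps it to the SQ size */
	return io_wq_create(4 * num_online_cpus(), &data);
}

/*
 * Matching teardown: setting IO_WQ_BIT_EXIT first via io_wq_exit_start()
 * closes the window described in the commit message above.
 */
static inline void io_wq_example_stop(struct io_wq *wq)
{
	io_wq_exit_start(wq);
	io_wq_put_and_exit(wq);
}
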
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);

static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
	return work->flags & IO_WQ_WORK_HASHED;
}

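/*
 * Hashing sketch (illustrative): works hashed to the same key execute
 * serially, while unhashed works run fully concurrently; io_uring uses
 * this to serialise buffered writes against the same file. @key is an
 * assumed example value, e.g. an inode pointer.
 */
static inline void io_wq_example_enqueue_hashed(struct io_wq *wq,
						struct io_wq_work *work,
						void *key)
{
	io_wq_hash_work(work, key);	/* hash lands above IO_WQ_HASH_SHIFT */
	io_wq_enqueue(wq, work);	/* io_wq_is_hashed(work) is now true */
}
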
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all);

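/*
 * Cancellation sketch (illustrative): the callback sees each pending or
 * running work along with the caller's cookie and returns true for works
 * to cancel; cancel_all == false would stop after the first match.
 */
static inline bool io_wq_example_match_creds(struct io_wq_work *work,
					     void *data)
{
	return work->creds == data;
}

static inline enum io_wq_cancel io_wq_example_cancel(struct io_wq *wq,
						     const struct cred *creds)
{
	return io_wq_cancel_cb(wq, io_wq_example_match_creds,
			       (void *)creds, true);
}
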
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

static inline bool io_wq_current_is_worker(void)
{
	return in_task() && (current->flags & PF_IO_WORKER) &&
		current->pf_io_worker;
}
#endif