Mirror of https://github.com/qemu/qemu.git
gluster: Do not move coroutine into BDS context
The request coroutine may not run in the BDS AioContext. We should wake it in its own context, not move it.

With that, we can remove GlusterAIOCB.aio_context.

Also add a comment why aio_co_schedule() is safe to use in this way.

**Note:** Due to a lack of a gluster set-up, I have not tested this commit. It seemed safe enough to send anyway, just maybe not to qemu-stable. To be clear, I don’t know of any user-visible bugs that would arise from the state without this patch; the request coroutine is moved into the main BDS AioContext, so guest device completion code will run in a different context than where the request started, which can’t be good, but I haven’t actually confirmed any bugs (due to not being able to test it).

Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
Message-ID: <20251110154854.151484-7-hreitz@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Committed by: Kevin Wolf
Parent: 53d5c7ffac
Commit: 7214ad20da
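For illustration, here is a minimal sketch of the completion-callback pattern the commit message describes, assuming QEMU's coroutine/AIO APIs (aio_co_schedule(), qemu_coroutine_get_aio_context(), qemu_coroutine_self()). The ExampleAIOCB type and example_finish_aiocb() are hypothetical stand-ins, not part of this patch, and the snippet is only meant to build inside the QEMU tree:

```c
#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "block/aio.h"

/* Hypothetical completion state, mirroring the role of GlusterAIOCB. */
typedef struct ExampleAIOCB {
    int ret;
    Coroutine *coroutine;   /* request coroutine awaiting completion */
} ExampleAIOCB;

/*
 * Completion callback, invoked from a library thread that is not a QEMU
 * thread.  aio_co_wake() is not usable here because it consults the
 * calling thread's AioContext; instead, schedule the coroutine in the
 * context it already runs in, rather than in some other (e.g. BDS) context.
 */
static void example_finish_aiocb(int ret, void *arg)
{
    ExampleAIOCB *acb = arg;

    acb->ret = ret;
    aio_co_schedule(qemu_coroutine_get_aio_context(acb->coroutine),
                    acb->coroutine);
}
```

The hunks below make exactly this change in the gluster driver's gluster_finish_aiocb() and drop the now-unused aio_context field.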
@@ -56,7 +56,6 @@ typedef struct GlusterAIOCB {
     int64_t size;
     int ret;
     Coroutine *coroutine;
-    AioContext *aio_context;
 } GlusterAIOCB;
 
 typedef struct BDRVGlusterState {
@@ -743,7 +742,17 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret,
         acb->ret = -EIO; /* Partial read/write - fail it */
     }
 
-    aio_co_schedule(acb->aio_context, acb->coroutine);
+    /*
+     * Safe to call: The coroutine will yield exactly once awaiting this
+     * scheduling, and the context is its own context, so it will be scheduled
+     * once it does yield.
+     *
+     * (aio_co_wake() would call qemu_get_current_aio_context() to check whether
+     * we are in the same context, but we are not in a qemu thread, so we cannot
+     * do that. Use aio_co_schedule() directly.)
+     */
+    aio_co_schedule(qemu_coroutine_get_aio_context(acb->coroutine),
+                    acb->coroutine);
 }
 
 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
@@ -1006,7 +1015,6 @@ static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
     acb.size = bytes;
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
-    acb.aio_context = bdrv_get_aio_context(bs);
 
     ret = glfs_zerofill_async(s->fd, offset, bytes, gluster_finish_aiocb, &acb);
     if (ret < 0) {
@@ -1184,7 +1192,6 @@ static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
     acb.size = size;
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
-    acb.aio_context = bdrv_get_aio_context(bs);
 
     if (write) {
         ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
@@ -1251,7 +1258,6 @@ static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
     acb.size = 0;
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
-    acb.aio_context = bdrv_get_aio_context(bs);
 
     ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
     if (ret < 0) {
@@ -1299,7 +1305,6 @@ static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs,
     acb.size = 0;
     acb.ret = 0;
     acb.coroutine = qemu_coroutine_self();
-    acb.aio_context = bdrv_get_aio_context(bs);
 
     ret = glfs_discard_async(s->fd, offset, bytes, gluster_finish_aiocb, &acb);
     if (ret < 0) {
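And a matching sketch of the request side, as in qemu_gluster_co_rw() and the other hunks above: the coroutine records itself in the stack-allocated AIOCB, starts the asynchronous operation, and yields exactly once until the completion callback schedules it again. This continues the hypothetical ExampleAIOCB sketch from above; submit_async_op() is a made-up stand-in for the glfs_*_async() calls:

```c
/* Hypothetical submission function standing in for glfs_*_async(). */
int submit_async_op(void (*cb)(int ret, void *arg), void *arg);

static coroutine_fn int example_co_request(void)
{
    ExampleAIOCB acb = {
        .ret = 0,
        .coroutine = qemu_coroutine_self(),   /* wake this coroutine */
    };

    if (submit_async_op(example_finish_aiocb, &acb) < 0) {
        return -errno;
    }

    /* Yield exactly once; example_finish_aiocb() schedules us back. */
    qemu_coroutine_yield();
    return acb.ret;
}
```

Because the callback now schedules the coroutine in the AioContext it already runs in, completion handling stays in the same context where the request started, which is the point of this patch.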