| author | Jens Axboe <axboe@kernel.dk> | 2022-07-24 18:41:03 -0600 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2022-07-24 18:41:03 -0600 |
| commit | 4effe18fc0da27ae5d51a702841e87fa13b8a32d (patch) | |
| tree | 468f353a3713c93b27e7b2c262efd747e66ff199 /io_uring/sync.c | |
| parent | Merge branch 'io_uring-zerocopy-send' of git://git.kernel.org/pub/scm/linux/k... (diff) | |
| parent | io_uring: ensure REQ_F_ISREG is set async offload (diff) | |
| download | linux-4effe18fc0da27ae5d51a702841e87fa13b8a32d.tar.gz linux-4effe18fc0da27ae5d51a702841e87fa13b8a32d.zip | |
Merge branch 'for-5.20/io_uring' into for-5.20/io_uring-zerocopy-send
* for-5.20/io_uring: (716 commits)
io_uring: ensure REQ_F_ISREG is set async offload
net: fix compat pointer in get_compat_msghdr()
io_uring: Don't require reinitable percpu_ref
io_uring: fix types in io_recvmsg_multishot_overflow
io_uring: Use atomic_long_try_cmpxchg in __io_account_mem
io_uring: support multishot in recvmsg
net: copy from user before calling __get_compat_msghdr
net: copy from user before calling __copy_msghdr
io_uring: support 0 length iov in buffer select in compat
io_uring: fix multishot ending when not polled
io_uring: add netmsg cache
io_uring: impose max limit on apoll cache
io_uring: add abstraction around apoll cache
io_uring: move apoll cache to poll.c
io_uring: consolidate hash_locked io-wq handling
io_uring: clear REQ_F_HASH_LOCKED on hash removal
io_uring: don't race double poll setting REQ_F_ASYNC_DATA
io_uring: don't miss setting REQ_F_DOUBLE_POLL
io_uring: disable multishot recvmsg
io_uring: only trace one of complete or overflow
...
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/sync.c')
| -rw-r--r-- | io_uring/sync.c | 110 |
1 file changed, 110 insertions, 0 deletions
diff --git a/io_uring/sync.c b/io_uring/sync.c
new file mode 100644
index 000000000000..f2102afa79ca
--- /dev/null
+++ b/io_uring/sync.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/namei.h>
+#include <linux/io_uring.h>
+#include <linux/fsnotify.h>
+
+#include <uapi/linux/io_uring.h>
+
+#include "io_uring.h"
+#include "sync.h"
+
+struct io_sync {
+	struct file			*file;
+	loff_t				len;
+	loff_t				off;
+	int				flags;
+	int				mode;
+};
+
+int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	struct io_sync *sync = io_kiocb_to_cmd(req);
+
+	if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
+		return -EINVAL;
+
+	sync->off = READ_ONCE(sqe->off);
+	sync->len = READ_ONCE(sqe->len);
+	sync->flags = READ_ONCE(sqe->sync_range_flags);
+	return 0;
+}
+
+int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_sync *sync = io_kiocb_to_cmd(req);
+	int ret;
+
+	/* sync_file_range always requires a blocking context */
+	if (issue_flags & IO_URING_F_NONBLOCK)
+		return -EAGAIN;
+
+	ret = sync_file_range(req->file, sync->off, sync->len, sync->flags);
+	io_req_set_res(req, ret, 0);
+	return IOU_OK;
+}
+
+int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	struct io_sync *sync = io_kiocb_to_cmd(req);
+
+	if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
+		return -EINVAL;
+
+	sync->flags = READ_ONCE(sqe->fsync_flags);
+	if (unlikely(sync->flags & ~IORING_FSYNC_DATASYNC))
+		return -EINVAL;
+
+	sync->off = READ_ONCE(sqe->off);
+	sync->len = READ_ONCE(sqe->len);
+	return 0;
+}
+
+int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_sync *sync = io_kiocb_to_cmd(req);
+	loff_t end = sync->off + sync->len;
+	int ret;
+
+	/* fsync always requires a blocking context */
+	if (issue_flags & IO_URING_F_NONBLOCK)
+		return -EAGAIN;
+
+	ret = vfs_fsync_range(req->file, sync->off, end > 0 ? end : LLONG_MAX,
+				sync->flags & IORING_FSYNC_DATASYNC);
+	io_req_set_res(req, ret, 0);
+	return IOU_OK;
+}
+
+int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	struct io_sync *sync = io_kiocb_to_cmd(req);
+
+	if (sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
+		return -EINVAL;
+
+	sync->off = READ_ONCE(sqe->off);
+	sync->len = READ_ONCE(sqe->addr);
+	sync->mode = READ_ONCE(sqe->len);
+	return 0;
+}
+
+int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
+{
+	struct io_sync *sync = io_kiocb_to_cmd(req);
+	int ret;
+
+	/* fallocate always requiring blocking context */
+	if (issue_flags & IO_URING_F_NONBLOCK)
+		return -EAGAIN;
+	ret = vfs_fallocate(req->file, sync->mode, sync->off, sync->len);
+	if (ret >= 0)
+		fsnotify_modify(req->file);
+	io_req_set_res(req, ret, 0);
+	return IOU_OK;
+}
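As a companion to the diff above, here is a minimal, hypothetical userspace sketch (not part of this commit) showing how the three handlers in io_uring/sync.c are reached through liburing: IORING_OP_FALLOCATE lands in io_fallocate(), IORING_OP_SYNC_FILE_RANGE in io_sync_file_range(), and IORING_OP_FSYNC (with IORING_FSYNC_DATASYNC) in io_fsync(). It assumes liburing is installed; the file name, offsets, and lengths are arbitrary choices for illustration only.

```c
/* Hypothetical example, not part of the commit. Build with: gcc sync_demo.c -luring */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Submit nothing; just reap one completion and return its result (negative errno on failure). */
static int wait_one(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	int ret = io_uring_wait_cqe(ring, &cqe);

	if (ret < 0)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return ret;
}

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	int fd;

	fd = open("testfile", O_RDWR | O_CREAT, 0644);
	if (fd < 0)
		return 1;
	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	/* IORING_OP_FALLOCATE -> io_fallocate_prep()/io_fallocate() */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_fallocate(sqe, fd, 0, 0, 4096);
	io_uring_submit(&ring);
	printf("fallocate: %d\n", wait_one(&ring));

	/* IORING_OP_SYNC_FILE_RANGE -> io_sfr_prep()/io_sync_file_range() */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_sync_file_range(sqe, fd, 4096, 0, 0);
	io_uring_submit(&ring);
	printf("sync_file_range: %d\n", wait_one(&ring));

	/* IORING_OP_FSYNC with IORING_FSYNC_DATASYNC -> io_fsync_prep()/io_fsync() */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
	io_uring_submit(&ring);
	printf("fsync: %d\n", wait_one(&ring));

	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}
```

Note that all three issue handlers return -EAGAIN when called with IO_URING_F_NONBLOCK, so these requests are always punted to the io-wq blocking context; that retry is invisible to userspace, which only sees the final CQE result.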
