// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/namei.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"

#include "io_uring.h"
#include "rsrc.h"
#include "openclose.h"

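/*
 * Per-request state for IORING_OP_OPENAT and IORING_OP_OPENAT2. The
 * filename is copied out of userspace at prep time; 'how' holds the
 * (possibly extended) open arguments, and a non-zero 'file_slot' means
 * the resulting file goes into the fixed file table rather than getting
 * a regular fd.
 */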
struct io_open {
	struct file			*file;
	int				dfd;
	u32				file_slot;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

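/*
 * Per-request state for IORING_OP_CLOSE: either a regular fd, or a
 * fixed file table slot when 'file_slot' is non-zero.
 */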
struct io_close {
	struct file			*file;
	int				fd;
	u32				file_slot;
};

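/*
 * Returns true if this open cannot usefully be attempted non-blocking
 * and should go straight to the async worker.
 */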
static bool io_openat_force_async(struct io_open *open)
{
	/*
	 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
	 * it'll always -EAGAIN. Note that we test for __O_TMPFILE because
	 * O_TMPFILE includes O_DIRECTORY, which isn't a flag we need to force
	 * async for.
	 */
	return open->how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE);
}

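/*
 * Common prep for both openat variants; the caller must have filled in
 * open->how already. Validates the SQE, copies the filename in from
 * userspace and snapshots RLIMIT_NOFILE for the later fd allocation.
 */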
static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	const char __user *fname;
	int ret;

	if (unlikely(sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should be already initialised */
	if (!(open->how.flags & O_PATH) && force_o_largefile())
		open->how.flags |= O_LARGEFILE;

	open->dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	open->filename = getname(fname);
	if (IS_ERR(open->filename)) {
		ret = PTR_ERR(open->filename);
		open->filename = NULL;
		return ret;
	}

	open->file_slot = READ_ONCE(sqe->file_index);
	if (open->file_slot && (open->how.flags & O_CLOEXEC))
		return -EINVAL;

	open->nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	if (io_openat_force_async(open))
		req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

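/*
 * Prep for IORING_OP_OPENAT: the classic flags/mode pair from the SQE,
 * converted into an open_how via build_open_how().
 */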
int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	u64 mode = READ_ONCE(sqe->len);
	u64 flags = READ_ONCE(sqe->open_flags);

	open->how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

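/*
 * Prep for IORING_OP_OPENAT2: userspace passes a struct open_how by
 * pointer (sqe->addr2) and size (sqe->len), copied in under the usual
 * extensible-struct rules of copy_struct_from_user(). With liburing,
 * submission looks roughly like this (a sketch, not taken from this
 * file):
 *
 *	struct open_how how = { .flags = O_RDONLY,
 *				.resolve = RESOLVE_CACHED };
 *	io_uring_prep_openat2(sqe, AT_FDCWD, "file.txt", &how);
 */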
int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_how __user *how;
	size_t len;
	int ret;

	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&open->how, sizeof(open->how), how, len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

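/*
 * Issue path for both open variants. In the IO_URING_F_NONBLOCK case the
 * lookup is attempted with LOOKUP_CACHED and O_NONBLOCK; -EAGAIN then
 * punts the request to io-wq for a blocking retry, unless the application
 * itself asked for RESOLVE_CACHED.
 */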
int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_flags op;
	struct file *file;
	bool resolve_nonblock, nonblock_set;
	bool fixed = !!open->file_slot;
	int ret;

	ret = build_open_flags(&open->how, &op);
	if (ret)
		goto err;
	nonblock_set = op.open_flag & O_NONBLOCK;
	resolve_nonblock = open->how.resolve & RESOLVE_CACHED;
	if (issue_flags & IO_URING_F_NONBLOCK) {
		WARN_ON_ONCE(io_openat_force_async(open));
		op.lookup_flags |= LOOKUP_CACHED;
		op.open_flag |= O_NONBLOCK;
	}

	if (!fixed) {
		ret = __get_unused_fd_flags(open->how.flags, open->nofile);
		if (ret < 0)
			goto err;
	}

	file = do_filp_open(open->dfd, open->filename, &op);
	if (IS_ERR(file)) {
		/*
		 * We could hang on to this 'fd' on retrying, but seems like
		 * marginal gain for something that is now known to be a slower
		 * path. So just put it, and we'll get a new one when we retry.
		 */
		if (!fixed)
			put_unused_fd(ret);

		ret = PTR_ERR(file);
		/* only retry if RESOLVE_CACHED wasn't already set by application */
		if (ret == -EAGAIN &&
		    (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
			return -EAGAIN;
		goto err;
	}

	if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
		file->f_flags &= ~O_NONBLOCK;

	if (!fixed)
		fd_install(ret, file);
	else
		ret = io_fixed_fd_install(req, issue_flags, file,
						open->file_slot);
err:
	putname(open->filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

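/* IORING_OP_OPENAT shares the issue path with openat2. */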
int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}

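/*
 * Called for requests that still hold REQ_F_NEED_CLEANUP; drops the
 * filename reference taken at prep time.
 */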
void io_open_cleanup(struct io_kiocb *req)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);

	if (open->filename)
		putname(open->filename);
}

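/*
 * Close a fixed file table slot, taking the ring submission lock as
 * dictated by issue_flags since the fixed file table is protected by it.
 */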
int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
		     unsigned int offset)
{
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = io_fixed_fd_remove(ctx, offset);
	io_ring_submit_unlock(ctx, issue_flags);

	return ret;
}

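/* file_slot is 1-based in the SQE so that 0 can mean "not a fixed file". */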
static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	return __io_close_fixed(req->ctx, issue_flags, close->file_slot - 1);
}

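/*
 * Prep for IORING_OP_CLOSE: exactly one of a regular fd or a fixed file
 * slot may be given, and unused SQE fields must be zero.
 */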
int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	close->fd = READ_ONCE(sqe->fd);
	close->file_slot = READ_ONCE(sqe->file_index);
	if (close->file_slot && close->fd)
		return -EINVAL;

	return 0;
}

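/*
 * Issue path for close. Fixed slots go through the fixed file table.
 * For regular fds, a file with a ->flush() op is punted to io-wq since
 * flushing may block; otherwise the fd is detached under file_lock and
 * closed inline. With liburing, roughly (a sketch):
 *
 *	io_uring_prep_close(sqe, fd);
 *	io_uring_prep_close_direct(sqe, file_index);
 */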
int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
	struct files_struct *files = current->files;
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
	struct fdtable *fdt;
	struct file *file;
	int ret = -EBADF;

	if (close->file_slot) {
		ret = io_close_fixed(req, issue_flags);
		goto err;
	}

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (close->fd >= fdt->max_fds) {
		spin_unlock(&files->file_lock);
		goto err;
	}
	file = rcu_dereference_protected(fdt->fd[close->fd],
					 lockdep_is_held(&files->file_lock));
	if (!file || io_is_uring_fops(file)) {
		spin_unlock(&files->file_lock);
		goto err;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
		spin_unlock(&files->file_lock);
		return -EAGAIN;
	}

	file = __close_fd_get_file(close->fd);
	spin_unlock(&files->file_lock);
	if (!file)
		goto err;

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(file, current->files);
err:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}