// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/namei.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"

#include "io_uring.h"
#include "rsrc.h"
#include "openclose.h"

struct io_open {
	struct file *file;
	int dfd;
	u32 file_slot;
	struct filename *filename;
	struct open_how how;
	unsigned long nofile;
};

struct io_close {
	struct file *file;
	int fd;
	u32 file_slot;
};

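/*
 * Common prep for IORING_OP_OPENAT/IORING_OP_OPENAT2: validate the SQE,
 * copy in the filename, and stash the directory fd, fixed file slot and
 * RLIMIT_NOFILE cap for the issue path. open->how must already have been
 * initialised by the caller. O_CLOEXEC makes no sense for a fixed file
 * slot, so reject that combination up front.
 */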
static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	const char __user *fname;
	int ret;

	if (unlikely(sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should be already initialised */
	if (!(open->how.flags & O_PATH) && force_o_largefile())
		open->how.flags |= O_LARGEFILE;

	open->dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	open->filename = getname(fname);
	if (IS_ERR(open->filename)) {
		ret = PTR_ERR(open->filename);
		open->filename = NULL;
		return ret;
	}

	open->file_slot = READ_ONCE(sqe->file_index);
	if (open->file_slot && (open->how.flags & O_CLOEXEC))
		return -EINVAL;

	open->nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

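/*
 * Prep for IORING_OP_OPENAT: sqe->len carries the mode and sqe->open_flags
 * the open(2) flags, combined into an open_how for the common prep.
 */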
int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	u64 mode = READ_ONCE(sqe->len);
	u64 flags = READ_ONCE(sqe->open_flags);

	open->how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

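/*
 * Prep for IORING_OP_OPENAT2: sqe->addr2 points to a user struct open_how
 * and sqe->len is its size, mirroring the openat2(2) ABI. With liburing
 * (an assumption, not part of this file) a submission might look like:
 *
 *	struct open_how how = { .flags = O_RDONLY, .resolve = RESOLVE_CACHED };
 *	io_uring_prep_openat2(sqe, AT_FDCWD, "file.txt", &how);
 */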
int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_how __user *how;
	size_t len;
	int ret;

	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&open->how, sizeof(open->how), how, len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

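/*
 * Issue path for both openat and openat2. On the nonblocking issue attempt,
 * flags that always block (O_TRUNC, O_CREAT, __O_TMPFILE) punt straight to
 * async; otherwise the lookup runs with LOOKUP_CACHED and is retried from
 * the async worker on -EAGAIN, unless the application itself asked for
 * RESOLVE_CACHED.
 */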
int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_flags op;
	struct file *file;
	bool resolve_nonblock, nonblock_set;
	bool fixed = !!open->file_slot;
	int ret;

	ret = build_open_flags(&open->how, &op);
	if (ret)
		goto err;
	nonblock_set = op.open_flag & O_NONBLOCK;
	resolve_nonblock = open->how.resolve & RESOLVE_CACHED;
	if (issue_flags & IO_URING_F_NONBLOCK) {
		/*
		 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
		 * it'll always -EAGAIN. Note that we test for __O_TMPFILE
		 * because O_TMPFILE includes O_DIRECTORY, which isn't a flag
		 * we need to force async for.
		 */
		if (open->how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE))
			return -EAGAIN;
		op.lookup_flags |= LOOKUP_CACHED;
		op.open_flag |= O_NONBLOCK;
	}

	if (!fixed) {
		ret = __get_unused_fd_flags(open->how.flags, open->nofile);
		if (ret < 0)
			goto err;
	}

	file = do_filp_open(open->dfd, open->filename, &op);
	if (IS_ERR(file)) {
		/*
		 * We could hang on to this 'fd' on retrying, but seems like
		 * marginal gain for something that is now known to be a slower
		 * path. So just put it, and we'll get a new one when we retry.
		 */
		if (!fixed)
			put_unused_fd(ret);

		ret = PTR_ERR(file);
		/* only retry if RESOLVE_CACHED wasn't already set by application */
		if (ret == -EAGAIN &&
		    (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
			return -EAGAIN;
		goto err;
	}

	if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
		file->f_flags &= ~O_NONBLOCK;
	fsnotify_open(file);

	if (!fixed)
		fd_install(ret, file);
	else
		ret = io_fixed_fd_install(req, issue_flags, file,
					  open->file_slot);
err:
	putname(open->filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

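/* IORING_OP_OPENAT is a strict subset of openat2, so share the issue path */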
int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}

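/*
 * Drop the filename reference taken at prep time if the request is torn
 * down before (or without) reaching the point where issue releases it.
 */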
void io_open_cleanup(struct io_kiocb *req)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);

	if (open->filename)
		putname(open->filename);
}

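/* Remove a fixed file from the table, under the ring submission lock */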
int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
		     unsigned int offset)
{
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = io_fixed_fd_remove(ctx, offset);
	io_ring_submit_unlock(ctx, issue_flags);

	return ret;
}

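/* Fixed slots are 1-based in the SQE; convert to the 0-based table offset */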
static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	return __io_close_fixed(req->ctx, issue_flags, close->file_slot - 1);
}

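/*
 * Prep for IORING_OP_CLOSE: either a regular fd (sqe->fd) or a fixed file
 * slot (sqe->file_index), never both. With liburing (an assumption, not
 * part of this file) these correspond to io_uring_prep_close() and
 * io_uring_prep_close_direct() respectively.
 */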
int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	close->fd = READ_ONCE(sqe->fd);
	close->file_slot = READ_ONCE(sqe->file_index);
	if (close->file_slot && close->fd)
		return -EINVAL;

	return 0;
}

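/*
 * Issue path for close. A file with a ->flush() op may block at close
 * time, so punt it to async from the nonblocking issue attempt; everything
 * else is detached from the fd table under ->file_lock and closed inline.
 */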
int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
	struct files_struct *files = current->files;
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
	struct fdtable *fdt;
	struct file *file;
	int ret = -EBADF;

	if (close->file_slot) {
		ret = io_close_fixed(req, issue_flags);
		goto err;
	}

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (close->fd >= fdt->max_fds) {
		spin_unlock(&files->file_lock);
		goto err;
	}
	file = rcu_dereference_protected(fdt->fd[close->fd],
					 lockdep_is_held(&files->file_lock));
	if (!file || io_is_uring_fops(file)) {
		spin_unlock(&files->file_lock);
		goto err;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
		spin_unlock(&files->file_lock);
		return -EAGAIN;
	}

	file = __close_fd_get_file(close->fd);
	spin_unlock(&files->file_lock);
	if (!file)
		goto err;

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(file, current->files);
err:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}