/* SPDX-License-Identifier: MIT */
#ifndef LIB_URING_H
#define LIB_URING_H

#ifndef _XOPEN_SOURCE
#define _XOPEN_SOURCE 500 /* Required for glibc to expose sigset_t */
#endif

#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/stat.h>
#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <inttypes.h>
#include <time.h>
#include <sched.h>
#include <linux/swab.h>
#include "liburing/compat.h"
#include "liburing/io_uring.h"
#include "liburing/barrier.h"

#ifndef uring_unlikely
# define uring_unlikely(cond)	__builtin_expect(!!(cond), 0)
#endif

#ifndef uring_likely
# define uring_likely(cond)	__builtin_expect(!!(cond), 1)
#endif

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Library interface to io_uring
 */
struct io_uring_sq {
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *kflags;
	unsigned *kdropped;
	unsigned *array;
	struct io_uring_sqe *sqes;

	unsigned sqe_head;
	unsigned sqe_tail;

	size_t ring_sz;
	void *ring_ptr;

	unsigned pad[4];
};

struct io_uring_cq {
	unsigned *khead;
	unsigned *ktail;
	unsigned *kring_mask;
	unsigned *kring_entries;
	unsigned *kflags;
	unsigned *koverflow;
	struct io_uring_cqe *cqes;

	size_t ring_sz;
	void *ring_ptr;

	unsigned pad[4];
};

struct io_uring {
	struct io_uring_sq sq;
	struct io_uring_cq cq;
	unsigned flags;
	int ring_fd;

	unsigned features;
	unsigned pad[3];
};

/*
 * Library interface
 */

/*
 * Returns an allocated io_uring_probe structure, or NULL if the probe fails
 * (for example, if it is not available). The caller is responsible for
 * freeing it.
 */
struct io_uring_probe *io_uring_get_probe_ring(struct io_uring *ring);
/* same as io_uring_get_probe_ring, but takes care of ring init and teardown */
struct io_uring_probe *io_uring_get_probe(void);

/*
 * Frees a probe allocated through io_uring_get_probe() or
 * io_uring_get_probe_ring()
 */
void io_uring_free_probe(struct io_uring_probe *probe);

static inline int io_uring_opcode_supported(const struct io_uring_probe *p, int op)
{
	if (op > p->last_op)
		return 0;
	return (p->ops[op].flags & IO_URING_OP_SUPPORTED) != 0;
}
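
/*
 * Usage sketch (not part of the liburing API; the helper name below is
 * hypothetical): query the kernel via the probe interface before relying on
 * an opcode (e.g. IORING_OP_SPLICE).
 */
static inline bool example_opcode_is_supported(int op)
{
	struct io_uring_probe *probe = io_uring_get_probe();
	bool supported;

	if (!probe)
		return false;
	supported = io_uring_opcode_supported(probe, op) != 0;
	io_uring_free_probe(probe);
	return supported;
}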

int io_uring_queue_init_params(unsigned entries, struct io_uring *ring,
			       struct io_uring_params *p);
int io_uring_queue_init(unsigned entries, struct io_uring *ring,
			unsigned flags);
int io_uring_queue_mmap(int fd, struct io_uring_params *p,
			struct io_uring *ring);
int io_uring_ring_dontfork(struct io_uring *ring);
void io_uring_queue_exit(struct io_uring *ring);
unsigned io_uring_peek_batch_cqe(struct io_uring *ring,
				 struct io_uring_cqe **cqes, unsigned count);
int io_uring_wait_cqes(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
		       unsigned wait_nr, struct __kernel_timespec *ts,
		       sigset_t *sigmask);
int io_uring_wait_cqe_timeout(struct io_uring *ring,
			      struct io_uring_cqe **cqe_ptr,
			      struct __kernel_timespec *ts);
int io_uring_submit(struct io_uring *ring);
int io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr);
struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);

int io_uring_register_buffers(struct io_uring *ring, const struct iovec *iovecs,
			      unsigned nr_iovecs);
int io_uring_register_buffers_tags(struct io_uring *ring,
				   const struct iovec *iovecs,
				   const __u64 *tags, unsigned nr);
int io_uring_register_buffers_update_tag(struct io_uring *ring,
					 unsigned off,
					 const struct iovec *iovecs,
					 const __u64 *tags, unsigned nr);
int io_uring_unregister_buffers(struct io_uring *ring);

int io_uring_register_files(struct io_uring *ring, const int *files,
			    unsigned nr_files);
int io_uring_register_files_tags(struct io_uring *ring, const int *files,
				 const __u64 *tags, unsigned nr);
int io_uring_register_files_update_tag(struct io_uring *ring, unsigned off,
				       const int *files, const __u64 *tags,
				       unsigned nr_files);

int io_uring_unregister_files(struct io_uring *ring);
int io_uring_register_files_update(struct io_uring *ring, unsigned off,
				   int *files, unsigned nr_files);
int io_uring_register_eventfd(struct io_uring *ring, int fd);
int io_uring_register_eventfd_async(struct io_uring *ring, int fd);
int io_uring_unregister_eventfd(struct io_uring *ring);
int io_uring_register_probe(struct io_uring *ring, struct io_uring_probe *p,
			    unsigned nr);
int io_uring_register_personality(struct io_uring *ring);
int io_uring_unregister_personality(struct io_uring *ring, int id);
int io_uring_register_restrictions(struct io_uring *ring,
				   struct io_uring_restriction *res,
				   unsigned int nr_res);
int io_uring_enable_rings(struct io_uring *ring);
int __io_uring_sqring_wait(struct io_uring *ring);
int io_uring_register_iowq_aff(struct io_uring *ring, size_t cpusz,
			       const cpu_set_t *mask);
int io_uring_unregister_iowq_aff(struct io_uring *ring);
int io_uring_register_iowq_max_workers(struct io_uring *ring,
				       unsigned int *values);

/*
 * Helper for the peek/wait single cqe functions. Exported because of that,
 * but probably shouldn't be used directly in an application.
 */
int __io_uring_get_cqe(struct io_uring *ring,
		       struct io_uring_cqe **cqe_ptr, unsigned submit,
		       unsigned wait_nr, sigset_t *sigmask);

#define LIBURING_UDATA_TIMEOUT	((__u64) -1)

#define io_uring_for_each_cqe(ring, head, cqe)				\
	/*								\
	 * io_uring_smp_load_acquire() enforces the order of tail	\
	 * and CQE reads.						\
	 */								\
	for (head = *(ring)->cq.khead;					\
	     (cqe = (head != io_uring_smp_load_acquire((ring)->cq.ktail) ? \
		&(ring)->cq.cqes[head & (*(ring)->cq.kring_mask)] : NULL)); \
	     head++)

/*
 * Must be called after io_uring_for_each_cqe()
 */
static inline void io_uring_cq_advance(struct io_uring *ring,
				       unsigned nr)
{
	if (nr) {
		struct io_uring_cq *cq = &ring->cq;

		/*
		 * Ensure that the kernel only sees the new value of the head
		 * index after the CQEs have been read.
		 */
		io_uring_smp_store_release(cq->khead, *cq->khead + nr);
	}
}
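
/*
 * Usage sketch (not part of the liburing API; the helper name below is
 * hypothetical): iterate every currently available completion with
 * io_uring_for_each_cqe(), then release them in one batch with
 * io_uring_cq_advance().
 */
static inline unsigned example_drain_cq(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	unsigned head, seen = 0;

	io_uring_for_each_cqe(ring, head, cqe) {
		/* inspect cqe->res and cqe->user_data here */
		seen++;
	}
	io_uring_cq_advance(ring, seen);
	return seen;
}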

/*
 * Must be called after io_uring_{peek,wait}_cqe() after the cqe has
 * been processed by the application.
 */
static inline void io_uring_cqe_seen(struct io_uring *ring,
				     struct io_uring_cqe *cqe)
{
	if (cqe)
		io_uring_cq_advance(ring, 1);
}

/*
 * Command prep helpers
 */
static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
{
	sqe->user_data = (unsigned long) data;
}

static inline void *io_uring_cqe_get_data(const struct io_uring_cqe *cqe)
{
	return (void *) (uintptr_t) cqe->user_data;
}

static inline void io_uring_sqe_set_flags(struct io_uring_sqe *sqe,
					  unsigned flags)
{
	sqe->flags = (__u8) flags;
}

static inline void __io_uring_set_target_fixed_file(struct io_uring_sqe *sqe,
						    unsigned int file_index)
{
	/* 0 means no fixed files, indexes should be encoded as "index + 1" */
	sqe->file_index = file_index + 1;
}

static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
				    const void *addr, unsigned len,
				    __u64 offset)
{
	sqe->opcode = (__u8) op;
	sqe->flags = 0;
	sqe->ioprio = 0;
	sqe->fd = fd;
	sqe->off = offset;
	sqe->addr = (unsigned long) addr;
	sqe->len = len;
	sqe->rw_flags = 0;
	sqe->user_data = 0;
	sqe->buf_index = 0;
	sqe->personality = 0;
	sqe->file_index = 0;
	sqe->__pad2[0] = sqe->__pad2[1] = 0;
}

/**
 * @pre Either fd_in or fd_out must be a pipe.
 * @param off_in If fd_in refers to a pipe, off_in must be (int64_t) -1;
 *               If fd_in does not refer to a pipe and off_in is (int64_t) -1,
 *               then bytes are read from fd_in starting from the file offset,
 *               which is adjusted appropriately;
 *               If fd_in does not refer to a pipe and off_in is not
 *               (int64_t) -1, then the starting offset of fd_in will be
 *               off_in.
 * @param off_out The description of off_in also applies to off_out.
 * @param splice_flags see splice(2) for a description of the flags.
 *
 * This splice operation can be used to implement sendfile by splicing to an
 * intermediate pipe first, then splicing to the final destination.
 * In fact, the in-kernel implementation of sendfile uses splice internally.
 *
 * NOTE that even if fd_in or fd_out refers to a pipe, the splice operation
 * can still fail with EINVAL if one of the fds does not explicitly support
 * splice, e.g. reading from a terminal is unsupported from kernel 5.7 to
 * 5.11. Check issue #291 for more information.
 */
static inline void io_uring_prep_splice(struct io_uring_sqe *sqe,
					int fd_in, int64_t off_in,
					int fd_out, int64_t off_out,
					unsigned int nbytes,
					unsigned int splice_flags)
{
	io_uring_prep_rw(IORING_OP_SPLICE, sqe, fd_out, NULL, nbytes,
			 (__u64) off_out);
	sqe->splice_off_in = (__u64) off_in;
	sqe->splice_fd_in = fd_in;
	sqe->splice_flags = splice_flags;
}
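
/*
 * Usage sketch (not part of the liburing API; names are hypothetical and
 * error handling for a full SQ ring is omitted): a sendfile-style copy of
 * 'nbytes' from 'file_fd' to 'sock_fd' through the pipe 'pipe_fds', queued
 * as two linked splice requests.
 */
static inline void example_queue_sendfile(struct io_uring *ring, int file_fd,
					  int sock_fd, const int pipe_fds[2],
					  unsigned int nbytes)
{
	struct io_uring_sqe *sqe;

	/* file -> pipe write end, reading from the file's current offset */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_splice(sqe, file_fd, (int64_t) -1, pipe_fds[1],
			     (int64_t) -1, nbytes, 0);
	sqe->flags |= IOSQE_IO_LINK;

	/* pipe read end -> socket, only runs if the first splice succeeded */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_splice(sqe, pipe_fds[0], (int64_t) -1, sock_fd,
			     (int64_t) -1, nbytes, 0);
}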

static inline void io_uring_prep_tee(struct io_uring_sqe *sqe,
				     int fd_in, int fd_out,
				     unsigned int nbytes,
				     unsigned int splice_flags)
{
	io_uring_prep_rw(IORING_OP_TEE, sqe, fd_out, NULL, nbytes, 0);
	sqe->splice_off_in = 0;
	sqe->splice_fd_in = fd_in;
	sqe->splice_flags = splice_flags;
}

static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
				       const struct iovec *iovecs,
				       unsigned nr_vecs, __u64 offset)
{
	io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
}

static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
					    void *buf, unsigned nbytes,
					    __u64 offset, int buf_index)
{
	io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
	sqe->buf_index = (__u16) buf_index;
}

static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
					const struct iovec *iovecs,
					unsigned nr_vecs, __u64 offset)
{
	io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
}

static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
					     const void *buf, unsigned nbytes,
					     __u64 offset, int buf_index)
{
	io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
	sqe->buf_index = (__u16) buf_index;
}

static inline void io_uring_prep_recvmsg(struct io_uring_sqe *sqe, int fd,
					 struct msghdr *msg, unsigned flags)
{
	io_uring_prep_rw(IORING_OP_RECVMSG, sqe, fd, msg, 1, 0);
	sqe->msg_flags = flags;
}

static inline void io_uring_prep_sendmsg(struct io_uring_sqe *sqe, int fd,
					 const struct msghdr *msg, unsigned flags)
{
	io_uring_prep_rw(IORING_OP_SENDMSG, sqe, fd, msg, 1, 0);
	sqe->msg_flags = flags;
}

static inline unsigned __io_uring_prep_poll_mask(unsigned poll_mask)
{
#if __BYTE_ORDER == __BIG_ENDIAN
	poll_mask = __swahw32(poll_mask);
#endif
	return poll_mask;
}

static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
					  unsigned poll_mask)
{
	io_uring_prep_rw(IORING_OP_POLL_ADD, sqe, fd, NULL, 0, 0);
	sqe->poll32_events = __io_uring_prep_poll_mask(poll_mask);
}

static inline void io_uring_prep_poll_multishot(struct io_uring_sqe *sqe,
						int fd, unsigned poll_mask)
{
	io_uring_prep_poll_add(sqe, fd, poll_mask);
	sqe->len = IORING_POLL_ADD_MULTI;
}

static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
					     void *user_data)
{
	io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, user_data, 0, 0);
}

static inline void io_uring_prep_poll_update(struct io_uring_sqe *sqe,
					     void *old_user_data,
					     void *new_user_data,
					     unsigned poll_mask, unsigned flags)
{
	io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, old_user_data, flags,
			 (__u64)(uintptr_t)new_user_data);
	sqe->poll32_events = __io_uring_prep_poll_mask(poll_mask);
}

static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
				       unsigned fsync_flags)
{
	io_uring_prep_rw(IORING_OP_FSYNC, sqe, fd, NULL, 0, 0);
	sqe->fsync_flags = fsync_flags;
}

static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
{
	io_uring_prep_rw(IORING_OP_NOP, sqe, -1, NULL, 0, 0);
}

static inline void io_uring_prep_timeout(struct io_uring_sqe *sqe,
					 struct __kernel_timespec *ts,
					 unsigned count, unsigned flags)
{
	io_uring_prep_rw(IORING_OP_TIMEOUT, sqe, -1, ts, 1, count);
	sqe->timeout_flags = flags;
}

static inline void io_uring_prep_timeout_remove(struct io_uring_sqe *sqe,
						__u64 user_data, unsigned flags)
{
	io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1,
			 (void *)(unsigned long)user_data, 0, 0);
	sqe->timeout_flags = flags;
}

static inline void io_uring_prep_timeout_update(struct io_uring_sqe *sqe,
						struct __kernel_timespec *ts,
						__u64 user_data, unsigned flags)
{
	io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1,
			 (void *)(unsigned long)user_data, 0,
			 (uintptr_t)ts);
	sqe->timeout_flags = flags | IORING_TIMEOUT_UPDATE;
}

static inline void io_uring_prep_accept(struct io_uring_sqe *sqe, int fd,
					struct sockaddr *addr,
					socklen_t *addrlen, int flags)
{
	io_uring_prep_rw(IORING_OP_ACCEPT, sqe, fd, addr, 0,
			 (__u64) (unsigned long) addrlen);
	sqe->accept_flags = (__u32) flags;
}

/* accept directly into the fixed file table */
static inline void io_uring_prep_accept_direct(struct io_uring_sqe *sqe, int fd,
					       struct sockaddr *addr,
					       socklen_t *addrlen, int flags,
					       unsigned int file_index)
{
	io_uring_prep_accept(sqe, fd, addr, addrlen, flags);
	__io_uring_set_target_fixed_file(sqe, file_index);
}
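
/*
 * Usage sketch (not part of the liburing API; the helper name is hypothetical
 * and error handling for a full SQ ring is omitted): accept a connection
 * directly into slot 'slot' of a previously registered fixed file table (see
 * io_uring_register_files()), so the new socket never occupies a regular
 * file descriptor.
 */
static inline void example_queue_accept_fixed(struct io_uring *ring,
					      int listen_fd, unsigned int slot)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* NULL addr/addrlen: the peer address is not needed here */
	io_uring_prep_accept_direct(sqe, listen_fd, NULL, NULL, 0, slot);
}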

static inline void io_uring_prep_cancel(struct io_uring_sqe *sqe,
					void *user_data, int flags)
{
	io_uring_prep_rw(IORING_OP_ASYNC_CANCEL, sqe, -1, user_data, 0, 0);
	sqe->cancel_flags = (__u32) flags;
}

static inline void io_uring_prep_link_timeout(struct io_uring_sqe *sqe,
					      struct __kernel_timespec *ts,
					      unsigned flags)
{
	io_uring_prep_rw(IORING_OP_LINK_TIMEOUT, sqe, -1, ts, 1, 0);
	sqe->timeout_flags = flags;
}

static inline void io_uring_prep_connect(struct io_uring_sqe *sqe, int fd,
					 const struct sockaddr *addr,
					 socklen_t addrlen)
{
	io_uring_prep_rw(IORING_OP_CONNECT, sqe, fd, addr, 0, addrlen);
}

static inline void io_uring_prep_files_update(struct io_uring_sqe *sqe,
					      int *fds, unsigned nr_fds,
					      int offset)
{
	io_uring_prep_rw(IORING_OP_FILES_UPDATE, sqe, -1, fds, nr_fds,
			 (__u64) offset);
}

static inline void io_uring_prep_fallocate(struct io_uring_sqe *sqe, int fd,
					   int mode, off_t offset, off_t len)
{
	io_uring_prep_rw(IORING_OP_FALLOCATE, sqe, fd,
			 (const uintptr_t *) (unsigned long) len,
			 (unsigned int) mode, (__u64) offset);
}

static inline void io_uring_prep_openat(struct io_uring_sqe *sqe, int dfd,
					const char *path, int flags, mode_t mode)
{
	io_uring_prep_rw(IORING_OP_OPENAT, sqe, dfd, path, mode, 0);
	sqe->open_flags = (__u32) flags;
}

/* open directly into the fixed file table */
static inline void io_uring_prep_openat_direct(struct io_uring_sqe *sqe,
					       int dfd, const char *path,
					       int flags, mode_t mode,
					       unsigned file_index)
{
	io_uring_prep_openat(sqe, dfd, path, flags, mode);
	__io_uring_set_target_fixed_file(sqe, file_index);
}

static inline void io_uring_prep_close(struct io_uring_sqe *sqe, int fd)
{
	io_uring_prep_rw(IORING_OP_CLOSE, sqe, fd, NULL, 0, 0);
}

static inline void io_uring_prep_read(struct io_uring_sqe *sqe, int fd,
				      void *buf, unsigned nbytes, __u64 offset)
{
	io_uring_prep_rw(IORING_OP_READ, sqe, fd, buf, nbytes, offset);
}

static inline void io_uring_prep_write(struct io_uring_sqe *sqe, int fd,
				       const void *buf, unsigned nbytes,
				       __u64 offset)
{
	io_uring_prep_rw(IORING_OP_WRITE, sqe, fd, buf, nbytes, offset);
}

struct statx;
static inline void io_uring_prep_statx(struct io_uring_sqe *sqe, int dfd,
				       const char *path, int flags,
				       unsigned mask,
				       struct statx *statxbuf)
{
	io_uring_prep_rw(IORING_OP_STATX, sqe, dfd, path, mask,
			 (__u64) (unsigned long) statxbuf);
	sqe->statx_flags = (__u32) flags;
}

static inline void io_uring_prep_fadvise(struct io_uring_sqe *sqe, int fd,
					 __u64 offset, off_t len, int advice)
{
	io_uring_prep_rw(IORING_OP_FADVISE, sqe, fd, NULL, (__u32) len, offset);
	sqe->fadvise_advice = (__u32) advice;
}

static inline void io_uring_prep_madvise(struct io_uring_sqe *sqe, void *addr,
					 off_t length, int advice)
{
	io_uring_prep_rw(IORING_OP_MADVISE, sqe, -1, addr, (__u32) length, 0);
	sqe->fadvise_advice = (__u32) advice;
}

static inline void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd,
				      const void *buf, size_t len, int flags)
{
	io_uring_prep_rw(IORING_OP_SEND, sqe, sockfd, buf, (__u32) len, 0);
	sqe->msg_flags = (__u32) flags;
}

static inline void io_uring_prep_recv(struct io_uring_sqe *sqe, int sockfd,
				      void *buf, size_t len, int flags)
{
	io_uring_prep_rw(IORING_OP_RECV, sqe, sockfd, buf, (__u32) len, 0);
	sqe->msg_flags = (__u32) flags;
}

static inline void io_uring_prep_openat2(struct io_uring_sqe *sqe, int dfd,
					 const char *path, struct open_how *how)
{
	io_uring_prep_rw(IORING_OP_OPENAT2, sqe, dfd, path, sizeof(*how),
			 (uint64_t) (uintptr_t) how);
}

/* open directly into the fixed file table */
static inline void io_uring_prep_openat2_direct(struct io_uring_sqe *sqe,
						int dfd, const char *path,
						struct open_how *how,
						unsigned file_index)
{
	io_uring_prep_openat2(sqe, dfd, path, how);
	__io_uring_set_target_fixed_file(sqe, file_index);
}

struct epoll_event;
static inline void io_uring_prep_epoll_ctl(struct io_uring_sqe *sqe, int epfd,
					   int fd, int op,
					   struct epoll_event *ev)
{
	io_uring_prep_rw(IORING_OP_EPOLL_CTL, sqe, epfd, ev,
			 (__u32) op, (__u32) fd);
}

static inline void io_uring_prep_provide_buffers(struct io_uring_sqe *sqe,
						 void *addr, int len, int nr,
						 int bgid, int bid)
{
	io_uring_prep_rw(IORING_OP_PROVIDE_BUFFERS, sqe, nr, addr, (__u32) len,
			 (__u64) bid);
	sqe->buf_group = (__u16) bgid;
}

static inline void io_uring_prep_remove_buffers(struct io_uring_sqe *sqe,
						int nr, int bgid)
{
	io_uring_prep_rw(IORING_OP_REMOVE_BUFFERS, sqe, nr, NULL, 0, 0);
	sqe->buf_group = (__u16) bgid;
}

static inline void io_uring_prep_shutdown(struct io_uring_sqe *sqe, int fd,
					  int how)
{
	io_uring_prep_rw(IORING_OP_SHUTDOWN, sqe, fd, NULL, (__u32) how, 0);
}

static inline void io_uring_prep_unlinkat(struct io_uring_sqe *sqe, int dfd,
					  const char *path, int flags)
{
	io_uring_prep_rw(IORING_OP_UNLINKAT, sqe, dfd, path, 0, 0);
	sqe->unlink_flags = (__u32) flags;
}

static inline void io_uring_prep_renameat(struct io_uring_sqe *sqe, int olddfd,
					  const char *oldpath, int newdfd,
					  const char *newpath, int flags)
{
	io_uring_prep_rw(IORING_OP_RENAMEAT, sqe, olddfd, oldpath,
			 (__u32) newdfd, (uint64_t) (uintptr_t) newpath);
	sqe->rename_flags = (__u32) flags;
}

static inline void io_uring_prep_sync_file_range(struct io_uring_sqe *sqe,
						 int fd, unsigned len,
						 __u64 offset, int flags)
{
	io_uring_prep_rw(IORING_OP_SYNC_FILE_RANGE, sqe, fd, NULL, len, offset);
	sqe->sync_range_flags = (__u32) flags;
}

static inline void io_uring_prep_mkdirat(struct io_uring_sqe *sqe, int dfd,
					 const char *path, mode_t mode)
{
	io_uring_prep_rw(IORING_OP_MKDIRAT, sqe, dfd, path, mode, 0);
}

static inline void io_uring_prep_symlinkat(struct io_uring_sqe *sqe,
					   const char *target, int newdirfd,
					   const char *linkpath)
{
	io_uring_prep_rw(IORING_OP_SYMLINKAT, sqe, newdirfd, target, 0,
			 (uint64_t) (uintptr_t) linkpath);
}

static inline void io_uring_prep_linkat(struct io_uring_sqe *sqe, int olddfd,
					const char *oldpath, int newdfd,
					const char *newpath, int flags)
{
	io_uring_prep_rw(IORING_OP_LINKAT, sqe, olddfd, oldpath, (__u32) newdfd,
			 (uint64_t) (uintptr_t) newpath);
	sqe->hardlink_flags = (__u32) flags;
}

/*
 * Returns the number of unconsumed (if SQPOLL) or unsubmitted entries that
 * exist in the SQ ring.
 */
static inline unsigned io_uring_sq_ready(const struct io_uring *ring)
{
	/*
	 * Without a barrier, we could miss an update and think the SQ wasn't
	 * ready. We don't need the load acquire for non-SQPOLL since then the
	 * application itself drives updates.
	 */
	if (ring->flags & IORING_SETUP_SQPOLL)
		return ring->sq.sqe_tail - io_uring_smp_load_acquire(ring->sq.khead);

	/* always use the real head, to avoid losing sync for short submits */
	return ring->sq.sqe_tail - *ring->sq.khead;
}

/*
 * Returns how much space is left in the SQ ring.
 */
static inline unsigned io_uring_sq_space_left(const struct io_uring *ring)
{
	return *ring->sq.kring_entries - io_uring_sq_ready(ring);
}

/*
 * Only applicable when using SQPOLL - allows the caller to wait for space
 * to free up in the SQ ring, which happens when the kernel side thread has
 * consumed one or more entries. If the SQ ring is currently non-full, no
 * action is taken. Note: may return -EINVAL if the kernel doesn't support
 * this feature.
 */
static inline int io_uring_sqring_wait(struct io_uring *ring)
{
	if (!(ring->flags & IORING_SETUP_SQPOLL))
		return 0;
	if (io_uring_sq_space_left(ring))
		return 0;

	return __io_uring_sqring_wait(ring);
}
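
/*
 * Usage sketch (not part of the liburing API; the helper name is
 * hypothetical): with IORING_SETUP_SQPOLL the SQ ring can be full until the
 * kernel thread consumes entries, so wait for space and retry once before
 * giving up.
 */
static inline struct io_uring_sqe *example_get_sqe_or_wait(struct io_uring *ring)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	if (!sqe && !io_uring_sqring_wait(ring))
		sqe = io_uring_get_sqe(ring);

	return sqe;
}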

/*
 * Returns how many unconsumed entries are ready in the CQ ring
 */
static inline unsigned io_uring_cq_ready(const struct io_uring *ring)
{
	return io_uring_smp_load_acquire(ring->cq.ktail) - *ring->cq.khead;
}

/*
 * Returns true if the eventfd notification is currently enabled
 */
static inline bool io_uring_cq_eventfd_enabled(const struct io_uring *ring)
{
	if (!ring->cq.kflags)
		return true;

	return !(*ring->cq.kflags & IORING_CQ_EVENTFD_DISABLED);
}

/*
 * Toggle eventfd notification on or off, if an eventfd is registered with
 * the ring.
 */
static inline int io_uring_cq_eventfd_toggle(struct io_uring *ring,
					     bool enabled)
{
	uint32_t flags;

	if (!!enabled == io_uring_cq_eventfd_enabled(ring))
		return 0;

	if (!ring->cq.kflags)
		return -EOPNOTSUPP;

	flags = *ring->cq.kflags;

	if (enabled)
		flags &= ~IORING_CQ_EVENTFD_DISABLED;
	else
		flags |= IORING_CQ_EVENTFD_DISABLED;

	IO_URING_WRITE_ONCE(*ring->cq.kflags, flags);

	return 0;
}
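
/*
 * Usage sketch (not part of the liburing API; the helper name is
 * hypothetical): disable eventfd wakeups while the application drains the CQ
 * ring itself, and enable them again before it goes back to blocking on the
 * eventfd it registered via io_uring_register_eventfd().
 */
static inline int example_mute_cq_eventfd(struct io_uring *ring, bool muted)
{
	/* fails with -EOPNOTSUPP on kernels without the CQ ring flags field */
	return io_uring_cq_eventfd_toggle(ring, !muted);
}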

/*
 * Return an IO completion, waiting for 'wait_nr' completions if one isn't
 * readily available. Returns 0 with cqe_ptr filled in on success, -errno on
 * failure.
 */
static inline int io_uring_wait_cqe_nr(struct io_uring *ring,
				       struct io_uring_cqe **cqe_ptr,
				       unsigned wait_nr)
{
	return __io_uring_get_cqe(ring, cqe_ptr, 0, wait_nr, NULL);
}

/*
 * Return an IO completion, if one is readily available. Returns 0 with
 * cqe_ptr filled in on success, -errno on failure.
 */
static inline int io_uring_peek_cqe(struct io_uring *ring,
				    struct io_uring_cqe **cqe_ptr)
{
	return io_uring_wait_cqe_nr(ring, cqe_ptr, 0);
}

/*
 * Return an IO completion, waiting for it if necessary. Returns 0 with
 * cqe_ptr filled in on success, -errno on failure.
 */
static inline int io_uring_wait_cqe(struct io_uring *ring,
				    struct io_uring_cqe **cqe_ptr)
{
	return io_uring_wait_cqe_nr(ring, cqe_ptr, 1);
}
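
/*
 * Usage sketch (not part of the liburing API; the helper name is
 * hypothetical): a complete submit-and-wait round trip with a NOP request,
 * showing how the prep, submit, wait and seen steps fit together.
 */
static inline int example_nop_roundtrip(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return ret;

	/* the ring was just set up, so an SQE is guaranteed to be available */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_sqe_set_data(sqe, (void *) (uintptr_t) 0x1234);

	ret = io_uring_submit(&ring);
	if (ret >= 0) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (!ret) {
			/* cqe->res carries the request's result, 0 for a NOP */
			ret = cqe->res;
			io_uring_cqe_seen(&ring, cqe);
		}
	}

	io_uring_queue_exit(&ring);
	return ret;
}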

ssize_t io_uring_mlock_size(unsigned entries, unsigned flags);
ssize_t io_uring_mlock_size_params(unsigned entries, struct io_uring_params *p);

#ifdef __cplusplus
}
#endif

#endif