Zephyr API Documentation 4.0.99
A Scalable Open Source RTOS
rtio.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2022 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
26#ifndef ZEPHYR_INCLUDE_RTIO_RTIO_H_
27#define ZEPHYR_INCLUDE_RTIO_RTIO_H_
28
29#include <string.h>
30
32#include <zephyr/device.h>
33#include <zephyr/kernel.h>
34#include <zephyr/sys/__assert.h>
35#include <zephyr/sys/atomic.h>
37#include <zephyr/sys/util.h>
40
41#ifdef __cplusplus
42extern "C" {
43#endif
44
45
65#define RTIO_PRIO_LOW 0U
66
70#define RTIO_PRIO_NORM 127U
71
75#define RTIO_PRIO_HIGH 255U
76
96#define RTIO_SQE_CHAINED BIT(0)
97
108#define RTIO_SQE_TRANSACTION BIT(1)
109
110
120#define RTIO_SQE_MEMPOOL_BUFFER BIT(2)
121
128#define RTIO_SQE_CANCELED BIT(3)
129
136#define RTIO_SQE_MULTISHOT BIT(4)
137
141#define RTIO_SQE_NO_RESPONSE BIT(5)
142
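As a rough sketch of how these flags combine in practice (assuming an RTIO context ez_io and an iodev spi_iodev defined elsewhere, along with tx_buf/rx_buf; all names here are hypothetical), a write chained to a read could be queued like this:

/* Sketch only: ez_io, spi_iodev, tx_buf and rx_buf are illustrative names. */
static void queue_write_then_read(void)
{
	struct rtio_sqe *write_sqe = rtio_sqe_acquire(&ez_io);
	struct rtio_sqe *read_sqe = rtio_sqe_acquire(&ez_io);

	rtio_sqe_prep_write(write_sqe, &spi_iodev, RTIO_PRIO_NORM, tx_buf, sizeof(tx_buf), NULL);
	write_sqe->flags |= RTIO_SQE_CHAINED; /* read_sqe waits until write_sqe completes */

	rtio_sqe_prep_read(read_sqe, &spi_iodev, RTIO_PRIO_NORM, rx_buf, sizeof(rx_buf), NULL);
}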
160#define RTIO_CQE_FLAG_MEMPOOL_BUFFER BIT(0)
161
162#define RTIO_CQE_FLAG_GET(flags) FIELD_GET(GENMASK(7, 0), (flags))
163
170#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags) FIELD_GET(GENMASK(19, 8), (flags))
171
178#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags) FIELD_GET(GENMASK(31, 20), (flags))
179
187#define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt) \
188 (FIELD_PREP(GENMASK(7, 0), RTIO_CQE_FLAG_MEMPOOL_BUFFER) | \
189 FIELD_PREP(GENMASK(19, 8), blk_idx) | FIELD_PREP(GENMASK(31, 20), blk_cnt))
190
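For instance, a completion consumer can check the flag field and decode the block index and count again (a sketch, not taken from this file; cqe is a previously consumed completion):

/* Sketch: decode mempool block information packed into a CQE's flags. */
if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) {
	uint32_t blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags);
	uint32_t blk_cnt = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags);
	/* blk_idx/blk_cnt locate the buffer inside the context's block pool */
}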
198#define RTIO_IODEV_I2C_STOP BIT(1)
199
203#define RTIO_IODEV_I2C_RESTART BIT(2)
204
208#define RTIO_IODEV_I2C_10_BITS BIT(3)
209
211struct rtio;
212struct rtio_cqe;
213struct rtio_sqe;
214struct rtio_sqe_pool;
215struct rtio_cqe_pool;
216struct rtio_iodev;
217struct rtio_iodev_sqe;
227typedef void (*rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, void *arg0);
228
232struct rtio_sqe {
233 uint8_t op;
234
235 uint8_t prio;
236
237 uint16_t flags;
238
239 uint16_t iodev_flags;
240
241 uint16_t _resv0;
242
243 const struct rtio_iodev *iodev;
252 void *userdata;
253
254 union {
255
257 struct {
258 uint32_t buf_len;
259 const uint8_t *buf;
260 } tx;
261
263 struct {
264 uint32_t buf_len;
265 uint8_t *buf;
266 } rx;
267
269 struct {
270 uint8_t buf_len;
271 uint8_t buf[7];
272 } tiny_tx;
273
275 struct {
276 rtio_callback_t callback;
277 void *arg0;
278 } callback;
279
281 struct {
282 uint32_t buf_len;
283 const uint8_t *tx_buf;
284 uint8_t *rx_buf;
285 } txrx;
286
288 uint32_t i2c_config;
289 };
290};
291
293/* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
294BUILD_ASSERT(sizeof(struct rtio_sqe) <= 64);
300struct rtio_cqe {
301 struct mpsc_node q;
302
303 int32_t result;
304 void *userdata;
305 uint32_t flags;
306};
307
308struct rtio_sqe_pool {
309 struct mpsc free_q;
310 const uint16_t pool_size;
311 uint16_t pool_free;
312 struct rtio_iodev_sqe *pool;
313};
314
315struct rtio_cqe_pool {
316 struct mpsc free_q;
317 const uint16_t pool_size;
318 uint16_t pool_free;
319 struct rtio_cqe *pool;
320};
321
333struct rtio {
334#ifdef CONFIG_RTIO_SUBMIT_SEM
335 /* A wait semaphore which may suspend the calling thread
336 * to wait for some number of completions when calling submit
337 */
338 struct k_sem *submit_sem;
339
340 uint32_t submit_count;
341#endif
342
343#ifdef CONFIG_RTIO_CONSUME_SEM
344 /* A wait semaphore which may suspend the calling thread
345 * to wait for some number of completions while consuming
346 * them from the completion queue
347 */
348 struct k_sem *consume_sem;
349#endif
350
351 /* Total number of completions */
352 atomic_t cq_count;
353
354 /* Number of completions that were unable to be submitted with results
355 * due to the cq spsc being full
356 */
357 atomic_t xcqcnt;
358
359 /* Submission queue object pool with free list */
360 struct rtio_sqe_pool *sqe_pool;
361
362 /* Complete queue object pool with free list */
363 struct rtio_cqe_pool *cqe_pool;
364
365#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
366 /* Mem block pool */
367 struct sys_mem_blocks *block_pool;
368#endif
369
370 /* Submission queue */
371 struct mpsc sq;
372
373 /* Completion queue */
374 struct mpsc cq;
375};
376
378extern struct k_mem_partition rtio_partition;
379
387static inline size_t rtio_mempool_block_size(const struct rtio *r)
388{
389#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
390 ARG_UNUSED(r);
391 return 0;
392#else
393 if (r == NULL || r->block_pool == NULL) {
394 return 0;
395 }
396 return BIT(r->block_pool->info.blk_sz_shift);
397#endif
398}
399
407#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
408static inline uint16_t __rtio_compute_mempool_block_index(const struct rtio *r, const void *ptr)
409{
410 uintptr_t addr = (uintptr_t)ptr;
411 struct sys_mem_blocks *mem_pool = r->block_pool;
412 uint32_t block_size = rtio_mempool_block_size(r);
413
414 uintptr_t buff = (uintptr_t)mem_pool->buffer;
415 uint32_t buff_size = mem_pool->info.num_blocks * block_size;
416
417 if (addr < buff || addr >= buff + buff_size) {
418 return UINT16_MAX;
419 }
420 return (addr - buff) / block_size;
421}
422#endif
423
429struct rtio_iodev_sqe {
430 struct rtio_sqe sqe;
431 struct mpsc_node q;
432 struct rtio_iodev_sqe *next;
433 struct rtio *r;
434};
435
439struct rtio_iodev_api {
448 void (*submit)(struct rtio_iodev_sqe *iodev_sqe);
449};
450
454struct rtio_iodev {
455 /* Function pointer table */
456 const struct rtio_iodev_api *api;
457
458 /* Data associated with this iodev */
459 void *data;
460};
461
463#define RTIO_OP_NOP 0
464
466#define RTIO_OP_RX (RTIO_OP_NOP+1)
467
469#define RTIO_OP_TX (RTIO_OP_RX+1)
470
472#define RTIO_OP_TINY_TX (RTIO_OP_TX+1)
473
475#define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)
476
478#define RTIO_OP_TXRX (RTIO_OP_CALLBACK+1)
479
481#define RTIO_OP_I2C_RECOVER (RTIO_OP_TXRX+1)
482
484#define RTIO_OP_I2C_CONFIGURE (RTIO_OP_I2C_RECOVER+1)
485
489static inline void rtio_sqe_prep_nop(struct rtio_sqe *sqe,
490 const struct rtio_iodev *iodev,
491 void *userdata)
492{
493 memset(sqe, 0, sizeof(struct rtio_sqe));
494 sqe->op = RTIO_OP_NOP;
495 sqe->iodev = iodev;
496 sqe->userdata = userdata;
497}
498
502static inline void rtio_sqe_prep_read(struct rtio_sqe *sqe,
503 const struct rtio_iodev *iodev,
504 int8_t prio,
505 uint8_t *buf,
506 uint32_t len,
507 void *userdata)
508{
509 memset(sqe, 0, sizeof(struct rtio_sqe));
510 sqe->op = RTIO_OP_RX;
511 sqe->prio = prio;
512 sqe->iodev = iodev;
513 sqe->rx.buf_len = len;
514 sqe->rx.buf = buf;
515 sqe->userdata = userdata;
516}
517
523static inline void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe,
524 const struct rtio_iodev *iodev, int8_t prio,
525 void *userdata)
526{
527 rtio_sqe_prep_read(sqe, iodev, prio, NULL, 0, userdata);
528 sqe->flags = RTIO_SQE_MEMPOOL_BUFFER;
529}
530
531static inline void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe,
532 const struct rtio_iodev *iodev, int8_t prio,
533 void *userdata)
534{
535 rtio_sqe_prep_read_with_pool(sqe, iodev, prio, userdata);
536 sqe->flags |= RTIO_SQE_MULTISHOT;
537}
538
542static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe,
543 const struct rtio_iodev *iodev,
544 int8_t prio,
545 const uint8_t *buf,
546 uint32_t len,
547 void *userdata)
548{
549 memset(sqe, 0, sizeof(struct rtio_sqe));
550 sqe->op = RTIO_OP_TX;
551 sqe->prio = prio;
552 sqe->iodev = iodev;
553 sqe->tx.buf_len = len;
554 sqe->tx.buf = buf;
555 sqe->userdata = userdata;
556}
557
568static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe,
569 const struct rtio_iodev *iodev,
570 int8_t prio,
571 const uint8_t *tiny_write_data,
572 uint8_t tiny_write_len,
573 void *userdata)
574{
575 __ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_tx.buf));
576
577 memset(sqe, 0, sizeof(struct rtio_sqe));
578 sqe->op = RTIO_OP_TINY_TX;
579 sqe->prio = prio;
580 sqe->iodev = iodev;
581 sqe->tiny_tx.buf_len = tiny_write_len;
582 memcpy(sqe->tiny_tx.buf, tiny_write_data, tiny_write_len);
583 sqe->userdata = userdata;
584}
585
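A typical use is pushing a short register write where the source bytes need not outlive the call, since they are copied into the SQE itself (sketch; ez_io and i2c_iodev are hypothetical names):

/* Sketch: the two bytes are copied into sqe->tiny_tx.buf, so reg[] may be stack-local. */
static void queue_reg_write(void)
{
	uint8_t reg[2] = { 0x0F, 0x01 };
	struct rtio_sqe *sqe = rtio_sqe_acquire(&ez_io);

	rtio_sqe_prep_tiny_write(sqe, &i2c_iodev, RTIO_PRIO_NORM, reg, sizeof(reg), NULL);
}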
594static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
595 rtio_callback_t callback,
596 void *arg0,
597 void *userdata)
598{
599 memset(sqe, 0, sizeof(struct rtio_sqe));
600 sqe->op = RTIO_OP_CALLBACK;
601 sqe->prio = 0;
602 sqe->iodev = NULL;
603 sqe->callback.callback = callback;
604 sqe->callback.arg0 = arg0;
605 sqe->userdata = userdata;
606}
607
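A callback op lets a chain run application code on the executor once the preceding operations finish; roughly as follows (a sketch, names illustrative):

/* Sketch: a callback SQE that could be chained after I/O SQEs. */
static void on_done(struct rtio *r, const struct rtio_sqe *sqe, void *arg0)
{
	/* arg0 is the pointer passed to rtio_sqe_prep_callback() below */
}

static void queue_callback(struct rtio *r, void *my_arg)
{
	struct rtio_sqe *cb_sqe = rtio_sqe_acquire(r);

	rtio_sqe_prep_callback(cb_sqe, on_done, my_arg, NULL);
}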
618static inline void rtio_sqe_prep_callback_no_cqe(struct rtio_sqe *sqe,
619 rtio_callback_t callback,
620 void *arg0,
621 void *userdata)
622{
623 rtio_sqe_prep_callback(sqe, callback, arg0, userdata);
624 sqe->flags |= RTIO_SQE_NO_RESPONSE;
625}
626
630static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
631 const struct rtio_iodev *iodev,
632 int8_t prio,
633 const uint8_t *tx_buf,
634 uint8_t *rx_buf,
635 uint32_t buf_len,
636 void *userdata)
637{
638 memset(sqe, 0, sizeof(struct rtio_sqe));
639 sqe->op = RTIO_OP_TXRX;
640 sqe->prio = prio;
641 sqe->iodev = iodev;
642 sqe->txrx.buf_len = buf_len;
643 sqe->txrx.tx_buf = tx_buf;
644 sqe->txrx.rx_buf = rx_buf;
645 sqe->userdata = userdata;
646}
647
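A full-duplex exchange (e.g. over SPI) where the write and read buffers share one length might be prepared as follows (sketch; ez_io, spi_iodev, cmd and resp are hypothetical):

/* Sketch: write cmd[] while simultaneously reading the same number of bytes into resp[]. */
static void queue_transceive(void)
{
	struct rtio_sqe *sqe = rtio_sqe_acquire(&ez_io);

	rtio_sqe_prep_transceive(sqe, &spi_iodev, RTIO_PRIO_NORM, cmd, resp, sizeof(cmd), NULL);
}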
648static inline struct rtio_iodev_sqe *rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
649{
650 struct mpsc_node *node = mpsc_pop(&pool->free_q);
651
652 if (node == NULL) {
653 return NULL;
654 }
655
656 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
657
658 pool->pool_free--;
659
660 return iodev_sqe;
661}
662
663static inline void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
664{
665 mpsc_push(&pool->free_q, &iodev_sqe->q);
666
667 pool->pool_free++;
668}
669
670static inline struct rtio_cqe *rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
671{
672 struct mpsc_node *node = mpsc_pop(&pool->free_q);
673
674 if (node == NULL) {
675 return NULL;
676 }
677
678 struct rtio_cqe *cqe = CONTAINER_OF(node, struct rtio_cqe, q);
679
680 memset(cqe, 0, sizeof(struct rtio_cqe));
681
682 pool->pool_free--;
683
684 return cqe;
685}
686
687static inline void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
688{
689 mpsc_push(&pool->free_q, &cqe->q);
690
691 pool->pool_free++;
692}
693
694static inline int rtio_block_pool_alloc(struct rtio *r, size_t min_sz,
695 size_t max_sz, uint8_t **buf, uint32_t *buf_len)
696{
697#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
698 ARG_UNUSED(r);
699 ARG_UNUSED(min_sz);
700 ARG_UNUSED(max_sz);
701 ARG_UNUSED(buf);
702 ARG_UNUSED(buf_len);
703 return -ENOTSUP;
704#else
705 const uint32_t block_size = rtio_mempool_block_size(r);
706 uint32_t bytes = max_sz;
707
708 /* Not every context has a block pool and the block size may return 0 in
709 * that case
710 */
711 if (block_size == 0) {
712 return -ENOMEM;
713 }
714
715 do {
716 size_t num_blks = DIV_ROUND_UP(bytes, block_size);
717 int rc = sys_mem_blocks_alloc_contiguous(r->block_pool, num_blks, (void **)buf);
718
719 if (rc == 0) {
720 *buf_len = num_blks * block_size;
721 return 0;
722 }
723
724 if (bytes <= block_size) {
725 break;
726 }
727
728 bytes -= block_size;
729 } while (bytes >= min_sz);
730
731 return -ENOMEM;
732#endif
733}
734
735static inline void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len)
736{
737#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
738 ARG_UNUSED(r);
739 ARG_UNUSED(buf);
740 ARG_UNUSED(buf_len);
741#else
742 size_t num_blks = buf_len >> r->block_pool->info.blk_sz_shift;
743
744 sys_mem_blocks_free_contiguous(r->block_pool, buf, num_blks);
745#endif
746}
747
748/* Do not try and reformat the macros */
749/* clang-format off */
750
758#define RTIO_IODEV_DEFINE(name, iodev_api, iodev_data) \
759 STRUCT_SECTION_ITERABLE(rtio_iodev, name) = { \
760 .api = (iodev_api), \
761 .data = (iodev_data), \
762 }
763
764#define Z_RTIO_SQE_POOL_DEFINE(name, sz) \
765 static struct rtio_iodev_sqe CONCAT(_sqe_pool_, name)[sz]; \
766 STRUCT_SECTION_ITERABLE(rtio_sqe_pool, name) = { \
767 .free_q = MPSC_INIT((name.free_q)), \
768 .pool_size = sz, \
769 .pool_free = sz, \
770 .pool = CONCAT(_sqe_pool_, name), \
771 }
772
773
774#define Z_RTIO_CQE_POOL_DEFINE(name, sz) \
775 static struct rtio_cqe CONCAT(_cqe_pool_, name)[sz]; \
776 STRUCT_SECTION_ITERABLE(rtio_cqe_pool, name) = { \
777 .free_q = MPSC_INIT((name.free_q)), \
778 .pool_size = sz, \
779 .pool_free = sz, \
780 .pool = CONCAT(_cqe_pool_, name), \
781 }
782
792#define RTIO_BMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_BMEM(rtio_partition) static), (static))
793
803#define RTIO_DMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_DMEM(rtio_partition) static), (static))
804
805#define Z_RTIO_BLOCK_POOL_DEFINE(name, blk_sz, blk_cnt, blk_align) \
806 RTIO_BMEM uint8_t __aligned(WB_UP(blk_align)) \
807 CONCAT(_block_pool_, name)[blk_cnt*WB_UP(blk_sz)]; \
808 _SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF(name, WB_UP(blk_sz), blk_cnt, \
809 CONCAT(_block_pool_, name), RTIO_DMEM)
810
811#define Z_RTIO_DEFINE(name, _sqe_pool, _cqe_pool, _block_pool) \
812 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, \
813 (static K_SEM_DEFINE(CONCAT(_submit_sem_, name), 0, K_SEM_MAX_LIMIT))) \
814 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, \
815 (static K_SEM_DEFINE(CONCAT(_consume_sem_, name), 0, K_SEM_MAX_LIMIT))) \
816 STRUCT_SECTION_ITERABLE(rtio, name) = { \
817 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_sem = &CONCAT(_submit_sem_, name),)) \
818 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_count = 0,)) \
819 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, (.consume_sem = &CONCAT(_consume_sem_, name),))\
820 .cq_count = ATOMIC_INIT(0), \
821 .xcqcnt = ATOMIC_INIT(0), \
822 .sqe_pool = _sqe_pool, \
823 .cqe_pool = _cqe_pool, \
824 IF_ENABLED(CONFIG_RTIO_SYS_MEM_BLOCKS, (.block_pool = _block_pool,)) \
825 .sq = MPSC_INIT((name.sq)), \
826 .cq = MPSC_INIT((name.cq)), \
827 }
828
836#define RTIO_DEFINE(name, sq_sz, cq_sz) \
837 Z_RTIO_SQE_POOL_DEFINE(CONCAT(name, _sqe_pool), sq_sz); \
838 Z_RTIO_CQE_POOL_DEFINE(CONCAT(name, _cqe_pool), cq_sz); \
839 Z_RTIO_DEFINE(name, &CONCAT(name, _sqe_pool), \
840 &CONCAT(name, _cqe_pool), NULL)
841
842/* clang-format on */
843
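For example, a context with room for eight in-flight submissions and eight pending completions could be declared statically (illustrative name and sizes):

/* Sketch: statically define an RTIO context named ez_io with 8 SQEs and 8 CQEs. */
RTIO_DEFINE(ez_io, 8, 8);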
854#define RTIO_DEFINE_WITH_MEMPOOL(name, sq_sz, cq_sz, num_blks, blk_size, balign) \
855 Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz); \
856 Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz); \
857 Z_RTIO_BLOCK_POOL_DEFINE(name##_block_pool, blk_size, num_blks, balign); \
858 Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, &name##_block_pool)
859
860/* clang-format on */
861
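Analogously, a context whose RTIO_SQE_MEMPOOL_BUFFER reads draw from a shared block pool might be declared as (sketch; name and sizes are arbitrary):

/* Sketch: 8 SQEs/CQEs plus a pool of 16 blocks of 64 bytes, aligned to 4 bytes. */
RTIO_DEFINE_WITH_MEMPOOL(sensor_io, 8, 8, 16, 64, 4);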
869static inline uint32_t rtio_sqe_acquirable(struct rtio *r)
870{
871 return r->sqe_pool->pool_free;
872}
873
882static inline struct rtio_iodev_sqe *rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
883{
884 if (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) {
885 return iodev_sqe->next;
886 } else {
887 return NULL;
888 }
889}
890
891
900static inline struct rtio_iodev_sqe *rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
901{
902 if (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) {
903 return iodev_sqe->next;
904 } else {
905 return NULL;
906 }
907}
908
917static inline struct rtio_iodev_sqe *rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
918{
919 return iodev_sqe->next;
920}
921
930static inline struct rtio_sqe *rtio_sqe_acquire(struct rtio *r)
931{
932 struct rtio_iodev_sqe *iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool);
933
934 if (iodev_sqe == NULL) {
935 return NULL;
936 }
937
938 mpsc_push(&r->sq, &iodev_sqe->q);
939
940 return &iodev_sqe->sqe;
941}
942
948static inline void rtio_sqe_drop_all(struct rtio *r)
949{
950 struct rtio_iodev_sqe *iodev_sqe;
951 struct mpsc_node *node = mpsc_pop(&r->sq);
952
953 while (node != NULL) {
954 iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
955 rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
956 node = mpsc_pop(&r->sq);
957 }
958}
959
963static inline struct rtio_cqe *rtio_cqe_acquire(struct rtio *r)
964{
965 struct rtio_cqe *cqe = rtio_cqe_pool_alloc(r->cqe_pool);
966
967 if (cqe == NULL) {
968 return NULL;
969 }
970
971 memset(cqe, 0, sizeof(struct rtio_cqe));
972
973 return cqe;
974}
975
979static inline void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
980{
981 mpsc_push(&r->cq, &cqe->q);
982}
983
995static inline struct rtio_cqe *rtio_cqe_consume(struct rtio *r)
996{
997 struct mpsc_node *node;
998 struct rtio_cqe *cqe = NULL;
999
1000#ifdef CONFIG_RTIO_CONSUME_SEM
1001 if (k_sem_take(r->consume_sem, K_NO_WAIT) != 0) {
1002 return NULL;
1003 }
1004#endif
1005
1006 node = mpsc_pop(&r->cq);
1007 if (node == NULL) {
1008 return NULL;
1009 }
1010 cqe = CONTAINER_OF(node, struct rtio_cqe, q);
1011
1012 return cqe;
1013}
1014
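A non-blocking drain loop built on this helper could look like the following (sketch; ez_io is a hypothetical context):

/* Sketch: consume and release every completion that is currently available. */
static void drain_completions(void)
{
	struct rtio_cqe *cqe;

	while ((cqe = rtio_cqe_consume(&ez_io)) != NULL) {
		if (cqe->result < 0) {
			/* per-operation error; cqe->userdata identifies the request */
		}
		rtio_cqe_release(&ez_io, cqe);
	}
}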
1025static inline struct rtio_cqe *rtio_cqe_consume_block(struct rtio *r)
1026{
1027 struct mpsc_node *node;
1028 struct rtio_cqe *cqe;
1029
1030#ifdef CONFIG_RTIO_CONSUME_SEM
1031 k_sem_take(r->consume_sem, K_FOREVER);
1032#endif
1033 node = mpsc_pop(&r->cq);
1034 while (node == NULL) {
1035 Z_SPIN_DELAY(1);
1036 node = mpsc_pop(&r->cq);
1037 }
1038 cqe = CONTAINER_OF(node, struct rtio_cqe, q);
1039
1040 return cqe;
1041}
1042
1049static inline void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
1050{
1051 rtio_cqe_pool_free(r->cqe_pool, cqe);
1052}
1053
1060static inline uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
1061{
1062 uint32_t flags = 0;
1063
1064#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1065 if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) {
1066 struct rtio *r = iodev_sqe->r;
1067 struct sys_mem_blocks *mem_pool = r->block_pool;
1068 int blk_index = (iodev_sqe->sqe.rx.buf - mem_pool->buffer) >>
1069 mem_pool->info.blk_sz_shift;
1070 int blk_count = iodev_sqe->sqe.rx.buf_len >> mem_pool->info.blk_sz_shift;
1071
1072 flags = RTIO_CQE_FLAG_PREP_MEMPOOL(blk_index, blk_count);
1073 }
1074#else
1075 ARG_UNUSED(iodev_sqe);
1076#endif
1077
1078 return flags;
1079}
1080
1096__syscall int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1097 uint8_t **buff, uint32_t *buff_len);
1098
1099static inline int z_impl_rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1100 uint8_t **buff, uint32_t *buff_len)
1101{
1102#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1103 if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) {
1104 int blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags);
1105 int blk_count = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags);
1106 uint32_t blk_size = rtio_mempool_block_size(r);
1107
1108 *buff = r->block_pool->buffer + blk_idx * blk_size;
1109 *buff_len = blk_count * blk_size;
1110 __ASSERT_NO_MSG(*buff >= r->block_pool->buffer);
1111 __ASSERT_NO_MSG(*buff <
1112 r->block_pool->buffer + blk_size * r->block_pool->info.num_blocks);
1113 return 0;
1114 }
1115 return -EINVAL;
1116#else
1117 ARG_UNUSED(r);
1118 ARG_UNUSED(cqe);
1119 ARG_UNUSED(buff);
1120 ARG_UNUSED(buff_len);
1121
1122 return -ENOTSUP;
1123#endif
1124}
1125
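When mempool-backed reads are in use, the consumer typically pairs this call with rtio_release_buffer() once it is done with the data (sketch; ez_io is hypothetical and cqe is a consumed completion):

/* Sketch: recover a mempool-backed read buffer from a CQE, then return it to the pool. */
uint8_t *buf;
uint32_t buf_len;

if (rtio_cqe_get_mempool_buffer(&ez_io, cqe, &buf, &buf_len) == 0) {
	/* process buf[0..buf_len) */
	rtio_release_buffer(&ez_io, buf, buf_len);
}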
1127void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result);
1128void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result);
1129
1138static inline void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
1139{
1140 rtio_executor_ok(iodev_sqe, result);
1141}
1142
1151static inline void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
1152{
1153 rtio_executor_err(iodev_sqe, result);
1154}
1155
1167static inline void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
1168{
1169 struct rtio_cqe *cqe = rtio_cqe_acquire(r);
1170
1171 if (cqe == NULL) {
1172 atomic_inc(&r->xcqcnt);
1173 } else {
1174 cqe->result = result;
1175 cqe->userdata = userdata;
1176 cqe->flags = flags;
1177 rtio_cqe_produce(r, cqe);
1178 }
1179
1180 atomic_inc(&r->cq_count);
1181#ifdef CONFIG_RTIO_SUBMIT_SEM
1182 if (r->submit_count > 0) {
1183 r->submit_count--;
1184 if (r->submit_count == 0) {
1185 k_sem_give(r->submit_sem);
1186 }
1187 }
1188#endif
1189#ifdef CONFIG_RTIO_CONSUME_SEM
1190 k_sem_give(r->consume_sem);
1191#endif
1192}
1193
1194#define __RTIO_MEMPOOL_GET_NUM_BLKS(num_bytes, blk_size) (((num_bytes) + (blk_size)-1) / (blk_size))
1195
1208static inline int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len,
1209 uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
1210{
1211 struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe;
1212
1213#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1214 if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) {
1215 struct rtio *r = iodev_sqe->r;
1216
1217 if (sqe->rx.buf != NULL) {
1218 if (sqe->rx.buf_len < min_buf_len) {
1219 return -ENOMEM;
1220 }
1221 *buf = sqe->rx.buf;
1222 *buf_len = sqe->rx.buf_len;
1223 return 0;
1224 }
1225
1226 int rc = rtio_block_pool_alloc(r, min_buf_len, max_buf_len, buf, buf_len);
1227 if (rc == 0) {
1228 sqe->rx.buf = *buf;
1229 sqe->rx.buf_len = *buf_len;
1230 return 0;
1231 }
1232
1233 return -ENOMEM;
1234 }
1235#else
1236 ARG_UNUSED(max_buf_len);
1237#endif
1238
1239 if (sqe->rx.buf_len < min_buf_len) {
1240 return -ENOMEM;
1241 }
1242
1243 *buf = sqe->rx.buf;
1244 *buf_len = sqe->rx.buf_len;
1245 return 0;
1246}
1247
1262__syscall void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len);
1263
1264static inline void z_impl_rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
1265{
1266#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1267 if (r == NULL || buff == NULL || r->block_pool == NULL || buff_len == 0) {
1268 return;
1269 }
1270
1271 rtio_block_pool_free(r, buff, buff_len);
1272#else
1273 ARG_UNUSED(r);
1274 ARG_UNUSED(buff);
1275 ARG_UNUSED(buff_len);
1276#endif
1277}
1278
1282static inline void rtio_access_grant(struct rtio *r, struct k_thread *t)
1283{
1284 k_object_access_grant(r, t);
1285
1286#ifdef CONFIG_RTIO_SUBMIT_SEM
1287 k_object_access_grant(r->submit_sem, t);
1288#endif
1289
1290#ifdef CONFIG_RTIO_CONSUME_SEM
1291 k_object_access_grant(r->consume_sem, t);
1292#endif
1293}
1294
1305__syscall int rtio_sqe_cancel(struct rtio_sqe *sqe);
1306
1307static inline int z_impl_rtio_sqe_cancel(struct rtio_sqe *sqe)
1308{
1309 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);
1310
1311 do {
1312 iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED;
1313 iodev_sqe = rtio_iodev_sqe_next(iodev_sqe);
1314 } while (iodev_sqe != NULL);
1315
1316 return 0;
1317}
1318
1334__syscall int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1335 struct rtio_sqe **handle, size_t sqe_count);
1336
1337static inline int z_impl_rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1338 struct rtio_sqe **handle,
1339 size_t sqe_count)
1340{
1341 struct rtio_sqe *sqe;
1342 uint32_t acquirable = rtio_sqe_acquirable(r);
1343
1344 if (acquirable < sqe_count) {
1345 return -ENOMEM;
1346 }
1347
1348 for (unsigned long i = 0; i < sqe_count; i++) {
1349 sqe = rtio_sqe_acquire(r);
1350 __ASSERT_NO_MSG(sqe != NULL);
1351 if (handle != NULL && i == 0) {
1352 *handle = sqe;
1353 }
1354 *sqe = sqes[i];
1355 }
1356
1357 return 0;
1358}
1359
1376static inline int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
1377{
1378 return rtio_sqe_copy_in_get_handles(r, sqes, NULL, sqe_count);
1379}
1380
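Callers that may need to cancel later usually keep the handle returned by rtio_sqe_copy_in_get_handles(); a rough sketch with hypothetical names (ez_io, sensor_iodev):

/* Sketch: copy one prepared multishot read in, keep its handle, cancel it later. */
static struct rtio_sqe *start_stream(void)
{
	struct rtio_sqe sqe;
	struct rtio_sqe *handle;

	rtio_sqe_prep_read_multishot(&sqe, &sensor_iodev, RTIO_PRIO_NORM, NULL);
	rtio_sqe_copy_in_get_handles(&ez_io, &sqe, &handle, 1);
	rtio_submit(&ez_io, 0);

	return handle; /* rtio_sqe_cancel(handle) stops the multishot read */
}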
1396__syscall int rtio_cqe_copy_out(struct rtio *r,
1397 struct rtio_cqe *cqes,
1398 size_t cqe_count,
1399 k_timeout_t timeout);
1400static inline int z_impl_rtio_cqe_copy_out(struct rtio *r,
1401 struct rtio_cqe *cqes,
1402 size_t cqe_count,
1403 k_timeout_t timeout)
1404{
1405 size_t copied = 0;
1406 struct rtio_cqe *cqe;
1407 k_timepoint_t end = sys_timepoint_calc(timeout);
1408
1409 do {
1410 cqe = K_TIMEOUT_EQ(timeout, K_FOREVER) ? rtio_cqe_consume_block(r)
1411 : rtio_cqe_consume(r);
1412 if (cqe == NULL) {
1413 Z_SPIN_DELAY(25);
1414 continue;
1415 }
1416 cqes[copied++] = *cqe;
1417 rtio_cqe_release(r, cqe);
1418 } while (copied < cqe_count && !sys_timepoint_expired(end));
1419
1420 return copied;
1421}
1422
1436__syscall int rtio_submit(struct rtio *r, uint32_t wait_count);
1437
1438static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
1439{
1440 int res = 0;
1441
1442#ifdef CONFIG_RTIO_SUBMIT_SEM
1443 /* TODO undefined behavior if another thread calls submit of course
1444 */
1445 if (wait_count > 0) {
1446 __ASSERT(!k_is_in_isr(),
1447 "expected rtio submit with wait count to be called from a thread");
1448
1449 k_sem_reset(r->submit_sem);
1450 r->submit_count = wait_count;
1451 }
1452#else
1453 uintptr_t cq_count = (uintptr_t)atomic_get(&r->cq_count) + wait_count;
1454#endif
1455
1456 /* Submit the queue to the executor which consumes submissions
1457 * and produces completions through ISR chains or other means.
1458 */
1459 rtio_executor_submit(r);
1460
1461
1462 /* TODO could be nicer if we could suspend the thread and not
1463 * wake up on each completion here.
1464 */
1465#ifdef CONFIG_RTIO_SUBMIT_SEM
1466
1467 if (wait_count > 0) {
1468 res = k_sem_take(r->submit_sem, K_FOREVER);
1469 __ASSERT(res == 0,
1470 "semaphore was reset or timed out while waiting on completions!");
1471 }
1472#else
1473 while ((uintptr_t)atomic_get(&r->cq_count) < cq_count) {
1474 Z_SPIN_DELAY(10);
1475 k_yield();
1476 }
1477#endif
1478
1479 return res;
1480}
1481
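Putting the pieces together, a blocking single-read sequence could look like this (sketch; error handling trimmed, ez_io and sensor_iodev are hypothetical):

/* Sketch: prepare one read, submit and wait for its completion, then consume it. */
static int read_once(uint8_t *data, uint32_t len)
{
	struct rtio_sqe *sqe = rtio_sqe_acquire(&ez_io);
	struct rtio_cqe *cqe;
	int result;

	rtio_sqe_prep_read(sqe, &sensor_iodev, RTIO_PRIO_NORM, data, len, NULL);
	rtio_submit(&ez_io, 1); /* block until one completion is produced */

	cqe = rtio_cqe_consume_block(&ez_io);
	result = cqe->result;
	rtio_cqe_release(&ez_io, cqe);

	return result;
}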
1486#ifdef __cplusplus
1487}
1488#endif
1489
1490#include <zephyr/syscalls/rtio.h>
1491
1492#endif /* ZEPHYR_INCLUDE_RTIO_RTIO_H_ */
long atomic_t
Definition atomic_types.h:15
atomic_val_t atomic_get(const atomic_t *target)
Atomic get.
atomic_val_t atomic_inc(atomic_t *target)
Atomic increment.
#define K_FOREVER
Generate infinite timeout delay.
Definition kernel.h:1440
#define K_NO_WAIT
Generate null timeout delay.
Definition kernel.h:1330
k_timepoint_t sys_timepoint_calc(k_timeout_t timeout)
Calculate a timepoint value.
static bool sys_timepoint_expired(k_timepoint_t timepoint)
Indicates if timepoint is expired.
Definition sys_clock.h:318
#define K_TIMEOUT_EQ(a, b)
Compare timeouts for equality.
Definition sys_clock.h:80
bool k_is_in_isr(void)
Determine if code is running at interrupt level.
int sys_mem_blocks_free_contiguous(sys_mem_blocks_t *mem_block, void *block, size_t count)
Free contiguous multiple memory blocks.
int sys_mem_blocks_alloc_contiguous(sys_mem_blocks_t *mem_block, size_t count, void **out_block)
Allocate a contiguous set of memory blocks.
static ALWAYS_INLINE void mpsc_push(struct mpsc *q, struct mpsc_node *n)
Push a node.
Definition mpsc_lockfree.h:126
static struct mpsc_node * mpsc_pop(struct mpsc *q)
Pop a node off of the list.
Definition mpsc_lockfree.h:145
#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags)
Get the block count from the mempool CQE flags.
Definition rtio.h:178
#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags)
Get the block index from the mempool CQE flags.
Definition rtio.h:170
#define RTIO_CQE_FLAG_MEMPOOL_BUFFER
The entry's buffer was allocated from the RTIO's mempool.
Definition rtio.h:160
#define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt)
Prepare CQE flags for a mempool read.
Definition rtio.h:187
#define RTIO_CQE_FLAG_GET(flags)
Definition rtio.h:162
#define RTIO_SQE_MULTISHOT
The SQE should continue producing CQEs until canceled.
Definition rtio.h:136
#define RTIO_SQE_TRANSACTION
The next request in the queue is part of a transaction.
Definition rtio.h:108
#define RTIO_SQE_MEMPOOL_BUFFER
The buffer should be allocated by the RTIO mempool.
Definition rtio.h:120
#define RTIO_SQE_CANCELED
The SQE should not execute if possible.
Definition rtio.h:128
#define RTIO_SQE_NO_RESPONSE
The SQE does not produce a CQE.
Definition rtio.h:141
#define RTIO_SQE_CHAINED
The next request in the queue should wait on this one.
Definition rtio.h:96
static void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, void *userdata)
Prepare a read op submission with context's mempool.
Definition rtio.h:523
void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result)
#define RTIO_OP_CALLBACK
An operation that calls a given function (callback)
Definition rtio.h:475
static uint32_t rtio_sqe_acquirable(struct rtio *r)
Count of acquirable submission queue events.
Definition rtio.h:869
static void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
Definition rtio.h:687
static void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, const uint8_t *tiny_write_data, uint8_t tiny_write_len, void *userdata)
Prepare a tiny write op submission.
Definition rtio.h:568
static size_t rtio_mempool_block_size(const struct rtio *r)
Get the mempool block size of the RTIO context.
Definition rtio.h:387
static void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
Submit a completion queue event with a given result and userdata.
Definition rtio.h:1167
static void rtio_sqe_prep_nop(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, void *userdata)
Prepare a nop (no op) submission.
Definition rtio.h:489
void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
Release memory that was allocated by the RTIO's memory pool.
static int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
Copy an array of SQEs into the queue.
Definition rtio.h:1376
static void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
Produce a complete queue event if available.
Definition rtio.h:979
#define RTIO_OP_TINY_TX
An operation that transmits tiny writes by copying the data to write.
Definition rtio.h:472
static uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
Compute the CQE flags from the rtio_iodev_sqe entry.
Definition rtio.h:1060
void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
static int rtio_block_pool_alloc(struct rtio *r, size_t min_sz, size_t max_sz, uint8_t **buf, uint32_t *buf_len)
Definition rtio.h:694
static void rtio_sqe_prep_write(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, const uint8_t *buf, uint32_t len, void *userdata)
Prepare a write op submission.
Definition rtio.h:542
int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes, struct rtio_sqe **handle, size_t sqe_count)
Copy an array of SQEs into the queue and get resulting handles back.
static struct rtio_cqe * rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
Definition rtio.h:670
struct k_mem_partition rtio_partition
The memory partition associated with all RTIO context information.
static void rtio_sqe_prep_read(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, uint8_t *buf, uint32_t len, void *userdata)
Prepare a read op submission.
Definition rtio.h:502
static struct rtio_sqe * rtio_sqe_acquire(struct rtio *r)
Acquire a single submission queue event if available.
Definition rtio.h:930
#define RTIO_OP_TX
An operation that transmits (writes)
Definition rtio.h:469
static void rtio_sqe_drop_all(struct rtio *r)
Drop all previously acquired sqe.
Definition rtio.h:948
static void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, void *userdata)
Definition rtio.h:531
int rtio_cqe_copy_out(struct rtio *r, struct rtio_cqe *cqes, size_t cqe_count, k_timeout_t timeout)
Copy an array of CQEs from the queue.
static void rtio_sqe_prep_callback(struct rtio_sqe *sqe, rtio_callback_t callback, void *arg0, void *userdata)
Prepare a callback op submission.
Definition rtio.h:594
static void rtio_access_grant(struct rtio *r, struct k_thread *t)
Grant access to an RTIO context to a user thread.
Definition rtio.h:1282
#define RTIO_OP_TXRX
An operation that transceives (reads and writes simultaneously)
Definition rtio.h:478
static void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
Release consumed completion queue event.
Definition rtio.h:1049
static int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len, uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
Get the buffer associated with the RX submission.
Definition rtio.h:1208
static void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
Inform the executor of a submissions completion with error.
Definition rtio.h:1151
static void rtio_sqe_prep_transceive(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, const uint8_t *tx_buf, uint8_t *rx_buf, uint32_t buf_len, void *userdata)
Prepare a transceive op submission.
Definition rtio.h:630
int rtio_sqe_cancel(struct rtio_sqe *sqe)
Attempt to cancel an SQE.
static void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
Definition rtio.h:663
static void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
Inform the executor of a submission completion with success.
Definition rtio.h:1138
void(* rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, void *arg0)
Callback signature for RTIO_OP_CALLBACK.
Definition rtio.h:227
#define RTIO_OP_NOP
An operation that does nothing and will complete immediately.
Definition rtio.h:463
static struct rtio_cqe * rtio_cqe_acquire(struct rtio *r)
Acquire a complete queue event if available.
Definition rtio.h:963
static struct rtio_iodev_sqe * rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
Get the next sqe in the chain.
Definition rtio.h:900
static struct rtio_cqe * rtio_cqe_consume(struct rtio *r)
Consume a single completion queue event if available.
Definition rtio.h:995
static struct rtio_iodev_sqe * rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
Definition rtio.h:648
static struct rtio_iodev_sqe * rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
Get the next sqe in the chain or transaction.
Definition rtio.h:917
static void rtio_sqe_prep_callback_no_cqe(struct rtio_sqe *sqe, rtio_callback_t callback, void *arg0, void *userdata)
Prepare a callback op submission that does not create a CQE.
Definition rtio.h:618
int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe, uint8_t **buff, uint32_t *buff_len)
Retrieve the mempool buffer that was allocated for the CQE.
static struct rtio_iodev_sqe * rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
Get the next sqe in the transaction.
Definition rtio.h:882
void rtio_executor_submit(struct rtio *r)
static struct rtio_cqe * rtio_cqe_consume_block(struct rtio *r)
Wait for and consume a single completion queue event.
Definition rtio.h:1025
static void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len)
Definition rtio.h:735
#define RTIO_OP_RX
An operation that receives (reads)
Definition rtio.h:466
int rtio_submit(struct rtio *r, uint32_t wait_count)
Submit I/O requests to the underlying executor.
void k_sem_reset(struct k_sem *sem)
Resets a semaphore's count to zero.
void k_sem_give(struct k_sem *sem)
Give a semaphore.
int k_sem_take(struct k_sem *sem, k_timeout_t timeout)
Take a semaphore.
#define BIT(n)
Unsigned integer with bit position n set (signed in assembly language).
Definition util_macro.h:44
#define CONTAINER_OF(ptr, type, field)
Get a pointer to a structure containing the element.
Definition util.h:284
#define DIV_ROUND_UP(n, d)
Divide and round up.
Definition util.h:352
#define EINVAL
Invalid argument.
Definition errno.h:60
#define ENOMEM
Not enough core.
Definition errno.h:50
#define ENOTSUP
Unsupported value.
Definition errno.h:114
void k_yield(void)
Yield the current thread.
void k_object_access_grant(const void *object, struct k_thread *thread)
Grant a thread access to a kernel object.
Public kernel APIs.
Memory Blocks Allocator.
A wait-free intrusive multi producer single consumer (MPSC) queue using a singly linked list.
__UINT32_TYPE__ uint32_t
Definition stdint.h:90
__INT32_TYPE__ int32_t
Definition stdint.h:74
__UINT8_TYPE__ uint8_t
Definition stdint.h:88
#define UINT16_MAX
Definition stdint.h:28
__UINTPTR_TYPE__ uintptr_t
Definition stdint.h:105
__UINT16_TYPE__ uint16_t
Definition stdint.h:89
__INT8_TYPE__ int8_t
Definition stdint.h:72
void * memset(void *buf, int c, size_t n)
void * memcpy(void *ZRESTRICT d, const void *ZRESTRICT s, size_t n)
Memory Partition.
Definition mem_domain.h:55
Thread Structure.
Definition thread.h:259
Kernel timeout type.
Definition sys_clock.h:65
Kernel timepoint type.
Definition sys_clock.h:225
Queue member.
Definition mpsc_lockfree.h:79
MPSC Queue.
Definition mpsc_lockfree.h:86
Definition rtio.h:315
struct rtio_cqe * pool
Definition rtio.h:319
struct mpsc free_q
Definition rtio.h:316
const uint16_t pool_size
Definition rtio.h:317
uint16_t pool_free
Definition rtio.h:318
A completion queue event.
Definition rtio.h:300
void * userdata
Associated userdata with operation.
Definition rtio.h:304
struct mpsc_node q
Definition rtio.h:301
uint32_t flags
Flags associated with the operation.
Definition rtio.h:305
int32_t result
Result from operation.
Definition rtio.h:303
API that an RTIO IO device should implement.
Definition rtio.h:439
void(* submit)(struct rtio_iodev_sqe *iodev_sqe)
Submit to the iodev an entry to work on.
Definition rtio.h:448
An IO device submission queue entry.
Definition rtio.h:429
struct rtio_iodev_sqe * next
Definition rtio.h:432
struct rtio_sqe sqe
Definition rtio.h:430
struct rtio * r
Definition rtio.h:433
struct mpsc_node q
Definition rtio.h:431
An IO device with a function table for submitting requests.
Definition rtio.h:454
const struct rtio_iodev_api * api
Definition rtio.h:456
void * data
Definition rtio.h:459
Definition rtio.h:308
struct rtio_iodev_sqe * pool
Definition rtio.h:312
const uint16_t pool_size
Definition rtio.h:310
struct mpsc free_q
Definition rtio.h:309
uint16_t pool_free
Definition rtio.h:311
A submission queue event.
Definition rtio.h:232
uint32_t i2c_config
OP_I2C_CONFIGURE.
Definition rtio.h:288
void * userdata
User provided data which is returned upon operation completion.
Definition rtio.h:252
const uint8_t * tx_buf
Buffer to write from.
Definition rtio.h:283
uint8_t op
Op code.
Definition rtio.h:233
uint8_t buf_len
Length of tiny buffer.
Definition rtio.h:270
void * arg0
Last argument given to callback.
Definition rtio.h:277
struct rtio_sqe::@425::@427 tx
OP_TX.
uint8_t * rx_buf
Buffer to read into.
Definition rtio.h:284
uint8_t prio
Op priority.
Definition rtio.h:235
uint8_t * buf
Buffer to read into.
Definition rtio.h:265
uint32_t buf_len
Length of buffer.
Definition rtio.h:258
const struct rtio_iodev * iodev
Device to operation on.
Definition rtio.h:243
struct rtio_sqe::@425::@428 rx
OP_RX.
struct rtio_sqe::@425::@429 tiny_tx
OP_TINY_TX.
struct rtio_sqe::@425::@431 txrx
OP_TXRX.
uint16_t flags
Op Flags.
Definition rtio.h:237
const uint8_t * buf
Buffer to write from.
Definition rtio.h:259
rtio_callback_t callback
Definition rtio.h:276
uint16_t iodev_flags
Op iodev flags.
Definition rtio.h:239
An RTIO context containing what can be viewed as a pair of queues.
Definition rtio.h:333
struct rtio_cqe_pool * cqe_pool
Definition rtio.h:363
struct mpsc sq
Definition rtio.h:371
atomic_t cq_count
Definition rtio.h:352
struct rtio_sqe_pool * sqe_pool
Definition rtio.h:360
atomic_t xcqcnt
Definition rtio.h:357
struct mpsc cq
Definition rtio.h:374
Misc utilities.