Zephyr API Documentation 3.5.0
A Scalable Open Source RTOS
rtio.h
1/*
2 * Copyright (c) 2022 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
26#ifndef ZEPHYR_INCLUDE_RTIO_RTIO_H_
27#define ZEPHYR_INCLUDE_RTIO_RTIO_H_
28
29#include <string.h>
30
31#include <zephyr/app_memory/app_memdomain.h>
32#include <zephyr/device.h>
33#include <zephyr/kernel.h>
34#include <zephyr/rtio/rtio_mpsc.h>
35#include <zephyr/sys/__assert.h>
36#include <zephyr/sys/atomic.h>
37#include <zephyr/sys/mem_blocks.h>
38#include <zephyr/sys/util.h>
39#include <zephyr/sys/iterable_sections.h>
40
41#ifdef __cplusplus
42extern "C" {
43#endif
44
45
63#define RTIO_PRIO_LOW 0U
64
68#define RTIO_PRIO_NORM 127U
69
73#define RTIO_PRIO_HIGH 255U
74
94#define RTIO_SQE_CHAINED BIT(0)
95
106#define RTIO_SQE_TRANSACTION BIT(1)
107
108
118#define RTIO_SQE_MEMPOOL_BUFFER BIT(2)
119
126#define RTIO_SQE_CANCELED BIT(3)
127
134#define RTIO_SQE_MULTISHOT BIT(4)
135
139#define RTIO_SQE_NO_RESPONSE BIT(5)
140
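/*
 * Illustrative sketch (editorial note, not part of rtio.h): chaining ties the
 * completion order of two acquired SQEs together, e.g. a write followed by a
 * read that must not start until the write completes. `w` and `r0` are assumed
 * to have been acquired from the same context with rtio_sqe_acquire().
 *
 *   rtio_sqe_prep_write(w, iodev, RTIO_PRIO_NORM, tx_data, sizeof(tx_data), NULL);
 *   w->flags |= RTIO_SQE_CHAINED;
 *   rtio_sqe_prep_read(r0, iodev, RTIO_PRIO_NORM, rx_data, sizeof(rx_data), NULL);
 *
 * Marking w with RTIO_SQE_TRANSACTION instead hands both entries to the iodev
 * as one atomic unit rather than two ordered requests.
 */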
158#define RTIO_CQE_FLAG_MEMPOOL_BUFFER BIT(0)
159
160#define RTIO_CQE_FLAG_GET(flags) FIELD_GET(GENMASK(7, 0), (flags))
161
168#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags) FIELD_GET(GENMASK(19, 8), (flags))
169
176#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags) FIELD_GET(GENMASK(31, 20), (flags))
177
185#define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt) \
186 (FIELD_PREP(GENMASK(7, 0), RTIO_CQE_FLAG_MEMPOOL_BUFFER) | \
187 FIELD_PREP(GENMASK(19, 8), blk_idx) | FIELD_PREP(GENMASK(31, 20), blk_cnt))
188
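/*
 * Illustrative sketch (editorial note, not part of rtio.h): per the macros
 * above, the mempool CQE flags pack the flag id into bits 0-7, the block index
 * into bits 8-19 and the block count into bits 20-31, so a round trip is:
 *
 *   uint32_t f = RTIO_CQE_FLAG_PREP_MEMPOOL(3, 2);
 *
 *   __ASSERT_NO_MSG(RTIO_CQE_FLAG_GET(f) == RTIO_CQE_FLAG_MEMPOOL_BUFFER);
 *   __ASSERT_NO_MSG(RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(f) == 3);
 *   __ASSERT_NO_MSG(RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(f) == 2);
 */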
196#define RTIO_IODEV_I2C_STOP BIT(0)
197
201#define RTIO_IODEV_I2C_RESTART BIT(1)
202
206#define RTIO_IODEV_I2C_10_BITS BIT(2)
207
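/*
 * Illustrative sketch (editorial note, not part of rtio.h): these values are
 * carried in the SQE's iodev_flags field and interpreted by I2C iodev
 * implementations (an assumption based on the flag names), e.g.:
 *
 *   sqe->iodev_flags = RTIO_IODEV_I2C_STOP;
 */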
209struct rtio;
210struct rtio_cqe;
211struct rtio_sqe;
212struct rtio_sqe_pool;
213struct rtio_cqe_pool;
214struct rtio_iodev;
215struct rtio_iodev_sqe;
225typedef void (*rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, void *arg0);
226
230struct rtio_sqe {
231 uint8_t op;
232
233 uint8_t prio;
234
235 uint16_t flags;
236
237 uint16_t iodev_flags;
238
239 uint16_t _resv0;
240
241 const struct rtio_iodev *iodev;
250 void *userdata;
251
252 union {
253
255 struct {
256 uint32_t buf_len;
257 uint8_t *buf;
258 };
259
261 struct {
262 uint8_t tiny_buf_len;
263 uint8_t tiny_buf[7];
264 };
265
267 struct {
268 rtio_callback_t callback;
269 void *arg0;
270 };
271
273 struct {
274 uint32_t txrx_buf_len;
275 uint8_t *tx_buf;
276 uint8_t *rx_buf;
277 };
278
279 };
280};
281
283/* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
284BUILD_ASSERT(sizeof(struct rtio_sqe) <= 64);
290struct rtio_cqe {
291 struct rtio_mpsc_node q;
292
293 int32_t result;
294 void *userdata;
295 uint32_t flags;
296};
297
298struct rtio_sqe_pool {
299 struct rtio_mpsc free_q;
300 const uint16_t pool_size;
301 uint16_t pool_free;
302 struct rtio_iodev_sqe *pool;
303};
304
305struct rtio_cqe_pool {
306 struct rtio_mpsc free_q;
307 const uint16_t pool_size;
308 uint16_t pool_free;
309 struct rtio_cqe *pool;
310};
311
323struct rtio {
324#ifdef CONFIG_RTIO_SUBMIT_SEM
325 /* A wait semaphore which may suspend the calling thread
326 * to wait for some number of completions when calling submit
327 */
328 struct k_sem *submit_sem;
329
330 uint32_t submit_count;
331#endif
332
333#ifdef CONFIG_RTIO_CONSUME_SEM
334 /* A wait semaphore which may suspend the calling thread
335 * to wait for some number of completions while consuming
336 * them from the completion queue
337 */
338 struct k_sem *consume_sem;
339#endif
340
341 /* Total number of completions */
342 atomic_t cq_count;
343
344 /* Number of completions that were unable to be submitted with results
345 * due to the cq spsc being full
346 */
347 atomic_t xcqcnt;
348
349 /* Submission queue object pool with free list */
350 struct rtio_sqe_pool *sqe_pool;
351
352 /* Complete queue object pool with free list */
353 struct rtio_cqe_pool *cqe_pool;
354
355#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
356 /* Mem block pool */
357 struct sys_mem_blocks *block_pool;
358#endif
359
360 /* Submission queue */
361 struct rtio_mpsc sq;
362
363 /* Completion queue */
364 struct rtio_mpsc cq;
365};
366
368extern struct k_mem_partition rtio_partition;
369
377static inline size_t rtio_mempool_block_size(const struct rtio *r)
378{
379#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
380 ARG_UNUSED(r);
381 return 0;
382#else
383 if (r == NULL || r->block_pool == NULL) {
384 return 0;
385 }
386 return BIT(r->block_pool->info.blk_sz_shift);
387#endif
388}
389
397#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
398static inline uint16_t __rtio_compute_mempool_block_index(const struct rtio *r, const void *ptr)
399{
400 uintptr_t addr = (uintptr_t)ptr;
401 struct sys_mem_blocks *mem_pool = r->block_pool;
402 uint32_t block_size = rtio_mempool_block_size(r);
403
404 uintptr_t buff = (uintptr_t)mem_pool->buffer;
405 uint32_t buff_size = mem_pool->info.num_blocks * block_size;
406
407 if (addr < buff || addr >= buff + buff_size) {
408 return UINT16_MAX;
409 }
410 return (addr - buff) / block_size;
411}
412#endif
413
419struct rtio_iodev_sqe {
420 struct rtio_sqe sqe;
421 struct rtio_mpsc_node q;
422 struct rtio_iodev_sqe *next;
423 struct rtio *r;
424};
425
429struct rtio_iodev_api {
438 void (*submit)(struct rtio_iodev_sqe *iodev_sqe);
439};
440
444struct rtio_iodev {
445 /* Function pointer table */
446 const struct rtio_iodev_api *api;
447
448 /* Queue of RTIO contexts with requests */
449 struct rtio_mpsc iodev_sq;
450
451 /* Data associated with this iodev */
452 void *data;
453};
454
456#define RTIO_OP_NOP 0
457
459#define RTIO_OP_RX (RTIO_OP_NOP+1)
460
462#define RTIO_OP_TX (RTIO_OP_RX+1)
463
465#define RTIO_OP_TINY_TX (RTIO_OP_TX+1)
466
468#define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)
469
471#define RTIO_OP_TXRX (RTIO_OP_CALLBACK+1)
472
473
477static inline void rtio_sqe_prep_nop(struct rtio_sqe *sqe,
478 const struct rtio_iodev *iodev,
479 void *userdata)
480{
481 memset(sqe, 0, sizeof(struct rtio_sqe));
482 sqe->op = RTIO_OP_NOP;
483 sqe->iodev = iodev;
484 sqe->userdata = userdata;
485}
486
490static inline void rtio_sqe_prep_read(struct rtio_sqe *sqe,
491 const struct rtio_iodev *iodev,
492 int8_t prio,
493 uint8_t *buf,
494 uint32_t len,
495 void *userdata)
496{
497 memset(sqe, 0, sizeof(struct rtio_sqe));
498 sqe->op = RTIO_OP_RX;
499 sqe->prio = prio;
500 sqe->iodev = iodev;
501 sqe->buf_len = len;
502 sqe->buf = buf;
503 sqe->userdata = userdata;
504}
505
511static inline void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe,
512 const struct rtio_iodev *iodev, int8_t prio,
513 void *userdata)
514{
515 rtio_sqe_prep_read(sqe, iodev, prio, NULL, 0, userdata);
516 sqe->flags = RTIO_SQE_MEMPOOL_BUFFER;
517}
518
519static inline void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe,
520 const struct rtio_iodev *iodev, int8_t prio,
521 void *userdata)
522{
523 rtio_sqe_prep_read_with_pool(sqe, iodev, prio, userdata);
524 sqe->flags |= RTIO_SQE_MULTISHOT;
525}
526
530static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe,
531 const struct rtio_iodev *iodev,
532 int8_t prio,
533 uint8_t *buf,
534 uint32_t len,
535 void *userdata)
536{
537 memset(sqe, 0, sizeof(struct rtio_sqe));
538 sqe->op = RTIO_OP_TX;
539 sqe->prio = prio;
540 sqe->iodev = iodev;
541 sqe->buf_len = len;
542 sqe->buf = buf;
543 sqe->userdata = userdata;
544}
545
556static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe,
557 const struct rtio_iodev *iodev,
558 int8_t prio,
559 const uint8_t *tiny_write_data,
560 uint8_t tiny_write_len,
561 void *userdata)
562{
563 __ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_buf));
564
565 memset(sqe, 0, sizeof(struct rtio_sqe));
566 sqe->op = RTIO_OP_TINY_TX;
567 sqe->prio = prio;
568 sqe->iodev = iodev;
569 sqe->tiny_buf_len = tiny_write_len;
570 memcpy(sqe->tiny_buf, tiny_write_data, tiny_write_len);
571 sqe->userdata = userdata;
572}
573
582static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
583 rtio_callback_t callback,
584 void *arg0,
585 void *userdata)
586{
587 memset(sqe, 0, sizeof(struct rtio_sqe));
588 sqe->op = RTIO_OP_CALLBACK;
589 sqe->prio = 0;
590 sqe->iodev = NULL;
591 sqe->callback = callback;
592 sqe->arg0 = arg0;
593 sqe->userdata = userdata;
594}
595
599static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
600 const struct rtio_iodev *iodev,
601 int8_t prio,
602 uint8_t *tx_buf,
603 uint8_t *rx_buf,
604 uint32_t buf_len,
605 void *userdata)
606{
607 memset(sqe, 0, sizeof(struct rtio_sqe));
608 sqe->op = RTIO_OP_TXRX;
609 sqe->prio = prio;
610 sqe->iodev = iodev;
611 sqe->txrx_buf_len = buf_len;
612 sqe->tx_buf = tx_buf;
613 sqe->rx_buf = rx_buf;
614 sqe->userdata = userdata;
615}
616
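/*
 * Illustrative sketch (editorial note, not part of rtio.h): a callback op is
 * commonly chained after an I/O op so a handler runs once the I/O completes.
 * my_rtio, my_iodev, my_done and my_ctx are placeholder application names;
 * my_done must match the rtio_callback_t signature.
 *
 *   struct rtio_sqe *io = rtio_sqe_acquire(&my_rtio);
 *   struct rtio_sqe *cb = rtio_sqe_acquire(&my_rtio);
 *
 *   rtio_sqe_prep_read(io, &my_iodev, RTIO_PRIO_NORM, buf, sizeof(buf), NULL);
 *   io->flags |= RTIO_SQE_CHAINED;
 *   rtio_sqe_prep_callback(cb, my_done, my_ctx, NULL);
 */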
617static inline struct rtio_iodev_sqe *rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
618{
619 struct rtio_mpsc_node *node = rtio_mpsc_pop(&pool->free_q);
620
621 if (node == NULL) {
622 return NULL;
623 }
624
625 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
626
627 pool->pool_free--;
628
629 return iodev_sqe;
630}
631
632static inline void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
633{
634 rtio_mpsc_push(&pool->free_q, &iodev_sqe->q);
635
636 pool->pool_free++;
637}
638
639static inline struct rtio_cqe *rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
640{
641 struct rtio_mpsc_node *node = rtio_mpsc_pop(&pool->free_q);
642
643 if (node == NULL) {
644 return NULL;
645 }
646
647 struct rtio_cqe *cqe = CONTAINER_OF(node, struct rtio_cqe, q);
648
649 memset(cqe, 0, sizeof(struct rtio_cqe));
650
651 pool->pool_free--;
652
653 return cqe;
654}
655
656static inline void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
657{
658 rtio_mpsc_push(&pool->free_q, &cqe->q);
659
660 pool->pool_free++;
661}
662
663static inline int rtio_block_pool_alloc(struct rtio *r, size_t min_sz,
664 size_t max_sz, uint8_t **buf, uint32_t *buf_len)
665{
666#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
667 ARG_UNUSED(r);
668 ARG_UNUSED(min_sz);
669 ARG_UNUSED(max_sz);
670 ARG_UNUSED(buf);
671 ARG_UNUSED(buf_len);
672 return -ENOTSUP;
673#else
674 const uint32_t block_size = rtio_mempool_block_size(r);
675 uint32_t bytes = max_sz;
676
677 do {
678 size_t num_blks = DIV_ROUND_UP(bytes, block_size);
679 int rc = sys_mem_blocks_alloc_contiguous(r->block_pool, num_blks, (void **)buf);
680
681 if (rc == 0) {
682 *buf_len = num_blks * block_size;
683 return 0;
684 }
685
686 bytes -= block_size;
687 } while (bytes >= min_sz);
688
689 return -ENOMEM;
690#endif
691}
692
693static inline void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len)
694{
695#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
696 size_t num_blks = buf_len >> r->block_pool->info.blk_sz_shift;
697
698 sys_mem_blocks_free_contiguous(r->block_pool, buf, num_blks);
699#endif
700}
701
702/* Do not try and reformat the macros */
703/* clang-format off */
704
712#define RTIO_IODEV_DEFINE(name, iodev_api, iodev_data) \
713 STRUCT_SECTION_ITERABLE(rtio_iodev, name) = { \
714 .api = (iodev_api), \
715 .iodev_sq = RTIO_MPSC_INIT((name.iodev_sq)), \
716 .data = (iodev_data), \
717 }
718
719#define Z_RTIO_SQE_POOL_DEFINE(name, sz) \
720 static struct rtio_iodev_sqe _sqe_pool_##name[sz]; \
721 STRUCT_SECTION_ITERABLE(rtio_sqe_pool, name) = { \
722 .free_q = RTIO_MPSC_INIT((name.free_q)), \
723 .pool_size = sz, \
724 .pool_free = sz, \
725 .pool = _sqe_pool_##name, \
726 }
727
728
729#define Z_RTIO_CQE_POOL_DEFINE(name, sz) \
730 static struct rtio_cqe _cqe_pool_##name[sz]; \
731 STRUCT_SECTION_ITERABLE(rtio_cqe_pool, name) = { \
732 .free_q = RTIO_MPSC_INIT((name.free_q)), \
733 .pool_size = sz, \
734 .pool_free = sz, \
735 .pool = _cqe_pool_##name, \
736 }
737
747#define RTIO_BMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_BMEM(rtio_partition) static), (static))
748
758#define RTIO_DMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_DMEM(rtio_partition) static), (static))
759
760#define Z_RTIO_BLOCK_POOL_DEFINE(name, blk_sz, blk_cnt, blk_align) \
761 RTIO_BMEM uint8_t __aligned(WB_UP(blk_align)) \
762 _block_pool_##name[blk_cnt*WB_UP(blk_sz)]; \
763 _SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF(name, WB_UP(blk_sz), blk_cnt, _block_pool_##name, \
764 RTIO_DMEM)
765
766#define Z_RTIO_DEFINE(name, _sqe_pool, _cqe_pool, _block_pool) \
767 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, \
768 (static K_SEM_DEFINE(_submit_sem_##name, 0, K_SEM_MAX_LIMIT))) \
769 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, \
770 (static K_SEM_DEFINE(_consume_sem_##name, 0, K_SEM_MAX_LIMIT))) \
771 STRUCT_SECTION_ITERABLE(rtio, name) = { \
772 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_sem = &_submit_sem_##name,)) \
773 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_count = 0,)) \
774 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, (.consume_sem = &_consume_sem_##name,)) \
775 .cq_count = ATOMIC_INIT(0), \
776 .xcqcnt = ATOMIC_INIT(0), \
777 .sqe_pool = _sqe_pool, \
778 .cqe_pool = _cqe_pool, \
779 IF_ENABLED(CONFIG_RTIO_SYS_MEM_BLOCKS, (.block_pool = _block_pool,)) \
780 .sq = RTIO_MPSC_INIT((name.sq)), \
781 .cq = RTIO_MPSC_INIT((name.cq)), \
782 }
783
791#define RTIO_DEFINE(name, sq_sz, cq_sz) \
792 Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz); \
793 Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz); \
794 Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, NULL) \
795
796/* clang-format on */
797
808#define RTIO_DEFINE_WITH_MEMPOOL(name, sq_sz, cq_sz, num_blks, blk_size, balign) \
809 Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz); \
810 Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz); \
811 Z_RTIO_BLOCK_POOL_DEFINE(name##_block_pool, blk_size, num_blks, balign); \
812 Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, &name##_block_pool)
813
814/* clang-format on */
815
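/*
 * Illustrative sketch (editorial note, not part of rtio.h): statically defining
 * contexts at file scope. The sizes are arbitrary example values; the mempool
 * variant gives RTIO_SQE_MEMPOOL_BUFFER reads 16 blocks of 64 bytes to draw
 * from, aligned to 4 bytes.
 *
 *   RTIO_DEFINE(simple_ctx, 8, 8);
 *   RTIO_DEFINE_WITH_MEMPOOL(pool_ctx, 8, 8, 16, 64, 4);
 */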
823static inline uint32_t rtio_sqe_acquirable(struct rtio *r)
824{
825 return r->sqe_pool->pool_free;
826}
827
836static inline struct rtio_iodev_sqe *rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
837{
838 if (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) {
839 return iodev_sqe->next;
840 } else {
841 return NULL;
842 }
843}
844
845
854static inline struct rtio_iodev_sqe *rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
855{
856 if (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) {
857 return iodev_sqe->next;
858 } else {
859 return NULL;
860 }
861}
862
871static inline struct rtio_iodev_sqe *rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
872{
873 return iodev_sqe->next;
874}
875
884static inline struct rtio_sqe *rtio_sqe_acquire(struct rtio *r)
885{
886 struct rtio_iodev_sqe *iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool);
887
888 if (iodev_sqe == NULL) {
889 return NULL;
890 }
891
892 rtio_mpsc_push(&r->sq, &iodev_sqe->q);
893
894 return &iodev_sqe->sqe;
895}
896
902static inline void rtio_sqe_drop_all(struct rtio *r)
903{
904 struct rtio_iodev_sqe *iodev_sqe;
905 struct rtio_mpsc_node *node = rtio_mpsc_pop(&r->sq);
906
907 while (node != NULL) {
908 iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
909 rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
910 node = rtio_mpsc_pop(&r->sq);
911 }
912}
913
917static inline struct rtio_cqe *rtio_cqe_acquire(struct rtio *r)
918{
919 struct rtio_cqe *cqe = rtio_cqe_pool_alloc(r->cqe_pool);
920
921 if (cqe == NULL) {
922 return NULL;
923 }
924
925 memset(cqe, 0, sizeof(struct rtio_cqe));
926
927 return cqe;
928}
929
933static inline void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
934{
935 rtio_mpsc_push(&r->cq, &cqe->q);
936}
937
949static inline struct rtio_cqe *rtio_cqe_consume(struct rtio *r)
950{
951 struct rtio_mpsc_node *node;
952 struct rtio_cqe *cqe = NULL;
953
954#ifdef CONFIG_RTIO_CONSUME_SEM
955 if (k_sem_take(r->consume_sem, K_NO_WAIT) != 0) {
956 return NULL;
957 }
958#endif
959
960 node = rtio_mpsc_pop(&r->cq);
961 if (node == NULL) {
962 return NULL;
963 }
964 cqe = CONTAINER_OF(node, struct rtio_cqe, q);
965
966 return cqe;
967}
968
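/*
 * Illustrative sketch (editorial note, not part of rtio.h): draining whatever
 * completions are currently available without blocking, e.g. from a polling
 * loop. my_rtio and handle() are placeholder application names.
 *
 *   struct rtio_cqe *cqe;
 *
 *   while ((cqe = rtio_cqe_consume(&my_rtio)) != NULL) {
 *           handle(cqe->result, cqe->userdata);
 *           rtio_cqe_release(&my_rtio, cqe);
 *   }
 */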
979static inline struct rtio_cqe *rtio_cqe_consume_block(struct rtio *r)
980{
981 struct rtio_mpsc_node *node;
982 struct rtio_cqe *cqe;
983
984#ifdef CONFIG_RTIO_CONSUME_SEM
985 k_sem_take(r->consume_sem, K_FOREVER);
986#endif
987 node = rtio_mpsc_pop(&r->cq);
988 while (node == NULL) {
989 Z_SPIN_DELAY(1);
990 node = rtio_mpsc_pop(&r->cq);
991 }
992 cqe = CONTAINER_OF(node, struct rtio_cqe, q);
993
994 return cqe;
995}
996
1003static inline void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
1004{
1005 rtio_cqe_pool_free(r->cqe_pool, cqe);
1006}
1007
1014static inline uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
1015{
1016 uint32_t flags = 0;
1017
1018#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1019 if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) {
1020 struct rtio *r = iodev_sqe->r;
1021 struct sys_mem_blocks *mem_pool = r->block_pool;
1022 int blk_index = (iodev_sqe->sqe.buf - mem_pool->buffer) >>
1023 mem_pool->info.blk_sz_shift;
1024 int blk_count = iodev_sqe->sqe.buf_len >> mem_pool->info.blk_sz_shift;
1025
1026 flags = RTIO_CQE_FLAG_PREP_MEMPOOL(blk_index, blk_count);
1027 }
1028#else
1029 ARG_UNUSED(iodev_sqe);
1030#endif
1031
1032 return flags;
1033}
1034
1050__syscall int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1051 uint8_t **buff, uint32_t *buff_len);
1052
1053static inline int z_impl_rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1054 uint8_t **buff, uint32_t *buff_len)
1055{
1056#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1057 if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) {
1058 int blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags);
1059 int blk_count = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags);
1060 uint32_t blk_size = rtio_mempool_block_size(r);
1061
1062 *buff = r->block_pool->buffer + blk_idx * blk_size;
1063 *buff_len = blk_count * blk_size;
1064 __ASSERT_NO_MSG(*buff >= r->block_pool->buffer);
1065 __ASSERT_NO_MSG(*buff <
1066 r->block_pool->buffer + blk_size * r->block_pool->info.num_blocks);
1067 return 0;
1068 }
1069 return -EINVAL;
1070#else
1071 ARG_UNUSED(r);
1072 ARG_UNUSED(cqe);
1073 ARG_UNUSED(buff);
1074 ARG_UNUSED(buff_len);
1075
1076 return -ENOTSUP;
1077#endif
1078}
1079
1080void rtio_executor_submit(struct rtio *r);
1081void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result);
1082void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result);
1083
1092static inline void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
1093{
1094 rtio_executor_ok(iodev_sqe, result);
1095}
1096
1105static inline void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
1106{
1107 rtio_executor_err(iodev_sqe, result);
1108}
1109
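/*
 * Illustrative sketch (editorial note, not part of rtio.h): an iodev's submit()
 * implementation eventually reports every rtio_iodev_sqe back to the executor
 * with one of the helpers above. my_hw_start is a placeholder driver call.
 *
 *   static void my_iodev_submit(struct rtio_iodev_sqe *iodev_sqe)
 *   {
 *           int rc = my_hw_start(iodev_sqe);
 *
 *           if (rc != 0) {
 *                   rtio_iodev_sqe_err(iodev_sqe, rc);
 *           }
 *   }
 *
 * On success the driver later calls rtio_iodev_sqe_ok(iodev_sqe, 0), typically
 * from an ISR or work queue item.
 */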
1115static inline void rtio_iodev_cancel_all(struct rtio_iodev *iodev)
1116{
1117 /* Clear pending requests as -ENODATA */
1118 struct rtio_mpsc_node *node = rtio_mpsc_pop(&iodev->iodev_sq);
1119
1120 while (node != NULL) {
1121 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
1122
1123 rtio_iodev_sqe_err(iodev_sqe, -ECANCELED);
1124 node = rtio_mpsc_pop(&iodev->iodev_sq);
1125 }
1126}
1127
1139static inline void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
1140{
1141 struct rtio_cqe *cqe = rtio_cqe_acquire(r);
1142
1143 if (cqe == NULL) {
1144 atomic_inc(&r->xcqcnt);
1145 } else {
1146 cqe->result = result;
1147 cqe->userdata = userdata;
1148 cqe->flags = flags;
1149 rtio_cqe_produce(r, cqe);
1150 }
1151
1152 atomic_inc(&r->cq_count);
1153#ifdef CONFIG_RTIO_SUBMIT_SEM
1154 if (r->submit_count > 0) {
1155 r->submit_count--;
1156 if (r->submit_count == 0) {
1157 k_sem_give(r->submit_sem);
1158 }
1159 }
1160#endif
1161#ifdef CONFIG_RTIO_CONSUME_SEM
1162 k_sem_give(r->consume_sem);
1163#endif
1164}
1165
1166#define __RTIO_MEMPOOL_GET_NUM_BLKS(num_bytes, blk_size) (((num_bytes) + (blk_size)-1) / (blk_size))
1167
1180static inline int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len,
1181 uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
1182{
1183 struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe;
1184
1185#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1186 if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) {
1187 struct rtio *r = iodev_sqe->r;
1188
1189 if (sqe->buf != NULL) {
1190 if (sqe->buf_len < min_buf_len) {
1191 return -ENOMEM;
1192 }
1193 *buf = sqe->buf;
1194 *buf_len = sqe->buf_len;
1195 return 0;
1196 }
1197
1198 int rc = rtio_block_pool_alloc(r, min_buf_len, max_buf_len, buf, buf_len);
1199 if (rc == 0) {
1200 sqe->buf = *buf;
1201 sqe->buf_len = *buf_len;
1202 return 0;
1203 }
1204
1205 return -ENOMEM;
1206 }
1207#else
1208 ARG_UNUSED(max_buf_len);
1209#endif
1210
1211 if (sqe->buf_len < min_buf_len) {
1212 return -ENOMEM;
1213 }
1214
1215 *buf = sqe->buf;
1216 *buf_len = sqe->buf_len;
1217 return 0;
1218}
1219
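/*
 * Illustrative sketch (editorial note, not part of rtio.h): an iodev handling
 * an RX request uses this helper to obtain a buffer whether the application
 * supplied one or requested a mempool allocation. MIN_LEN and MAX_LEN are
 * placeholder driver constants.
 *
 *   uint8_t *buf;
 *   uint32_t buf_len;
 *
 *   if (rtio_sqe_rx_buf(iodev_sqe, MIN_LEN, MAX_LEN, &buf, &buf_len) != 0) {
 *           rtio_iodev_sqe_err(iodev_sqe, -ENOMEM);
 *           return;
 *   }
 */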
1234__syscall void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len);
1235
1236static inline void z_impl_rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
1237{
1238#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1239 if (r == NULL || buff == NULL || r->block_pool == NULL || buff_len == 0) {
1240 return;
1241 }
1242
1243 rtio_block_pool_free(r, buff, buff_len);
1244#else
1245 ARG_UNUSED(r);
1246 ARG_UNUSED(buff);
1247 ARG_UNUSED(buff_len);
1248#endif
1249}
1250
1254static inline void rtio_access_grant(struct rtio *r, struct k_thread *t)
1255{
1256 k_object_access_grant(r, t);
1257
1258#ifdef CONFIG_RTIO_SUBMIT_SEM
1259 k_object_access_grant(r->submit_sem, t);
1260#endif
1261
1262#ifdef CONFIG_RTIO_CONSUME_SEM
1263 k_object_access_grant(r->consume_sem, t);
1264#endif
1265}
1266
1277__syscall int rtio_sqe_cancel(struct rtio_sqe *sqe);
1278
1279static inline int z_impl_rtio_sqe_cancel(struct rtio_sqe *sqe)
1280{
1281 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);
1282
1283 do {
1284 iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED;
1285 iodev_sqe = rtio_iodev_sqe_next(iodev_sqe);
1286 } while (iodev_sqe != NULL);
1287
1288 return 0;
1289}
1290
1306__syscall int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1307 struct rtio_sqe **handle, size_t sqe_count);
1308
1309static inline int z_impl_rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1310 struct rtio_sqe **handle,
1311 size_t sqe_count)
1312{
1313 struct rtio_sqe *sqe;
1314 uint32_t acquirable = rtio_sqe_acquirable(r);
1315
1316 if (acquirable < sqe_count) {
1317 return -ENOMEM;
1318 }
1319
1320 for (unsigned long i = 0; i < sqe_count; i++) {
1321 sqe = rtio_sqe_acquire(r);
1322 __ASSERT_NO_MSG(sqe != NULL);
1323 if (handle != NULL && i == 0) {
1324 *handle = sqe;
1325 }
1326 *sqe = sqes[i];
1327 }
1328
1329 return 0;
1330}
1331
1348static inline int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
1349{
1350 return rtio_sqe_copy_in_get_handles(r, sqes, NULL, sqe_count);
1351}
1352
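/*
 * Illustrative sketch (editorial note, not part of rtio.h): building SQEs in a
 * local array and copying them in, the pattern user mode code must follow since
 * it cannot touch the queues directly. my_rtio, my_iodev, cmd and rsp are
 * placeholder application names.
 *
 *   struct rtio_sqe sqes[2];
 *
 *   rtio_sqe_prep_write(&sqes[0], &my_iodev, RTIO_PRIO_NORM, cmd, sizeof(cmd), NULL);
 *   sqes[0].flags |= RTIO_SQE_TRANSACTION;
 *   rtio_sqe_prep_read(&sqes[1], &my_iodev, RTIO_PRIO_NORM, rsp, sizeof(rsp), NULL);
 *
 *   int rc = rtio_sqe_copy_in(&my_rtio, sqes, 2);
 */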
1368__syscall int rtio_cqe_copy_out(struct rtio *r,
1369 struct rtio_cqe *cqes,
1370 size_t cqe_count,
1371 k_timeout_t timeout);
1372static inline int z_impl_rtio_cqe_copy_out(struct rtio *r,
1373 struct rtio_cqe *cqes,
1374 size_t cqe_count,
1375 k_timeout_t timeout)
1376{
1377 size_t copied = 0;
1378 struct rtio_cqe *cqe;
1379 k_timepoint_t end = sys_timepoint_calc(timeout);
1380
1381 do {
1382 cqe = K_TIMEOUT_EQ(timeout, K_FOREVER) ? rtio_cqe_consume_block(r)
1383 : rtio_cqe_consume(r);
1384 if (cqe == NULL) {
1385#ifdef CONFIG_BOARD_NATIVE_POSIX
1386 /* Native posix fakes the clock and only moves it forward when sleeping. */
1387 k_sleep(K_TICKS(1));
1388#else
1389 Z_SPIN_DELAY(1);
1390#endif
1391 continue;
1392 }
1393 cqes[copied++] = *cqe;
1394 rtio_cqe_release(r, cqe);
1395 } while (copied < cqe_count && !sys_timepoint_expired(end));
1396
1397 return copied;
1398}
1399
1413__syscall int rtio_submit(struct rtio *r, uint32_t wait_count);
1414
1415static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
1416{
1417 int res = 0;
1418
1419#ifdef CONFIG_RTIO_SUBMIT_SEM
1420 /* TODO undefined behavior if another thread calls submit of course
1421 */
1422 if (wait_count > 0) {
1423 __ASSERT(!k_is_in_isr(),
1424 "expected rtio submit with wait count to be called from a thread");
1425
1426 k_sem_reset(r->submit_sem);
1427 r->submit_count = wait_count;
1428 }
1429#else
1430 uintptr_t cq_count = (uintptr_t)atomic_get(&r->cq_count) + wait_count;
1431#endif
1432
1433 /* Submit the queue to the executor which consumes submissions
1434 * and produces completions through ISR chains or other means.
1435 */
1436 rtio_executor_submit(r);
1437
1438
1439 /* TODO could be nicer if we could suspend the thread and not
1440 * wake up on each completion here.
1441 */
1442#ifdef CONFIG_RTIO_SUBMIT_SEM
1443
1444 if (wait_count > 0) {
1445 res = k_sem_take(r->submit_sem, K_FOREVER);
1446 __ASSERT(res == 0,
1447 "semaphore was reset or timed out while waiting on completions!");
1448 }
1449#else
1450 while ((uintptr_t)atomic_get(&r->cq_count) < cq_count) {
1451 Z_SPIN_DELAY(10);
1452 k_yield();
1453 }
1454#endif
1455
1456 return res;
1457}
1458
1463#ifdef __cplusplus
1464}
1465#endif
1466
1467#include <syscalls/rtio.h>
1468
1469#endif /* ZEPHYR_INCLUDE_RTIO_RTIO_H_ */
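The listing above is easier to follow with a small end-to-end usage sketch. The following is an editorial example, not part of rtio.h: it assumes an iodev named sensor_iodev provided elsewhere (for example by a driver through RTIO_IODEV_DEFINE()) and shows one blocking read flowing through the submission and completion queues.

#include <zephyr/rtio/rtio.h>

/* Assumed to be defined elsewhere, e.g. by a driver via RTIO_IODEV_DEFINE() */
extern struct rtio_iodev sensor_iodev;

RTIO_DEFINE(ez_io, 4, 4);

static uint8_t sample[16];

int read_once(void)
{
        struct rtio_sqe *sqe = rtio_sqe_acquire(&ez_io);

        if (sqe == NULL) {
                return -ENOMEM;
        }

        rtio_sqe_prep_read(sqe, &sensor_iodev, RTIO_PRIO_NORM, sample, sizeof(sample), NULL);

        /* Block until this one submission has completed */
        rtio_submit(&ez_io, 1);

        struct rtio_cqe *cqe = rtio_cqe_consume_block(&ez_io);
        int result = cqe->result;

        rtio_cqe_release(&ez_io, cqe);

        return result;
}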
long atomic_t
Definition: atomic.h:22
static ALWAYS_INLINE atomic_val_t atomic_inc(atomic_t *target)
Definition: atomic_xtensa.h:88
static ALWAYS_INLINE atomic_val_t atomic_get(const atomic_t *target)
Definition: atomic_xtensa.h:16
#define K_FOREVER
Generate infinite timeout delay.
Definition: kernel.h:1372
#define K_NO_WAIT
Generate null timeout delay.
Definition: kernel.h:1262
k_timepoint_t sys_timepoint_calc(k_timeout_t timeout)
Calculate a timepoint value.
static bool sys_timepoint_expired(k_timepoint_t timepoint)
Indicates if timepoint is expired.
Definition: sys_clock.h:327
#define K_TIMEOUT_EQ(a, b)
Compare timeouts for equality.
Definition: sys_clock.h:80
#define K_TICKS(t)
Generate timeout delay from system ticks.
Definition: kernel.h:1314
bool k_is_in_isr(void)
Determine if code is running at interrupt level.
int sys_mem_blocks_free_contiguous(sys_mem_blocks_t *mem_block, void *block, size_t count)
Free contiguous multiple memory blocks.
int sys_mem_blocks_alloc_contiguous(sys_mem_blocks_t *mem_block, size_t count, void **out_block)
Allocate a contiguous set of memory blocks.
#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags)
Get the block count of a mempool flags.
Definition: rtio.h:176
#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags)
Get the block index of a mempool flags.
Definition: rtio.h:168
#define RTIO_CQE_FLAG_MEMPOOL_BUFFER
The entry's buffer was allocated from the RTIO's mempool.
Definition: rtio.h:158
#define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt)
Prepare CQE flags for a mempool read.
Definition: rtio.h:185
#define RTIO_CQE_FLAG_GET(flags)
Definition: rtio.h:160
static struct rtio_mpsc_node * rtio_mpsc_pop(struct rtio_mpsc *q)
Pop a node off of the list.
Definition: rtio_mpsc.h:147
static ALWAYS_INLINE void rtio_mpsc_push(struct rtio_mpsc *q, struct rtio_mpsc_node *n)
Push a node.
Definition: rtio_mpsc.h:128
#define RTIO_SQE_MULTISHOT
The SQE should continue producing CQEs until canceled.
Definition: rtio.h:134
#define RTIO_SQE_TRANSACTION
The next request in the queue is part of a transaction.
Definition: rtio.h:106
#define RTIO_SQE_MEMPOOL_BUFFER
The buffer should be allocated by the RTIO mempool.
Definition: rtio.h:118
#define RTIO_SQE_CANCELED
The SQE should not execute if possible.
Definition: rtio.h:126
#define RTIO_SQE_CHAINED
The next request in the queue should wait on this one.
Definition: rtio.h:94
static void rtio_sqe_prep_transceive(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, uint8_t *tx_buf, uint8_t *rx_buf, uint32_t buf_len, void *userdata)
Prepare a transceive op submission.
Definition: rtio.h:599
static void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, void *userdata)
Prepare a read op submission with context's mempool.
Definition: rtio.h:511
void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result)
#define RTIO_OP_CALLBACK
An operation that calls a given function (callback)
Definition: rtio.h:468
static uint32_t rtio_sqe_acquirable(struct rtio *r)
Count of acquirable submission queue events.
Definition: rtio.h:823
static void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
Definition: rtio.h:656
static void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, const uint8_t *tiny_write_data, uint8_t tiny_write_len, void *userdata)
Prepare a tiny write op submission.
Definition: rtio.h:556
static void rtio_iodev_cancel_all(struct rtio_iodev *iodev)
Cancel all requests that are pending for the iodev.
Definition: rtio.h:1115
static size_t rtio_mempool_block_size(const struct rtio *r)
Get the mempool block size of the RTIO context.
Definition: rtio.h:377
static void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
Submit a completion queue event with a given result and userdata.
Definition: rtio.h:1139
static void rtio_sqe_prep_nop(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, void *userdata)
Prepare a nop (no op) submission.
Definition: rtio.h:477
void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
Release memory that was allocated by the RTIO's memory pool.
static int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
Copy an array of SQEs into the queue.
Definition: rtio.h:1348
static void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
Produce a complete queue event if available.
Definition: rtio.h:933
#define RTIO_OP_TINY_TX
An operation that transmits tiny writes by copying the data to write.
Definition: rtio.h:465
static uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
Compute the CQE flags from the rtio_iodev_sqe entry.
Definition: rtio.h:1014
void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
static int rtio_block_pool_alloc(struct rtio *r, size_t min_sz, size_t max_sz, uint8_t **buf, uint32_t *buf_len)
Definition: rtio.h:663
int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes, struct rtio_sqe **handle, size_t sqe_count)
Copy an array of SQEs into the queue and get resulting handles back.
static struct rtio_cqe * rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
Definition: rtio.h:639
struct k_mem_partition rtio_partition
The memory partition associated with all RTIO context information.
static void rtio_sqe_prep_read(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, uint8_t *buf, uint32_t len, void *userdata)
Prepare a read op submission.
Definition: rtio.h:490
static struct rtio_sqe * rtio_sqe_acquire(struct rtio *r)
Acquire a single submission queue event if available.
Definition: rtio.h:884
#define RTIO_OP_TX
An operation that transmits (writes)
Definition: rtio.h:462
static void rtio_sqe_drop_all(struct rtio *r)
Drop all previously acquired sqe.
Definition: rtio.h:902
static void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, void *userdata)
Definition: rtio.h:519
int rtio_cqe_copy_out(struct rtio *r, struct rtio_cqe *cqes, size_t cqe_count, k_timeout_t timeout)
Copy an array of CQEs from the queue.
static void rtio_sqe_prep_callback(struct rtio_sqe *sqe, rtio_callback_t callback, void *arg0, void *userdata)
Prepare a callback op submission.
Definition: rtio.h:582
static void rtio_access_grant(struct rtio *r, struct k_thread *t)
Grant access to an RTIO context to a user thread.
Definition: rtio.h:1254
#define RTIO_OP_TXRX
An operation that transceives (reads and writes simultaneously)
Definition: rtio.h:471
static void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
Release consumed completion queue event.
Definition: rtio.h:1003
static int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len, uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
Get the buffer associated with the RX submission.
Definition: rtio.h:1180
static void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
Inform the executor of a submission's completion with error.
Definition: rtio.h:1105
static void rtio_sqe_prep_write(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, uint8_t *buf, uint32_t len, void *userdata)
Prepare a write op submission.
Definition: rtio.h:530
void(* rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, void *arg0)
Callback signature for RTIO_OP_CALLBACK.
Definition: rtio.h:225
int rtio_sqe_cancel(struct rtio_sqe *sqe)
Attempt to cancel an SQE.
static void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
Definition: rtio.h:632
static void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
Inform the executor of a submission completion with success.
Definition: rtio.h:1092
#define RTIO_OP_NOP
An operation that does nothing and will complete immediately.
Definition: rtio.h:456
static struct rtio_cqe * rtio_cqe_acquire(struct rtio *r)
Acquire a complete queue event if available.
Definition: rtio.h:917
static struct rtio_iodev_sqe * rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
Get the next sqe in the chain.
Definition: rtio.h:854
static struct rtio_cqe * rtio_cqe_consume(struct rtio *r)
Consume a single completion queue event if available.
Definition: rtio.h:949
static struct rtio_iodev_sqe * rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
Definition: rtio.h:617
static struct rtio_iodev_sqe * rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
Get the next sqe in the chain or transaction.
Definition: rtio.h:871
int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe, uint8_t **buff, uint32_t *buff_len)
Retrieve the mempool buffer that was allocated for the CQE.
static struct rtio_iodev_sqe * rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
Get the next sqe in the transaction.
Definition: rtio.h:836
void rtio_executor_submit(struct rtio *r)
static struct rtio_cqe * rtio_cqe_consume_block(struct rtio *r)
Wait for and consume a single completion queue event.
Definition: rtio.h:979
static void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len)
Definition: rtio.h:693
#define RTIO_OP_RX
An operation that receives (reads)
Definition: rtio.h:459
int rtio_submit(struct rtio *r, uint32_t wait_count)
Submit I/O requests to the underlying executor.
void k_sem_reset(struct k_sem *sem)
Resets a semaphore's count to zero.
void k_sem_give(struct k_sem *sem)
Give a semaphore.
int k_sem_take(struct k_sem *sem, k_timeout_t timeout)
Take a semaphore.
#define BIT(n)
Unsigned integer with bit position n set (signed in assembly language).
Definition: util_macro.h:44
#define CONTAINER_OF(ptr, type, field)
Get a pointer to a structure containing the element.
Definition: util.h:247
#define DIV_ROUND_UP(n, d)
Divide and round up.
Definition: util.h:286
#define EINVAL
Invalid argument.
Definition: errno.h:61
#define ENOMEM
Not enough core.
Definition: errno.h:51
#define ENOTSUP
Unsupported value.
Definition: errno.h:115
#define ECANCELED
Operation canceled.
Definition: errno.h:118
void k_yield(void)
Yield the current thread.
int32_t k_sleep(k_timeout_t timeout)
Put the current thread to sleep.
void k_object_access_grant(const void *object, struct k_thread *thread)
Grant a thread access to a kernel object.
Public kernel APIs.
Memory Blocks Allocator.
A wait-free intrusive multi producer single consumer (MPSC) queue using a singly linked list.
__UINT32_TYPE__ uint32_t
Definition: stdint.h:90
__INT32_TYPE__ int32_t
Definition: stdint.h:74
__UINT8_TYPE__ uint8_t
Definition: stdint.h:88
#define UINT16_MAX
Definition: stdint.h:28
__UINTPTR_TYPE__ uintptr_t
Definition: stdint.h:105
__UINT16_TYPE__ uint16_t
Definition: stdint.h:89
__INT8_TYPE__ int8_t
Definition: stdint.h:72
void * memset(void *buf, int c, size_t n)
void * memcpy(void *ZRESTRICT d, const void *ZRESTRICT s, size_t n)
Memory Partition.
Definition: mem_domain.h:55
Thread Structure.
Definition: thread.h:250
Kernel timeout type.
Definition: sys_clock.h:65
Kernel timepoint type.
Definition: sys_clock.h:219
Definition: rtio.h:305
struct rtio_cqe * pool
Definition: rtio.h:309
const uint16_t pool_size
Definition: rtio.h:307
uint16_t pool_free
Definition: rtio.h:308
struct rtio_mpsc free_q
Definition: rtio.h:306
A completion queue event.
Definition: rtio.h:290
void * userdata
Associated userdata with operation.
Definition: rtio.h:294
struct rtio_mpsc_node q
Definition: rtio.h:291
uint32_t flags
Flags associated with the operation.
Definition: rtio.h:295
int32_t result
Result from operation.
Definition: rtio.h:293
API that an RTIO IO device should implement.
Definition: rtio.h:429
void(* submit)(struct rtio_iodev_sqe *iodev_sqe)
Submit to the iodev an entry to work on.
Definition: rtio.h:438
An IO device submission queue entry.
Definition: rtio.h:419
struct rtio_mpsc_node q
Definition: rtio.h:421
struct rtio_iodev_sqe * next
Definition: rtio.h:422
struct rtio_sqe sqe
Definition: rtio.h:420
struct rtio * r
Definition: rtio.h:423
An IO device with a function table for submitting requests.
Definition: rtio.h:444
struct rtio_mpsc iodev_sq
Definition: rtio.h:449
const struct rtio_iodev_api * api
Definition: rtio.h:446
void * data
Definition: rtio.h:452
Queue member.
Definition: rtio_mpsc.h:81
MPSC Queue.
Definition: rtio_mpsc.h:88
Definition: rtio.h:298
struct rtio_mpsc free_q
Definition: rtio.h:299
struct rtio_iodev_sqe * pool
Definition: rtio.h:302
const uint16_t pool_size
Definition: rtio.h:300
uint16_t pool_free
Definition: rtio.h:301
A submission queue event.
Definition: rtio.h:230
void * userdata
User provided data which is returned upon operation completion.
Definition: rtio.h:250
uint8_t * tx_buf
Definition: rtio.h:275
uint8_t op
Op code.
Definition: rtio.h:231
void * arg0
Last argument given to callback.
Definition: rtio.h:269
uint8_t * rx_buf
Definition: rtio.h:276
uint8_t prio
Op priority.
Definition: rtio.h:233
uint8_t * buf
Buffer to use.
Definition: rtio.h:257
uint32_t buf_len
Length of buffer.
Definition: rtio.h:256
const struct rtio_iodev * iodev
Device to operation on.
Definition: rtio.h:241
uint16_t flags
Op Flags.
Definition: rtio.h:235
uint8_t tiny_buf_len
Length of tiny buffer.
Definition: rtio.h:262
uint32_t txrx_buf_len
Definition: rtio.h:274
uint8_t tiny_buf[7]
Tiny buffer.
Definition: rtio.h:263
rtio_callback_t callback
Definition: rtio.h:268
uint16_t iodev_flags
Op iodev flags.
Definition: rtio.h:237
An RTIO context containing what can be viewed as a pair of queues.
Definition: rtio.h:323
struct rtio_cqe_pool * cqe_pool
Definition: rtio.h:353
struct rtio_mpsc cq
Definition: rtio.h:364
struct rtio_mpsc sq
Definition: rtio.h:361
atomic_t cq_count
Definition: rtio.h:342
struct rtio_sqe_pool * sqe_pool
Definition: rtio.h:350
atomic_t xcqcnt
Definition: rtio.h:347
Misc utilities.