Zephyr API Documentation 4.0.99
A Scalable Open Source RTOS
rtio.h
1/*
2 * Copyright (c) 2022 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
26#ifndef ZEPHYR_INCLUDE_RTIO_RTIO_H_
27#define ZEPHYR_INCLUDE_RTIO_RTIO_H_
28
29#include <string.h>
30
31#include <zephyr/app_memory/app_memdomain.h>
32#include <zephyr/device.h>
33#include <zephyr/kernel.h>
34#include <zephyr/sys/__assert.h>
35#include <zephyr/sys/atomic.h>
36#include <zephyr/sys/mem_blocks.h>
37#include <zephyr/sys/util.h>
38#include <zephyr/sys/iterable_sections.h>
39#include <zephyr/sys/mpsc_lockfree.h>
40
41#ifdef __cplusplus
42extern "C" {
43#endif
44
45
65#define RTIO_PRIO_LOW 0U
66
70#define RTIO_PRIO_NORM 127U
71
75#define RTIO_PRIO_HIGH 255U
76
96#define RTIO_SQE_CHAINED BIT(0)
97
108#define RTIO_SQE_TRANSACTION BIT(1)
109
110
120#define RTIO_SQE_MEMPOOL_BUFFER BIT(2)
121
128#define RTIO_SQE_CANCELED BIT(3)
129
136#define RTIO_SQE_MULTISHOT BIT(4)
137
141#define RTIO_SQE_NO_RESPONSE BIT(5)
142
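/*
 * Illustrative sketch (not part of the original header): how the chaining
 * flag is typically combined with two submissions. The context `ctx`, the
 * iodev `spi_iodev`, and the buffers `tx`/`rx` are assumed names used only
 * for this example.
 *
 *   struct rtio_sqe *first = rtio_sqe_acquire(&ctx);
 *   struct rtio_sqe *second = rtio_sqe_acquire(&ctx);
 *
 *   rtio_sqe_prep_write(first, &spi_iodev, RTIO_PRIO_NORM, tx, tx_len, NULL);
 *   first->flags |= RTIO_SQE_CHAINED;    // `second` starts only after `first` completes
 *   rtio_sqe_prep_read(second, &spi_iodev, RTIO_PRIO_NORM, rx, rx_len, NULL);
 *
 *   rtio_submit(&ctx, 2);                // wait for both completions
 */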
160#define RTIO_CQE_FLAG_MEMPOOL_BUFFER BIT(0)
161
162#define RTIO_CQE_FLAG_GET(flags) FIELD_GET(GENMASK(7, 0), (flags))
163
170#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags) FIELD_GET(GENMASK(19, 8), (flags))
171
178#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags) FIELD_GET(GENMASK(31, 20), (flags))
179
187#define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt) \
188 (FIELD_PREP(GENMASK(7, 0), RTIO_CQE_FLAG_MEMPOOL_BUFFER) | \
189 FIELD_PREP(GENMASK(19, 8), blk_idx) | FIELD_PREP(GENMASK(31, 20), blk_cnt))
190
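/*
 * Illustrative sketch (not part of the original header): how the mempool CQE
 * flag fields pack and unpack. The block index and count below are example
 * values only.
 *
 *   uint32_t flags = RTIO_CQE_FLAG_PREP_MEMPOOL(3, 2);
 *
 *   RTIO_CQE_FLAG_GET(flags);                  // RTIO_CQE_FLAG_MEMPOOL_BUFFER
 *   RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags);  // 3, index of the first block
 *   RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags);  // 2, number of blocks used
 */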
198#define RTIO_IODEV_I2C_STOP BIT(1)
199
203#define RTIO_IODEV_I2C_RESTART BIT(2)
204
208#define RTIO_IODEV_I2C_10_BITS BIT(3)
209
213#define RTIO_IODEV_I3C_STOP BIT(1)
214
218#define RTIO_IODEV_I3C_RESTART BIT(2)
219
223#define RTIO_IODEV_I3C_HDR BIT(3)
224
228#define RTIO_IODEV_I3C_NBCH BIT(4)
229
233#define RTIO_IODEV_I3C_HDR_MODE_MASK GENMASK(15, 8)
234
238#define RTIO_IODEV_I3C_HDR_MODE_SET(flags) \
239 FIELD_PREP(RTIO_IODEV_I3C_HDR_MODE_MASK, flags)
240
244#define RTIO_IODEV_I3C_HDR_MODE_GET(flags) \
245 FIELD_GET(RTIO_IODEV_I3C_HDR_MODE_MASK, flags)
246
250#define RTIO_IODEV_I3C_HDR_CMD_CODE_MASK GENMASK(22, 16)
251
255#define RTIO_IODEV_I3C_HDR_CMD_CODE_SET(flags) \
256 FIELD_PREP(RTIO_IODEV_I3C_HDR_CMD_CODE_MASK, flags)
257
261#define RTIO_IODEV_I3C_HDR_CMD_CODE_GET(flags) \
262 FIELD_GET(RTIO_IODEV_I3C_HDR_CMD_CODE_MASK, flags)
263
265struct rtio;
266struct rtio_cqe;
267struct rtio_sqe;
268struct rtio_sqe_pool;
269struct rtio_cqe_pool;
270struct rtio_iodev;
271struct rtio_iodev_sqe;
281typedef void (*rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, void *arg0);
282
286struct rtio_sqe {
287 uint8_t op; /**< Op code */
288
289 uint8_t prio; /**< Op priority */
290
291 uint16_t flags; /**< Op Flags */
292
293 uint32_t iodev_flags; /**< Op iodev flags */
294
295 const struct rtio_iodev *iodev;
304 void *userdata;
305
306 union {
307
309 struct {
310 uint32_t buf_len;
311 const uint8_t *buf;
312 } tx;
313
315 struct {
316 uint32_t buf_len;
317 uint8_t *buf;
318 } rx;
319
321 struct {
322 uint8_t buf_len;
323 uint8_t buf[7];
324 } tiny_tx;
325
327 struct {
328 rtio_callback_t callback;
329 void *arg0;
330 } callback;
331
333 struct {
334 uint32_t buf_len;
335 const uint8_t *tx_buf; /**< Buffer to write from */
336 uint8_t *rx_buf; /**< Buffer to read into */
337 } txrx;
338
340 uint32_t i2c_config;
341
343 struct {
344 /* enum i3c_config_type type; */
345 int type;
346 void *config;
347 } i3c_config;
348
350 /* struct i3c_ccc_payload *ccc_payload; */
351 void *ccc_payload;
352 };
353};
354
356/* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
357BUILD_ASSERT(sizeof(struct rtio_sqe) <= 64);
363struct rtio_cqe {
364 struct mpsc_node q;
365
366 int32_t result; /**< Result from operation */
367 void *userdata; /**< Associated userdata with operation */
368 uint32_t flags; /**< Flags associated with the operation */
369};
370
371struct rtio_sqe_pool {
372 struct mpsc free_q;
373 const uint16_t pool_size;
374 uint16_t pool_free;
375 struct rtio_iodev_sqe *pool;
376};
377
378struct rtio_cqe_pool {
379 struct mpsc free_q;
380 const uint16_t pool_size;
381 uint16_t pool_free;
382 struct rtio_cqe *pool;
383};
384
396struct rtio {
397#ifdef CONFIG_RTIO_SUBMIT_SEM
398 /* A wait semaphore which may suspend the calling thread
399 * to wait for some number of completions when calling submit
400 */
401 struct k_sem *submit_sem;
402
403 uint32_t submit_count;
404#endif
405
406#ifdef CONFIG_RTIO_CONSUME_SEM
407 /* A wait semaphore which may suspend the calling thread
408 * to wait for some number of completions while consuming
409 * them from the completion queue
410 */
411 struct k_sem *consume_sem;
412#endif
413
414 /* Total number of completions */
415 atomic_t cq_count;
416
417 /* Number of completions that were unable to be submitted with results
418 * due to the cq spsc being full
419 */
420 atomic_t xcqcnt;
421
422 /* Submission queue object pool with free list */
423 struct rtio_sqe_pool *sqe_pool;
424
425 /* Complete queue object pool with free list */
426 struct rtio_cqe_pool *cqe_pool;
427
428#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
429 /* Mem block pool */
430 struct sys_mem_blocks *block_pool;
431#endif
432
433 /* Submission queue */
434 struct mpsc sq;
435
436 /* Completion queue */
437 struct mpsc cq;
438};
439
441extern struct k_mem_partition rtio_partition;
442
450static inline size_t rtio_mempool_block_size(const struct rtio *r)
451{
452#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
453 ARG_UNUSED(r);
454 return 0;
455#else
456 if (r == NULL || r->block_pool == NULL) {
457 return 0;
458 }
459 return BIT(r->block_pool->info.blk_sz_shift);
460#endif
461}
462
470#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
471static inline uint16_t __rtio_compute_mempool_block_index(const struct rtio *r, const void *ptr)
472{
473 uintptr_t addr = (uintptr_t)ptr;
474 struct sys_mem_blocks *mem_pool = r->block_pool;
475 uint32_t block_size = rtio_mempool_block_size(r);
476
477 uintptr_t buff = (uintptr_t)mem_pool->buffer;
478 uint32_t buff_size = mem_pool->info.num_blocks * block_size;
479
480 if (addr < buff || addr >= buff + buff_size) {
481 return UINT16_MAX;
482 }
483 return (addr - buff) / block_size;
484}
485#endif
486
492struct rtio_iodev_sqe {
493 struct rtio_sqe sqe;
494 struct mpsc_node q;
495 struct rtio_iodev_sqe *next;
496 struct rtio *r;
497};
498
502struct rtio_iodev_api {
511 void (*submit)(struct rtio_iodev_sqe *iodev_sqe);
512};
513
517struct rtio_iodev {
518 /* Function pointer table */
519 const struct rtio_iodev_api *api;
520
521 /* Data associated with this iodev */
522 void *data;
523};
524
526#define RTIO_OP_NOP 0
527
529#define RTIO_OP_RX (RTIO_OP_NOP+1)
530
532#define RTIO_OP_TX (RTIO_OP_RX+1)
533
535#define RTIO_OP_TINY_TX (RTIO_OP_TX+1)
536
538#define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)
539
541#define RTIO_OP_TXRX (RTIO_OP_CALLBACK+1)
542
544#define RTIO_OP_I2C_RECOVER (RTIO_OP_TXRX+1)
545
547#define RTIO_OP_I2C_CONFIGURE (RTIO_OP_I2C_RECOVER+1)
548
550#define RTIO_OP_I3C_RECOVER (RTIO_OP_I2C_CONFIGURE+1)
551
553#define RTIO_OP_I3C_CONFIGURE (RTIO_OP_I3C_RECOVER+1)
554
556#define RTIO_OP_I3C_CCC (RTIO_OP_I3C_CONFIGURE+1)
557
561static inline void rtio_sqe_prep_nop(struct rtio_sqe *sqe,
562 const struct rtio_iodev *iodev,
563 void *userdata)
564{
565 memset(sqe, 0, sizeof(struct rtio_sqe));
566 sqe->op = RTIO_OP_NOP;
567 sqe->iodev = iodev;
568 sqe->userdata = userdata;
569}
570
574static inline void rtio_sqe_prep_read(struct rtio_sqe *sqe,
575 const struct rtio_iodev *iodev,
576 int8_t prio,
577 uint8_t *buf,
578 uint32_t len,
579 void *userdata)
580{
581 memset(sqe, 0, sizeof(struct rtio_sqe));
582 sqe->op = RTIO_OP_RX;
583 sqe->prio = prio;
584 sqe->iodev = iodev;
585 sqe->rx.buf_len = len;
586 sqe->rx.buf = buf;
587 sqe->userdata = userdata;
588}
589
595static inline void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe,
596 const struct rtio_iodev *iodev, int8_t prio,
597 void *userdata)
598{
599 rtio_sqe_prep_read(sqe, iodev, prio, NULL, 0, userdata);
600 sqe->flags = RTIO_SQE_MEMPOOL_BUFFER;
601}
602
603static inline void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe,
604 const struct rtio_iodev *iodev, int8_t prio,
605 void *userdata)
606{
607 rtio_sqe_prep_read_with_pool(sqe, iodev, prio, userdata);
608 sqe->flags |= RTIO_SQE_MULTISHOT;
609}
610
614static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe,
615 const struct rtio_iodev *iodev,
616 int8_t prio,
617 const uint8_t *buf,
618 uint32_t len,
619 void *userdata)
620{
621 memset(sqe, 0, sizeof(struct rtio_sqe));
622 sqe->op = RTIO_OP_TX;
623 sqe->prio = prio;
624 sqe->iodev = iodev;
625 sqe->tx.buf_len = len;
626 sqe->tx.buf = buf;
627 sqe->userdata = userdata;
628}
629
640static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe,
641 const struct rtio_iodev *iodev,
642 int8_t prio,
643 const uint8_t *tiny_write_data,
644 uint8_t tiny_write_len,
645 void *userdata)
646{
647 __ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_tx.buf));
648
649 memset(sqe, 0, sizeof(struct rtio_sqe));
650 sqe->op = RTIO_OP_TINY_TX;
651 sqe->prio = prio;
652 sqe->iodev = iodev;
653 sqe->tiny_tx.buf_len = tiny_write_len;
654 memcpy(sqe->tiny_tx.buf, tiny_write_data, tiny_write_len);
655 sqe->userdata = userdata;
656}
657
666static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
667 rtio_callback_t callback,
668 void *arg0,
669 void *userdata)
670{
671 memset(sqe, 0, sizeof(struct rtio_sqe));
672 sqe->op = RTIO_OP_CALLBACK;
673 sqe->prio = 0;
674 sqe->iodev = NULL;
675 sqe->callback.callback = callback;
676 sqe->callback.arg0 = arg0;
677 sqe->userdata = userdata;
678}
679
690static inline void rtio_sqe_prep_callback_no_cqe(struct rtio_sqe *sqe,
691 rtio_callback_t callback,
692 void *arg0,
693 void *userdata)
694{
695 rtio_sqe_prep_callback(sqe, callback, arg0, userdata);
696 sqe->flags |= RTIO_SQE_NO_RESPONSE;
697}
698
702static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
703 const struct rtio_iodev *iodev,
704 int8_t prio,
705 const uint8_t *tx_buf,
706 uint8_t *rx_buf,
707 uint32_t buf_len,
708 void *userdata)
709{
710 memset(sqe, 0, sizeof(struct rtio_sqe));
711 sqe->op = RTIO_OP_TXRX;
712 sqe->prio = prio;
713 sqe->iodev = iodev;
714 sqe->txrx.buf_len = buf_len;
715 sqe->txrx.tx_buf = tx_buf;
716 sqe->txrx.rx_buf = rx_buf;
717 sqe->userdata = userdata;
718}
719
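/*
 * Illustrative sketch (not part of the original header): a register read built
 * from a tiny write grouped with a read as one transaction, assuming an I2C
 * iodev that honors the RTIO_IODEV_I2C_* flags. `ctx`, `i2c_iodev`, and the
 * register address are assumed names/values for this example only.
 *
 *   uint8_t reg = 0x0F;
 *   uint8_t val;
 *   struct rtio_sqe *wr = rtio_sqe_acquire(&ctx);
 *   struct rtio_sqe *rd = rtio_sqe_acquire(&ctx);
 *
 *   rtio_sqe_prep_tiny_write(wr, &i2c_iodev, RTIO_PRIO_NORM, &reg, 1, NULL);
 *   wr->flags |= RTIO_SQE_TRANSACTION;   // group the write with the read below
 *   rtio_sqe_prep_read(rd, &i2c_iodev, RTIO_PRIO_NORM, &val, 1, NULL);
 *   rd->iodev_flags = RTIO_IODEV_I2C_RESTART | RTIO_IODEV_I2C_STOP;
 *
 *   rtio_submit(&ctx, 1);                // wait for the transaction to finish
 */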
720static inline struct rtio_iodev_sqe *rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
721{
722 struct mpsc_node *node = mpsc_pop(&pool->free_q);
723
724 if (node == NULL) {
725 return NULL;
726 }
727
728 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
729
730 pool->pool_free--;
731
732 return iodev_sqe;
733}
734
735static inline void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
736{
737 mpsc_push(&pool->free_q, &iodev_sqe->q);
738
739 pool->pool_free++;
740}
741
742static inline struct rtio_cqe *rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
743{
744 struct mpsc_node *node = mpsc_pop(&pool->free_q);
745
746 if (node == NULL) {
747 return NULL;
748 }
749
750 struct rtio_cqe *cqe = CONTAINER_OF(node, struct rtio_cqe, q);
751
752 memset(cqe, 0, sizeof(struct rtio_cqe));
753
754 pool->pool_free--;
755
756 return cqe;
757}
758
759static inline void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
760{
761 mpsc_push(&pool->free_q, &cqe->q);
762
763 pool->pool_free++;
764}
765
766static inline int rtio_block_pool_alloc(struct rtio *r, size_t min_sz,
767 size_t max_sz, uint8_t **buf, uint32_t *buf_len)
768{
769#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
770 ARG_UNUSED(r);
771 ARG_UNUSED(min_sz);
772 ARG_UNUSED(max_sz);
773 ARG_UNUSED(buf);
774 ARG_UNUSED(buf_len);
775 return -ENOTSUP;
776#else
777 const uint32_t block_size = rtio_mempool_block_size(r);
778 uint32_t bytes = max_sz;
779
780 /* Not every context has a block pool and the block size may return 0 in
781 * that case
782 */
783 if (block_size == 0) {
784 return -ENOMEM;
785 }
786
787 do {
788 size_t num_blks = DIV_ROUND_UP(bytes, block_size);
789 int rc = sys_mem_blocks_alloc_contiguous(r->block_pool, num_blks, (void **)buf);
790
791 if (rc == 0) {
792 *buf_len = num_blks * block_size;
793 return 0;
794 }
795
796 if (bytes <= block_size) {
797 break;
798 }
799
800 bytes -= block_size;
801 } while (bytes >= min_sz);
802
803 return -ENOMEM;
804#endif
805}
806
807static inline void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len)
808{
809#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
810 ARG_UNUSED(r);
811 ARG_UNUSED(buf);
812 ARG_UNUSED(buf_len);
813#else
814 size_t num_blks = buf_len >> r->block_pool->info.blk_sz_shift;
815
816 sys_mem_blocks_free_contiguous(r->block_pool, buf, num_blks);
817#endif
818}
819
820/* Do not try and reformat the macros */
821/* clang-format off */
822
830#define RTIO_IODEV_DEFINE(name, iodev_api, iodev_data) \
831 STRUCT_SECTION_ITERABLE(rtio_iodev, name) = { \
832 .api = (iodev_api), \
833 .data = (iodev_data), \
834 }
835
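/*
 * Illustrative sketch (not part of the original header): defining a custom
 * iodev. The handler name and behavior here are assumptions for the example;
 * a real iodev would usually start hardware and complete later from an ISR.
 *
 *   static void my_iodev_submit(struct rtio_iodev_sqe *iodev_sqe)
 *   {
 *           // Inspect iodev_sqe->sqe and start the operation; this sketch
 *           // completes immediately with success (or an error code).
 *           rtio_iodev_sqe_ok(iodev_sqe, 0);    // or rtio_iodev_sqe_err(iodev_sqe, -EIO)
 *   }
 *
 *   static const struct rtio_iodev_api my_iodev_api = {
 *           .submit = my_iodev_submit,
 *   };
 *
 *   RTIO_IODEV_DEFINE(my_iodev, &my_iodev_api, NULL);
 */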
836#define Z_RTIO_SQE_POOL_DEFINE(name, sz) \
837 static struct rtio_iodev_sqe CONCAT(_sqe_pool_, name)[sz]; \
838 STRUCT_SECTION_ITERABLE(rtio_sqe_pool, name) = { \
839 .free_q = MPSC_INIT((name.free_q)), \
840 .pool_size = sz, \
841 .pool_free = sz, \
842 .pool = CONCAT(_sqe_pool_, name), \
843 }
844
845
846#define Z_RTIO_CQE_POOL_DEFINE(name, sz) \
847 static struct rtio_cqe CONCAT(_cqe_pool_, name)[sz]; \
848 STRUCT_SECTION_ITERABLE(rtio_cqe_pool, name) = { \
849 .free_q = MPSC_INIT((name.free_q)), \
850 .pool_size = sz, \
851 .pool_free = sz, \
852 .pool = CONCAT(_cqe_pool_, name), \
853 }
854
864#define RTIO_BMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_BMEM(rtio_partition) static), (static))
865
875#define RTIO_DMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_DMEM(rtio_partition) static), (static))
876
877#define Z_RTIO_BLOCK_POOL_DEFINE(name, blk_sz, blk_cnt, blk_align) \
878 RTIO_BMEM uint8_t __aligned(WB_UP(blk_align)) \
879 CONCAT(_block_pool_, name)[blk_cnt*WB_UP(blk_sz)]; \
880 _SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF(name, WB_UP(blk_sz), blk_cnt, \
881 CONCAT(_block_pool_, name), RTIO_DMEM)
882
883#define Z_RTIO_DEFINE(name, _sqe_pool, _cqe_pool, _block_pool) \
884 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, \
885 (static K_SEM_DEFINE(CONCAT(_submit_sem_, name), 0, K_SEM_MAX_LIMIT))) \
886 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, \
887 (static K_SEM_DEFINE(CONCAT(_consume_sem_, name), 0, K_SEM_MAX_LIMIT))) \
888 STRUCT_SECTION_ITERABLE(rtio, name) = { \
889 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_sem = &CONCAT(_submit_sem_, name),)) \
890 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_count = 0,)) \
891 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, (.consume_sem = &CONCAT(_consume_sem_, name),))\
892 .cq_count = ATOMIC_INIT(0), \
893 .xcqcnt = ATOMIC_INIT(0), \
894 .sqe_pool = _sqe_pool, \
895 .cqe_pool = _cqe_pool, \
896 IF_ENABLED(CONFIG_RTIO_SYS_MEM_BLOCKS, (.block_pool = _block_pool,)) \
897 .sq = MPSC_INIT((name.sq)), \
898 .cq = MPSC_INIT((name.cq)), \
899 }
900
908#define RTIO_DEFINE(name, sq_sz, cq_sz) \
909 Z_RTIO_SQE_POOL_DEFINE(CONCAT(name, _sqe_pool), sq_sz); \
910 Z_RTIO_CQE_POOL_DEFINE(CONCAT(name, _cqe_pool), cq_sz); \
911 Z_RTIO_DEFINE(name, &CONCAT(name, _sqe_pool), \
912 &CONCAT(name, _cqe_pool), NULL)
913
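/*
 * Illustrative sketch (not part of the original header): a small context with
 * four submission and four completion entries, exercised with a callback op.
 * The names `ctx`, `done_cb`, and `example` are assumptions for this example.
 *
 *   static void done_cb(struct rtio *r, const struct rtio_sqe *sqe, void *arg0)
 *   {
 *           // runs from the executor when this SQE is processed
 *   }
 *
 *   RTIO_DEFINE(ctx, 4, 4);
 *
 *   void example(void)
 *   {
 *           struct rtio_sqe *sqe = rtio_sqe_acquire(&ctx);
 *
 *           rtio_sqe_prep_callback(sqe, done_cb, NULL, NULL);
 *           rtio_submit(&ctx, 1);    // returns once the callback SQE has completed
 *   }
 */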
914/* clang-format on */
915
926#define RTIO_DEFINE_WITH_MEMPOOL(name, sq_sz, cq_sz, num_blks, blk_size, balign) \
927 Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz); \
928 Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz); \
929 Z_RTIO_BLOCK_POOL_DEFINE(name##_block_pool, blk_size, num_blks, balign); \
930 Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, &name##_block_pool)
931
932/* clang-format on */
933
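/*
 * Illustrative sketch (not part of the original header): letting the context's
 * mempool provide the receive buffer and handing it back afterwards. Requires
 * CONFIG_RTIO_SYS_MEM_BLOCKS; `ctx` and `dev_iodev` are assumed names, and the
 * pool geometry below is an example only.
 *
 *   RTIO_DEFINE_WITH_MEMPOOL(ctx, 4, 4, 16, 32, 4);
 *
 *   void example(void)
 *   {
 *           struct rtio_sqe *sqe = rtio_sqe_acquire(&ctx);
 *           struct rtio_cqe *cqe;
 *           uint8_t *buf;
 *           uint32_t buf_len;
 *
 *           rtio_sqe_prep_read_with_pool(sqe, &dev_iodev, RTIO_PRIO_NORM, NULL);
 *           rtio_submit(&ctx, 1);
 *
 *           cqe = rtio_cqe_consume(&ctx);
 *           if (cqe != NULL) {
 *                   if (rtio_cqe_get_mempool_buffer(&ctx, cqe, &buf, &buf_len) == 0) {
 *                           // use buf[0..buf_len), then hand it back to the pool
 *                           rtio_release_buffer(&ctx, buf, buf_len);
 *                   }
 *                   rtio_cqe_release(&ctx, cqe);
 *           }
 *   }
 */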
941static inline uint32_t rtio_sqe_acquirable(struct rtio *r)
942{
943 return r->sqe_pool->pool_free;
944}
945
954static inline struct rtio_iodev_sqe *rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
955{
956 if (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) {
957 return iodev_sqe->next;
958 } else {
959 return NULL;
960 }
961}
962
963
972static inline struct rtio_iodev_sqe *rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
973{
974 if (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) {
975 return iodev_sqe->next;
976 } else {
977 return NULL;
978 }
979}
980
989static inline struct rtio_iodev_sqe *rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
990{
991 return iodev_sqe->next;
992}
993
1002static inline struct rtio_sqe *rtio_sqe_acquire(struct rtio *r)
1003{
1004 struct rtio_iodev_sqe *iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool);
1005
1006 if (iodev_sqe == NULL) {
1007 return NULL;
1008 }
1009
1010 mpsc_push(&r->sq, &iodev_sqe->q);
1011
1012 return &iodev_sqe->sqe;
1013}
1014
1020static inline void rtio_sqe_drop_all(struct rtio *r)
1021{
1022 struct rtio_iodev_sqe *iodev_sqe;
1023 struct mpsc_node *node = mpsc_pop(&r->sq);
1024
1025 while (node != NULL) {
1026 iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
1027 rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
1028 node = mpsc_pop(&r->sq);
1029 }
1030}
1031
1035static inline struct rtio_cqe *rtio_cqe_acquire(struct rtio *r)
1036{
1037 struct rtio_cqe *cqe = rtio_cqe_pool_alloc(r->cqe_pool);
1038
1039 if (cqe == NULL) {
1040 return NULL;
1041 }
1042
1043 memset(cqe, 0, sizeof(struct rtio_cqe));
1044
1045 return cqe;
1046}
1047
1051static inline void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
1052{
1053 mpsc_push(&r->cq, &cqe->q);
1054}
1055
1067static inline struct rtio_cqe *rtio_cqe_consume(struct rtio *r)
1068{
1069 struct mpsc_node *node;
1070 struct rtio_cqe *cqe = NULL;
1071
1072#ifdef CONFIG_RTIO_CONSUME_SEM
1073 if (k_sem_take(r->consume_sem, K_NO_WAIT) != 0) {
1074 return NULL;
1075 }
1076#endif
1077
1078 node = mpsc_pop(&r->cq);
1079 if (node == NULL) {
1080 return NULL;
1081 }
1082 cqe = CONTAINER_OF(node, struct rtio_cqe, q);
1083
1084 return cqe;
1085}
1086
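/*
 * Illustrative sketch (not part of the original header): draining whatever
 * completions are currently available without blocking. `ctx` is an assumed
 * context name for this example.
 *
 *   struct rtio_cqe *cqe;
 *
 *   while ((cqe = rtio_cqe_consume(&ctx)) != NULL) {
 *           if (cqe->result < 0) {
 *                   // handle the failed operation; cqe->userdata identifies it
 *           }
 *           rtio_cqe_release(&ctx, cqe);
 *   }
 */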
1097static inline struct rtio_cqe *rtio_cqe_consume_block(struct rtio *r)
1098{
1099 struct mpsc_node *node;
1100 struct rtio_cqe *cqe;
1101
1102#ifdef CONFIG_RTIO_CONSUME_SEM
1103 k_sem_take(r->consume_sem, K_FOREVER);
1104#endif
1105 node = mpsc_pop(&r->cq);
1106 while (node == NULL) {
1107 Z_SPIN_DELAY(1);
1108 node = mpsc_pop(&r->cq);
1109 }
1110 cqe = CONTAINER_OF(node, struct rtio_cqe, q);
1111
1112 return cqe;
1113}
1114
1121static inline void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
1122{
1123 rtio_cqe_pool_free(r->cqe_pool, cqe);
1124}
1125
1132static inline uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
1133{
1134 uint32_t flags = 0;
1135
1136#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1137 if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) {
1138 struct rtio *r = iodev_sqe->r;
1139 struct sys_mem_blocks *mem_pool = r->block_pool;
1140 int blk_index = (iodev_sqe->sqe.rx.buf - mem_pool->buffer) >>
1141 mem_pool->info.blk_sz_shift;
1142 int blk_count = iodev_sqe->sqe.rx.buf_len >> mem_pool->info.blk_sz_shift;
1143
1144 flags = RTIO_CQE_FLAG_PREP_MEMPOOL(blk_index, blk_count);
1145 }
1146#else
1147 ARG_UNUSED(iodev_sqe);
1148#endif
1149
1150 return flags;
1151}
1152
1168__syscall int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1169 uint8_t **buff, uint32_t *buff_len);
1170
1171static inline int z_impl_rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1172 uint8_t **buff, uint32_t *buff_len)
1173{
1174#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1175 if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) {
1176 int blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags);
1177 int blk_count = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags);
1178 uint32_t blk_size = rtio_mempool_block_size(r);
1179
1180 *buff = r->block_pool->buffer + blk_idx * blk_size;
1181 *buff_len = blk_count * blk_size;
1182 __ASSERT_NO_MSG(*buff >= r->block_pool->buffer);
1183 __ASSERT_NO_MSG(*buff <
1184 r->block_pool->buffer + blk_size * r->block_pool->info.num_blocks);
1185 return 0;
1186 }
1187 return -EINVAL;
1188#else
1189 ARG_UNUSED(r);
1190 ARG_UNUSED(cqe);
1191 ARG_UNUSED(buff);
1192 ARG_UNUSED(buff_len);
1193
1194 return -ENOTSUP;
1195#endif
1196}
1197
1198void rtio_executor_submit(struct rtio *r);
1199void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result);
1200void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result);
1201
1210static inline void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
1211{
1212 rtio_executor_ok(iodev_sqe, result);
1213}
1214
1223static inline void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
1224{
1225 rtio_executor_err(iodev_sqe, result);
1226}
1227
1239static inline void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
1240{
1241 struct rtio_cqe *cqe = rtio_cqe_acquire(r);
1242
1243 if (cqe == NULL) {
1244 atomic_inc(&r->xcqcnt);
1245 } else {
1246 cqe->result = result;
1247 cqe->userdata = userdata;
1248 cqe->flags = flags;
1249 rtio_cqe_produce(r, cqe);
1250 }
1251
1252 atomic_inc(&r->cq_count);
1253#ifdef CONFIG_RTIO_SUBMIT_SEM
1254 if (r->submit_count > 0) {
1255 r->submit_count--;
1256 if (r->submit_count == 0) {
1257 k_sem_give(r->submit_sem);
1258 }
1259 }
1260#endif
1261#ifdef CONFIG_RTIO_CONSUME_SEM
1262 k_sem_give(r->consume_sem);
1263#endif
1264}
1265
1266#define __RTIO_MEMPOOL_GET_NUM_BLKS(num_bytes, blk_size) (((num_bytes) + (blk_size)-1) / (blk_size))
1267
1280static inline int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len,
1281 uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
1282{
1283 struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe;
1284
1285#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1286 if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) {
1287 struct rtio *r = iodev_sqe->r;
1288
1289 if (sqe->rx.buf != NULL) {
1290 if (sqe->rx.buf_len < min_buf_len) {
1291 return -ENOMEM;
1292 }
1293 *buf = sqe->rx.buf;
1294 *buf_len = sqe->rx.buf_len;
1295 return 0;
1296 }
1297
1298 int rc = rtio_block_pool_alloc(r, min_buf_len, max_buf_len, buf, buf_len);
1299 if (rc == 0) {
1300 sqe->rx.buf = *buf;
1301 sqe->rx.buf_len = *buf_len;
1302 return 0;
1303 }
1304
1305 return -ENOMEM;
1306 }
1307#else
1308 ARG_UNUSED(max_buf_len);
1309#endif
1310
1311 if (sqe->rx.buf_len < min_buf_len) {
1312 return -ENOMEM;
1313 }
1314
1315 *buf = sqe->rx.buf;
1316 *buf_len = sqe->rx.buf_len;
1317 return 0;
1318}
1319
1334__syscall void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len);
1335
1336static inline void z_impl_rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
1337{
1338#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1339 if (r == NULL || buff == NULL || r->block_pool == NULL || buff_len == 0) {
1340 return;
1341 }
1342
1343 rtio_block_pool_free(r, buff, buff_len);
1344#else
1345 ARG_UNUSED(r);
1346 ARG_UNUSED(buff);
1347 ARG_UNUSED(buff_len);
1348#endif
1349}
1350
1354static inline void rtio_access_grant(struct rtio *r, struct k_thread *t)
1355{
1356 k_object_access_grant(r, t);
1357
1358#ifdef CONFIG_RTIO_SUBMIT_SEM
1359 k_object_access_grant(r->submit_sem, t);
1360#endif
1361
1362#ifdef CONFIG_RTIO_CONSUME_SEM
1363 k_object_access_grant(r->consume_sem, t);
1364#endif
1365}
1366
1377__syscall int rtio_sqe_cancel(struct rtio_sqe *sqe);
1378
1379static inline int z_impl_rtio_sqe_cancel(struct rtio_sqe *sqe)
1380{
1381 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);
1382
1383 do {
1384 iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED;
1385 iodev_sqe = rtio_iodev_sqe_next(iodev_sqe);
1386 } while (iodev_sqe != NULL);
1387
1388 return 0;
1389}
1390
1406__syscall int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1407 struct rtio_sqe **handle, size_t sqe_count);
1408
1409static inline int z_impl_rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1410 struct rtio_sqe **handle,
1411 size_t sqe_count)
1412{
1413 struct rtio_sqe *sqe;
1414 uint32_t acquirable = rtio_sqe_acquirable(r);
1415
1416 if (acquirable < sqe_count) {
1417 return -ENOMEM;
1418 }
1419
1420 for (unsigned long i = 0; i < sqe_count; i++) {
1421 sqe = rtio_sqe_acquire(r);
1422 __ASSERT_NO_MSG(sqe != NULL);
1423 if (handle != NULL && i == 0) {
1424 *handle = sqe;
1425 }
1426 *sqe = sqes[i];
1427 }
1428
1429 return 0;
1430}
1431
1448static inline int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
1449{
1450 return rtio_sqe_copy_in_get_handles(r, sqes, NULL, sqe_count);
1451}
1452
1468__syscall int rtio_cqe_copy_out(struct rtio *r,
1469 struct rtio_cqe *cqes,
1470 size_t cqe_count,
1471 k_timeout_t timeout);
1472static inline int z_impl_rtio_cqe_copy_out(struct rtio *r,
1473 struct rtio_cqe *cqes,
1474 size_t cqe_count,
1475 k_timeout_t timeout)
1476{
1477 size_t copied = 0;
1478 struct rtio_cqe *cqe;
1479 k_timepoint_t end = sys_timepoint_calc(timeout);
1480
1481 do {
1482 cqe = K_TIMEOUT_EQ(timeout, K_NO_WAIT) ? rtio_cqe_consume(r)
1483 : rtio_cqe_consume_block(r);
1484 if (cqe == NULL) {
1485 Z_SPIN_DELAY(25);
1486 continue;
1487 }
1488 cqes[copied++] = *cqe;
1489 rtio_cqe_release(r, cqe);
1490 } while (copied < cqe_count && !sys_timepoint_expired(end));
1491
1492 return copied;
1493}
1494
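/*
 * Illustrative sketch (not part of the original header): the copy-based flow
 * usable from user mode, where SQEs are built locally and CQEs copied back.
 * `ctx`, `dev_iodev`, and `buf` are assumed names for this example, and the
 * 100 ms timeout is arbitrary.
 *
 *   struct rtio_sqe sqe;
 *   struct rtio_cqe cqe;
 *
 *   rtio_sqe_prep_read(&sqe, &dev_iodev, RTIO_PRIO_NORM, buf, sizeof(buf), NULL);
 *   rtio_sqe_copy_in(&ctx, &sqe, 1);
 *   rtio_submit(&ctx, 0);
 *
 *   if (rtio_cqe_copy_out(&ctx, &cqe, 1, K_MSEC(100)) == 1) {
 *           // cqe.result holds the outcome of the read
 *   }
 */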
1508__syscall int rtio_submit(struct rtio *r, uint32_t wait_count);
1509
1510static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
1511{
1512 int res = 0;
1513
1514#ifdef CONFIG_RTIO_SUBMIT_SEM
1515 /* TODO undefined behavior if another thread calls submit of course
1516 */
1517 if (wait_count > 0) {
1518 __ASSERT(!k_is_in_isr(),
1519 "expected rtio submit with wait count to be called from a thread");
1520
1521 k_sem_reset(r->submit_sem);
1522 r->submit_count = wait_count;
1523 }
1524#else
1525 uintptr_t cq_count = (uintptr_t)atomic_get(&r->cq_count) + wait_count;
1526#endif
1527
1528 /* Submit the queue to the executor which consumes submissions
1529 * and produces completions through ISR chains or other means.
1530 */
1531 rtio_executor_submit(r);
1532
1533
1534 /* TODO could be nicer if we could suspend the thread and not
1535 * wake up on each completion here.
1536 */
1537#ifdef CONFIG_RTIO_SUBMIT_SEM
1538
1539 if (wait_count > 0) {
1540 res = k_sem_take(r->submit_sem, K_FOREVER);
1541 __ASSERT(res == 0,
1542 "semaphore was reset or timed out while waiting on completions!");
1543 }
1544#else
1545 while ((uintptr_t)atomic_get(&r->cq_count) < cq_count) {
1546 Z_SPIN_DELAY(10);
1547 k_yield();
1548 }
1549#endif
1550
1551 return res;
1552}
1553
1558#ifdef __cplusplus
1559}
1560#endif
1561
1562#include <zephyr/syscalls/rtio.h>
1563
1564#endif /* ZEPHYR_INCLUDE_RTIO_RTIO_H_ */