Zephyr API Documentation 4.2.99
A Scalable Open Source RTOS
rtio.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2022 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
25
26#ifndef ZEPHYR_INCLUDE_RTIO_RTIO_H_
27#define ZEPHYR_INCLUDE_RTIO_RTIO_H_
28
29#include <string.h>
30
32#include <zephyr/device.h>
33#include <zephyr/kernel.h>
35#include <zephyr/sys/__assert.h>
36#include <zephyr/sys/atomic.h>
38#include <zephyr/sys/util.h>
41
42#ifdef __cplusplus
43extern "C" {
44#endif
45
46
55
62
66#define RTIO_PRIO_LOW 0U
67
71#define RTIO_PRIO_NORM 127U
72
76#define RTIO_PRIO_HIGH 255U
77
81
82
89
97#define RTIO_SQE_CHAINED BIT(0)
98
109#define RTIO_SQE_TRANSACTION BIT(1)
110
111
121#define RTIO_SQE_MEMPOOL_BUFFER BIT(2)
122
129#define RTIO_SQE_CANCELED BIT(3)
130
137#define RTIO_SQE_MULTISHOT BIT(4)
138
142#define RTIO_SQE_NO_RESPONSE BIT(5)
143
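The SQE flags above control how the executor sequences entries: RTIO_SQE_CHAINED makes the following SQE wait on this one, while RTIO_SQE_TRANSACTION hands consecutive SQEs to the iodev as one unit. A minimal usage sketch (illustrative, not part of rtio.h), assuming <zephyr/rtio/rtio.h> is included, a context ez_io created with RTIO_DEFINE(), a hypothetical iodev sensor_iodev, and NULL-return checks omitted:

static void chained_write_read(void)
{
	uint8_t reg = 0x01;
	static uint8_t val;
	struct rtio_sqe *wr = rtio_sqe_acquire(&ez_io);
	struct rtio_sqe *rd = rtio_sqe_acquire(&ez_io);

	/* The read only starts once the write completes successfully */
	rtio_sqe_prep_write(wr, &sensor_iodev, RTIO_PRIO_NORM, &reg, 1, NULL);
	wr->flags |= RTIO_SQE_CHAINED;
	rtio_sqe_prep_read(rd, &sensor_iodev, RTIO_PRIO_NORM, &val, 1, NULL);

	rtio_submit(&ez_io, 2); /* block until both completions are produced */
}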
147
154
161#define RTIO_CQE_FLAG_MEMPOOL_BUFFER BIT(0)
162
163#define RTIO_CQE_FLAG_GET(flags) FIELD_GET(GENMASK(7, 0), (flags))
164
171#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags) FIELD_GET(GENMASK(19, 8), (flags))
172
179#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags) FIELD_GET(GENMASK(31, 20), (flags))
180
188#define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt) \
189 (FIELD_PREP(GENMASK(7, 0), RTIO_CQE_FLAG_MEMPOOL_BUFFER) | \
190 FIELD_PREP(GENMASK(19, 8), blk_idx) | FIELD_PREP(GENMASK(31, 20), blk_cnt))
191
195
199#define RTIO_IODEV_I2C_STOP BIT(1)
200
204#define RTIO_IODEV_I2C_RESTART BIT(2)
205
209#define RTIO_IODEV_I2C_10_BITS BIT(3)
210
214#define RTIO_IODEV_I3C_STOP BIT(1)
215
219#define RTIO_IODEV_I3C_RESTART BIT(2)
220
224#define RTIO_IODEV_I3C_HDR BIT(3)
225
229#define RTIO_IODEV_I3C_NBCH BIT(4)
230
234#define RTIO_IODEV_I3C_HDR_MODE_MASK GENMASK(15, 8)
235
239#define RTIO_IODEV_I3C_HDR_MODE_SET(flags) \
240 FIELD_PREP(RTIO_IODEV_I3C_HDR_MODE_MASK, flags)
241
245#define RTIO_IODEV_I3C_HDR_MODE_GET(flags) \
246 FIELD_GET(RTIO_IODEV_I3C_HDR_MODE_MASK, flags)
247
251#define RTIO_IODEV_I3C_HDR_CMD_CODE_MASK GENMASK(22, 16)
252
256#define RTIO_IODEV_I3C_HDR_CMD_CODE_SET(flags) \
257 FIELD_PREP(RTIO_IODEV_I3C_HDR_CMD_CODE_MASK, flags)
258
262#define RTIO_IODEV_I3C_HDR_CMD_CODE_GET(flags) \
263 FIELD_GET(RTIO_IODEV_I3C_HDR_CMD_CODE_MASK, flags)
264
266struct rtio;
267struct rtio_cqe;
268struct rtio_sqe;
269struct rtio_sqe_pool;
270struct rtio_cqe_pool;
271struct rtio_iodev;
272struct rtio_iodev_sqe;
274
283typedef void (*rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, int res, void *arg0);
284
291typedef void (*rtio_signaled_t)(struct rtio_iodev_sqe *iodev_sqe, void *userdata);
292
296struct rtio_sqe {
297 uint8_t op; /**< Op code */
298
299 uint8_t prio; /**< Op priority */
300
301 uint16_t flags; /**< Op Flags */
302
303 uint32_t iodev_flags; /**< Op iodev flags */
304
305 const struct rtio_iodev *iodev;
306
314 void *userdata;
315
316 union {
317
319 struct {
320 uint32_t buf_len; /**< Length of buffer */
321 const uint8_t *buf;
322 } tx;
323
325 struct {
326 uint32_t buf_len;
327 uint8_t *buf; /**< Buffer to read into */
328 } rx;
329
331 struct {
332 uint8_t buf_len; /**< Length of tiny buffer */
333 uint8_t buf[7]; /**< Tiny buffer */
334 } tiny_tx;
335
337 struct {
338 rtio_callback_t callback;
339 void *arg0;
340 } callback;
341
343 struct {
344 uint32_t buf_len;
345 const uint8_t *tx_buf; /**< Buffer to write from */
346 uint8_t *rx_buf; /**< Buffer to read into */
347 } txrx;
348
350 struct {
351 k_timeout_t timeout; /**< Delay timeout */
352 struct _timeout to;
353 } delay;
354
356 uint32_t i2c_config; /**< OP_I2C_CONFIGURE */
357
359 struct {
360 /* enum i3c_config_type type; */
361 int type;
362 void *config;
363 } i3c_config;
364
366 /* struct i3c_ccc_payload *ccc_payload; */
367 void *ccc_payload;
368
370 struct {
371 atomic_t ok;
372 rtio_signaled_t callback;
373 void *userdata;
374 } await;
375 };
376};
377
379/* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
380BUILD_ASSERT(sizeof(struct rtio_sqe) <= 64);
 382
 386struct rtio_cqe {
 387 struct mpsc_node q;
 388
 389 int32_t result; /**< Result from operation */
 390 void *userdata; /**< Associated userdata with operation */
 391 uint32_t flags; /**< Flags associated with the operation */
 392};
 393
 394struct rtio_sqe_pool {
 395 struct mpsc free_q;
 396 const uint16_t pool_size;
 397 uint16_t pool_free;
 398 struct rtio_iodev_sqe *pool;
 399};
 400
 401struct rtio_cqe_pool {
 402 struct mpsc free_q;
 403 const uint16_t pool_size;
 404 uint16_t pool_free;
 405 struct rtio_cqe *pool;
 406};
 407
419struct rtio {
420#ifdef CONFIG_RTIO_SUBMIT_SEM
421 /* A wait semaphore which may suspend the calling thread
422 * to wait for some number of completions when calling submit
423 */
424 struct k_sem *submit_sem;
425
426 uint32_t submit_count;
427#endif
428
429#ifdef CONFIG_RTIO_CONSUME_SEM
430 /* A wait semaphore which may suspend the calling thread
431 * to wait for some number of completions while consuming
432 * them from the completion queue
433 */
434 struct k_sem *consume_sem;
435#endif
436
 437 /* Total number of completions */
 438 atomic_t cq_count;
 439
 440 /* Number of completions that were unable to be submitted with results
 441 * due to the cq spsc being full
 442 */
 443 atomic_t xcqcnt;
 444
 445 /* Submission queue object pool with free list */
 446 struct rtio_sqe_pool *sqe_pool;
 447
 448 /* Complete queue object pool with free list */
 449 struct rtio_cqe_pool *cqe_pool;
 450
451#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
452 /* Mem block pool */
453 struct sys_mem_blocks *block_pool;
454#endif
455
456 /* Submission queue */
457 struct mpsc sq;
458
459 /* Completion queue */
460 struct mpsc cq;
461};
462
464extern struct k_mem_partition rtio_partition;
465
473static inline size_t rtio_mempool_block_size(const struct rtio *r)
474{
475#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
476 ARG_UNUSED(r);
477 return 0;
478#else
479 if (r == NULL || r->block_pool == NULL) {
480 return 0;
481 }
482 return BIT(r->block_pool->info.blk_sz_shift);
483#endif
484}
485
493#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
494static inline uint16_t __rtio_compute_mempool_block_index(const struct rtio *r, const void *ptr)
495{
496 uintptr_t addr = (uintptr_t)ptr;
497 struct sys_mem_blocks *mem_pool = r->block_pool;
498 uint32_t block_size = rtio_mempool_block_size(r);
499
500 uintptr_t buff = (uintptr_t)mem_pool->buffer;
501 uint32_t buff_size = mem_pool->info.num_blocks * block_size;
502
503 if (addr < buff || addr >= buff + buff_size) {
504 return UINT16_MAX;
505 }
506 return (addr - buff) / block_size;
507}
508#endif
509
 515struct rtio_iodev_sqe {
 516 struct rtio_sqe sqe;
 517 struct mpsc_node q;
 518 struct rtio_iodev_sqe *next;
 519 struct rtio *r;
 520};
521
 525struct rtio_iodev_api {
 534 void (*submit)(struct rtio_iodev_sqe *iodev_sqe);
 535};
536
 540struct rtio_iodev {
 541 /* Function pointer table */
542 const struct rtio_iodev_api *api;
543
544 /* Data associated with this iodev */
545 void *data;
546};
547
549#define RTIO_OP_NOP 0
550
552#define RTIO_OP_RX (RTIO_OP_NOP+1)
553
555#define RTIO_OP_TX (RTIO_OP_RX+1)
556
558#define RTIO_OP_TINY_TX (RTIO_OP_TX+1)
559
561#define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)
562
564#define RTIO_OP_TXRX (RTIO_OP_CALLBACK+1)
565
567#define RTIO_OP_DELAY (RTIO_OP_TXRX+1)
568
570#define RTIO_OP_I2C_RECOVER (RTIO_OP_DELAY+1)
571
573#define RTIO_OP_I2C_CONFIGURE (RTIO_OP_I2C_RECOVER+1)
574
576#define RTIO_OP_I3C_RECOVER (RTIO_OP_I2C_CONFIGURE+1)
577
579#define RTIO_OP_I3C_CONFIGURE (RTIO_OP_I3C_RECOVER+1)
580
582#define RTIO_OP_I3C_CCC (RTIO_OP_I3C_CONFIGURE+1)
583
585#define RTIO_OP_AWAIT (RTIO_OP_I3C_CCC+1)
586
590static inline void rtio_sqe_prep_nop(struct rtio_sqe *sqe,
591 const struct rtio_iodev *iodev,
592 void *userdata)
593{
594 memset(sqe, 0, sizeof(struct rtio_sqe));
595 sqe->op = RTIO_OP_NOP;
596 sqe->iodev = iodev;
597 sqe->userdata = userdata;
598}
599
603static inline void rtio_sqe_prep_read(struct rtio_sqe *sqe,
604 const struct rtio_iodev *iodev,
605 int8_t prio,
606 uint8_t *buf,
607 uint32_t len,
608 void *userdata)
609{
610 memset(sqe, 0, sizeof(struct rtio_sqe));
611 sqe->op = RTIO_OP_RX;
612 sqe->prio = prio;
613 sqe->iodev = iodev;
614 sqe->rx.buf_len = len;
615 sqe->rx.buf = buf;
616 sqe->userdata = userdata;
617}
618
624static inline void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe,
625 const struct rtio_iodev *iodev, int8_t prio,
626 void *userdata)
627{
 628 rtio_sqe_prep_read(sqe, iodev, prio, NULL, 0, userdata);
 629 sqe->flags = RTIO_SQE_MEMPOOL_BUFFER;
 630}
631
632static inline void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe,
633 const struct rtio_iodev *iodev, int8_t prio,
634 void *userdata)
635{
 636 rtio_sqe_prep_read_with_pool(sqe, iodev, prio, userdata);
 637 sqe->flags |= RTIO_SQE_MULTISHOT;
 638}
639
643static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe,
644 const struct rtio_iodev *iodev,
645 int8_t prio,
646 const uint8_t *buf,
647 uint32_t len,
648 void *userdata)
649{
650 memset(sqe, 0, sizeof(struct rtio_sqe));
651 sqe->op = RTIO_OP_TX;
652 sqe->prio = prio;
653 sqe->iodev = iodev;
654 sqe->tx.buf_len = len;
655 sqe->tx.buf = buf;
656 sqe->userdata = userdata;
657}
658
669static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe,
670 const struct rtio_iodev *iodev,
671 int8_t prio,
672 const uint8_t *tiny_write_data,
673 uint8_t tiny_write_len,
674 void *userdata)
675{
676 __ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_tx.buf));
677
678 memset(sqe, 0, sizeof(struct rtio_sqe));
679 sqe->op = RTIO_OP_TINY_TX;
680 sqe->prio = prio;
681 sqe->iodev = iodev;
682 sqe->tiny_tx.buf_len = tiny_write_len;
683 memcpy(sqe->tiny_tx.buf, tiny_write_data, tiny_write_len);
684 sqe->userdata = userdata;
685}
686
695static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
696 rtio_callback_t callback,
697 void *arg0,
698 void *userdata)
699{
700 memset(sqe, 0, sizeof(struct rtio_sqe));
701 sqe->op = RTIO_OP_CALLBACK;
702 sqe->prio = 0;
703 sqe->iodev = NULL;
704 sqe->callback.callback = callback;
705 sqe->callback.arg0 = arg0;
706 sqe->userdata = userdata;
707}
708
719static inline void rtio_sqe_prep_callback_no_cqe(struct rtio_sqe *sqe,
720 rtio_callback_t callback,
721 void *arg0,
722 void *userdata)
723{
 724 rtio_sqe_prep_callback(sqe, callback, arg0, userdata);
 725 sqe->flags |= RTIO_SQE_NO_RESPONSE;
 726}
727
731static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
732 const struct rtio_iodev *iodev,
733 int8_t prio,
734 const uint8_t *tx_buf,
735 uint8_t *rx_buf,
736 uint32_t buf_len,
737 void *userdata)
738{
739 memset(sqe, 0, sizeof(struct rtio_sqe));
740 sqe->op = RTIO_OP_TXRX;
741 sqe->prio = prio;
742 sqe->iodev = iodev;
743 sqe->txrx.buf_len = buf_len;
744 sqe->txrx.tx_buf = tx_buf;
745 sqe->txrx.rx_buf = rx_buf;
746 sqe->userdata = userdata;
747}
748
763static inline void rtio_sqe_prep_await(struct rtio_sqe *sqe,
764 const struct rtio_iodev *iodev,
765 int8_t prio,
766 void *userdata)
767{
768 memset(sqe, 0, sizeof(struct rtio_sqe));
769 sqe->op = RTIO_OP_AWAIT;
770 sqe->prio = prio;
771 sqe->iodev = iodev;
772 sqe->userdata = userdata;
773}
774
786static inline void rtio_sqe_prep_await_iodev(struct rtio_sqe *sqe, const struct rtio_iodev *iodev,
787 int8_t prio, void *userdata)
788{
789 __ASSERT_NO_MSG(iodev != NULL);
790 rtio_sqe_prep_await(sqe, iodev, prio, userdata);
791}
792
803static inline void rtio_sqe_prep_await_executor(struct rtio_sqe *sqe, int8_t prio, void *userdata)
804{
805 rtio_sqe_prep_await(sqe, NULL, prio, userdata);
806}
807
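A sketch of the await/signal pairing under the same assumptions (ez_io context from the earlier sketch): an SQE prepared with rtio_sqe_prep_await_executor() only completes after rtio_sqe_signal() is called on its handle, e.g. from an ISR or another thread.

static struct rtio_sqe *await_handle;

static void queue_await(void)
{
	struct rtio_sqe await_sqe;

	rtio_sqe_prep_await_executor(&await_sqe, RTIO_PRIO_NORM, NULL);
	rtio_sqe_copy_in_get_handles(&ez_io, &await_sqe, &await_handle, 1);
	rtio_submit(&ez_io, 0); /* do not wait, the SQE stays pending */
}

static void on_event(void)
{
	rtio_sqe_signal(await_handle); /* completes the pending await SQE */
}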
808static inline void rtio_sqe_prep_delay(struct rtio_sqe *sqe,
809 k_timeout_t timeout,
810 void *userdata)
811{
812 memset(sqe, 0, sizeof(struct rtio_sqe));
813 sqe->op = RTIO_OP_DELAY;
814 sqe->prio = 0;
815 sqe->iodev = NULL;
816 sqe->delay.timeout = timeout;
817 sqe->userdata = userdata;
818}
819
820static inline struct rtio_iodev_sqe *rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
821{
822 struct mpsc_node *node = mpsc_pop(&pool->free_q);
823
824 if (node == NULL) {
825 return NULL;
826 }
827
828 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
829
830 pool->pool_free--;
831
832 return iodev_sqe;
833}
834
835static inline void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
836{
837 mpsc_push(&pool->free_q, &iodev_sqe->q);
838
839 pool->pool_free++;
840}
841
842static inline struct rtio_cqe *rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
843{
844 struct mpsc_node *node = mpsc_pop(&pool->free_q);
845
846 if (node == NULL) {
847 return NULL;
848 }
849
850 struct rtio_cqe *cqe = CONTAINER_OF(node, struct rtio_cqe, q);
851
852 memset(cqe, 0, sizeof(struct rtio_cqe));
853
854 pool->pool_free--;
855
856 return cqe;
857}
858
859static inline void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
860{
861 mpsc_push(&pool->free_q, &cqe->q);
862
863 pool->pool_free++;
864}
865
866static inline int rtio_block_pool_alloc(struct rtio *r, size_t min_sz,
867 size_t max_sz, uint8_t **buf, uint32_t *buf_len)
868{
869#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
870 ARG_UNUSED(r);
871 ARG_UNUSED(min_sz);
872 ARG_UNUSED(max_sz);
873 ARG_UNUSED(buf);
874 ARG_UNUSED(buf_len);
875 return -ENOTSUP;
876#else
877 const uint32_t block_size = rtio_mempool_block_size(r);
878 uint32_t bytes = max_sz;
879
880 /* Not every context has a block pool and the block size may return 0 in
881 * that case
882 */
883 if (block_size == 0) {
884 return -ENOMEM;
885 }
886
887 do {
888 size_t num_blks = DIV_ROUND_UP(bytes, block_size);
889 int rc = sys_mem_blocks_alloc_contiguous(r->block_pool, num_blks, (void **)buf);
890
891 if (rc == 0) {
892 *buf_len = num_blks * block_size;
893 return 0;
894 }
895
896 if (bytes <= block_size) {
897 break;
898 }
899
900 bytes -= block_size;
901 } while (bytes >= min_sz);
902
903 return -ENOMEM;
904#endif
905}
906
907static inline void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len)
908{
909#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
910 ARG_UNUSED(r);
911 ARG_UNUSED(buf);
912 ARG_UNUSED(buf_len);
913#else
914 size_t num_blks = buf_len >> r->block_pool->info.blk_sz_shift;
915
916 sys_mem_blocks_free_contiguous(r->block_pool, buf, num_blks);
917#endif
918}
919
920/* Do not try and reformat the macros */
921/* clang-format off */
922
930#define RTIO_IODEV_DEFINE(name, iodev_api, iodev_data) \
931 STRUCT_SECTION_ITERABLE(rtio_iodev, name) = { \
932 .api = (iodev_api), \
933 .data = (iodev_data), \
934 }
935
936#define Z_RTIO_SQE_POOL_DEFINE(name, sz) \
937 static struct rtio_iodev_sqe CONCAT(_sqe_pool_, name)[sz]; \
938 STRUCT_SECTION_ITERABLE(rtio_sqe_pool, name) = { \
939 .free_q = MPSC_INIT((name.free_q)), \
940 .pool_size = sz, \
941 .pool_free = sz, \
942 .pool = CONCAT(_sqe_pool_, name), \
943 }
944
945
946#define Z_RTIO_CQE_POOL_DEFINE(name, sz) \
947 static struct rtio_cqe CONCAT(_cqe_pool_, name)[sz]; \
948 STRUCT_SECTION_ITERABLE(rtio_cqe_pool, name) = { \
949 .free_q = MPSC_INIT((name.free_q)), \
950 .pool_size = sz, \
951 .pool_free = sz, \
952 .pool = CONCAT(_cqe_pool_, name), \
953 }
954
964#define RTIO_BMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_BMEM(rtio_partition) static), (static))
965
975#define RTIO_DMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_DMEM(rtio_partition) static), (static))
976
977#define Z_RTIO_BLOCK_POOL_DEFINE(name, blk_sz, blk_cnt, blk_align) \
978 RTIO_BMEM uint8_t __aligned(WB_UP(blk_align)) \
979 CONCAT(_block_pool_, name)[blk_cnt*WB_UP(blk_sz)]; \
980 _SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF(name, WB_UP(blk_sz), blk_cnt, \
981 CONCAT(_block_pool_, name), RTIO_DMEM)
982
983#define Z_RTIO_DEFINE(name, _sqe_pool, _cqe_pool, _block_pool) \
984 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, \
985 (static K_SEM_DEFINE(CONCAT(_submit_sem_, name), 0, K_SEM_MAX_LIMIT))) \
986 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, \
987 (static K_SEM_DEFINE(CONCAT(_consume_sem_, name), 0, K_SEM_MAX_LIMIT))) \
988 STRUCT_SECTION_ITERABLE(rtio, name) = { \
989 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_sem = &CONCAT(_submit_sem_, name),)) \
990 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_count = 0,)) \
991 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, (.consume_sem = &CONCAT(_consume_sem_, name),))\
992 .cq_count = ATOMIC_INIT(0), \
993 .xcqcnt = ATOMIC_INIT(0), \
994 .sqe_pool = _sqe_pool, \
995 .cqe_pool = _cqe_pool, \
996 IF_ENABLED(CONFIG_RTIO_SYS_MEM_BLOCKS, (.block_pool = _block_pool,)) \
997 .sq = MPSC_INIT((name.sq)), \
998 .cq = MPSC_INIT((name.cq)), \
999 }
1000
1008#define RTIO_DEFINE(name, sq_sz, cq_sz) \
1009 Z_RTIO_SQE_POOL_DEFINE(CONCAT(name, _sqe_pool), sq_sz); \
1010 Z_RTIO_CQE_POOL_DEFINE(CONCAT(name, _cqe_pool), cq_sz); \
1011 Z_RTIO_DEFINE(name, &CONCAT(name, _sqe_pool), \
1012 &CONCAT(name, _cqe_pool), NULL)
1013
1014/* clang-format on */
1015
1026#define RTIO_DEFINE_WITH_MEMPOOL(name, sq_sz, cq_sz, num_blks, blk_size, balign) \
1027 Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz); \
1028 Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz); \
1029 Z_RTIO_BLOCK_POOL_DEFINE(name##_block_pool, blk_size, num_blks, balign); \
1030 Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, &name##_block_pool)
1031
1032/* clang-format on */
1033
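A sketch of the two context-definition macros (names illustrative): RTIO_DEFINE() sizes only the SQE/CQE pools, while RTIO_DEFINE_WITH_MEMPOOL() adds a block pool used for RTIO_SQE_MEMPOOL_BUFFER reads.

/* 4 SQEs and 4 CQEs, no memory pool */
RTIO_DEFINE(ez_io, 4, 4);

/* 4 SQEs, 4 CQEs, plus 16 blocks of 32 bytes aligned to 4 bytes */
RTIO_DEFINE_WITH_MEMPOOL(pool_io, 4, 4, 16, 32, 4);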
1041static inline uint32_t rtio_sqe_acquirable(struct rtio *r)
1042{
1043 return r->sqe_pool->pool_free;
1044}
1045
1054static inline struct rtio_iodev_sqe *rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
1055{
1056 if (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) {
1057 return iodev_sqe->next;
1058 } else {
1059 return NULL;
1060 }
1061}
1062
1063
1072static inline struct rtio_iodev_sqe *rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
1073{
1074 if (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) {
1075 return iodev_sqe->next;
1076 } else {
1077 return NULL;
1078 }
1079}
1080
1089static inline struct rtio_iodev_sqe *rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
1090{
1091 return iodev_sqe->next;
1092}
1093
1102static inline struct rtio_sqe *rtio_sqe_acquire(struct rtio *r)
1103{
1104 struct rtio_iodev_sqe *iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool);
1105
1106 if (iodev_sqe == NULL) {
1107 return NULL;
1108 }
1109
1110 mpsc_push(&r->sq, &iodev_sqe->q);
1111
1112 return &iodev_sqe->sqe;
1113}
1114
1120static inline void rtio_sqe_drop_all(struct rtio *r)
1121{
1122 struct rtio_iodev_sqe *iodev_sqe;
1123 struct mpsc_node *node = mpsc_pop(&r->sq);
1124
1125 while (node != NULL) {
1126 iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
1127 rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
1128 node = mpsc_pop(&r->sq);
1129 }
1130}
1131
1135static inline struct rtio_cqe *rtio_cqe_acquire(struct rtio *r)
1136{
1137 struct rtio_cqe *cqe = rtio_cqe_pool_alloc(r->cqe_pool);
1138
1139 if (cqe == NULL) {
1140 return NULL;
1141 }
1142
1143 memset(cqe, 0, sizeof(struct rtio_cqe));
1144
1145 return cqe;
1146}
1147
1151static inline void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
1152{
1153 mpsc_push(&r->cq, &cqe->q);
1154}
1155
1167static inline struct rtio_cqe *rtio_cqe_consume(struct rtio *r)
1168{
1169 struct mpsc_node *node;
1170 struct rtio_cqe *cqe = NULL;
1171
1172#ifdef CONFIG_RTIO_CONSUME_SEM
1173 if (k_sem_take(r->consume_sem, K_NO_WAIT) != 0) {
1174 return NULL;
1175 }
1176#endif
1177
1178 node = mpsc_pop(&r->cq);
1179 if (node == NULL) {
1180 return NULL;
1181 }
1182 cqe = CONTAINER_OF(node, struct rtio_cqe, q);
1183
1184 return cqe;
1185}
1186
1197static inline struct rtio_cqe *rtio_cqe_consume_block(struct rtio *r)
1198{
1199 struct mpsc_node *node;
1200 struct rtio_cqe *cqe;
1201
1202#ifdef CONFIG_RTIO_CONSUME_SEM
1203 k_sem_take(r->consume_sem, K_FOREVER);
1204#endif
1205 node = mpsc_pop(&r->cq);
1206 while (node == NULL) {
1207 Z_SPIN_DELAY(1);
1208 node = mpsc_pop(&r->cq);
1209 }
1210 cqe = CONTAINER_OF(node, struct rtio_cqe, q);
1211
1212 return cqe;
1213}
1214
1221static inline void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
1222{
1223 rtio_cqe_pool_free(r->cqe_pool, cqe);
1224}
1225
1234static inline int rtio_flush_completion_queue(struct rtio *r)
1235{
1236 struct rtio_cqe *cqe;
1237 int res = 0;
1238
1239 do {
1240 cqe = rtio_cqe_consume(r);
1241 if (cqe != NULL) {
1242 if ((cqe->result < 0) && (res == 0)) {
1243 res = cqe->result;
1244 }
1245 rtio_cqe_release(r, cqe);
1246 }
1247 } while (cqe != NULL);
1248
1249 return res;
1250}
1251
1258static inline uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
1259{
1260 uint32_t flags = 0;
1261
1262#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1263 if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) {
1264 struct rtio *r = iodev_sqe->r;
1265 struct sys_mem_blocks *mem_pool = r->block_pool;
1266 unsigned int blk_index = 0;
1267 unsigned int blk_count = 0;
1268
1269 if (iodev_sqe->sqe.rx.buf) {
1270 blk_index = (iodev_sqe->sqe.rx.buf - mem_pool->buffer) >>
1271 mem_pool->info.blk_sz_shift;
1272 blk_count = iodev_sqe->sqe.rx.buf_len >> mem_pool->info.blk_sz_shift;
1273 }
1274 flags = RTIO_CQE_FLAG_PREP_MEMPOOL(blk_index, blk_count);
1275 }
1276#else
1277 ARG_UNUSED(iodev_sqe);
1278#endif
1279
1280 return flags;
1281}
1282
1298__syscall int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1299 uint8_t **buff, uint32_t *buff_len);
1300
1301static inline int z_impl_rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1302 uint8_t **buff, uint32_t *buff_len)
1303{
1304#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1305 if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) {
1306 unsigned int blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags);
1307 unsigned int blk_count = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags);
1308 uint32_t blk_size = rtio_mempool_block_size(r);
1309
1310 *buff_len = blk_count * blk_size;
1311
1312 if (blk_count > 0) {
1313 *buff = r->block_pool->buffer + blk_idx * blk_size;
1314
1315 __ASSERT_NO_MSG(*buff >= r->block_pool->buffer);
1316 __ASSERT_NO_MSG(*buff <
1317 r->block_pool->buffer + blk_size * r->block_pool->info.num_blocks);
1318 } else {
1319 *buff = NULL;
1320 }
1321 return 0;
1322 }
1323 return -EINVAL;
1324#else
1325 ARG_UNUSED(r);
1326 ARG_UNUSED(cqe);
1327 ARG_UNUSED(buff);
1328 ARG_UNUSED(buff_len);
1329
1330 return -ENOTSUP;
1331#endif
1332}
1333
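A sketch of the consumer side of a mempool-backed read, reusing the pool_io context and hypothetical sensor_iodev from the earlier sketches: the iodev allocates the buffer, the consumer retrieves it from the CQE and must hand it back with rtio_release_buffer().

static void mempool_read(void)
{
	struct rtio_sqe *sqe = rtio_sqe_acquire(&pool_io);
	struct rtio_cqe *cqe;
	uint8_t *buf;
	uint32_t buf_len;

	rtio_sqe_prep_read_with_pool(sqe, &sensor_iodev, RTIO_PRIO_NORM, NULL);
	rtio_submit(&pool_io, 1);

	cqe = rtio_cqe_consume_block(&pool_io);
	if (cqe->result >= 0 &&
	    rtio_cqe_get_mempool_buffer(&pool_io, cqe, &buf, &buf_len) == 0) {
		/* ... use buf[0..buf_len) ... */
		rtio_release_buffer(&pool_io, buf, buf_len);
	}
	rtio_cqe_release(&pool_io, cqe);
}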
1335void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result);
1336void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result);
1337
1346static inline void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
1347{
1348 rtio_executor_ok(iodev_sqe, result);
1349}
1350
1359static inline void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
1360{
1361 rtio_executor_err(iodev_sqe, result);
1362}
1363
1375static inline void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
1376{
1377 struct rtio_cqe *cqe = rtio_cqe_acquire(r);
1378
1379 if (cqe == NULL) {
1380 atomic_inc(&r->xcqcnt);
1381 } else {
1382 cqe->result = result;
1383 cqe->userdata = userdata;
1384 cqe->flags = flags;
1385 rtio_cqe_produce(r, cqe);
1386#ifdef CONFIG_RTIO_CONSUME_SEM
1387 k_sem_give(r->consume_sem);
1388#endif
1389 }
1390
1391 /* atomic_t isn't guaranteed to wrap correctly as it could be signed, so
1392 * we must resort to a cas loop.
1393 */
1394 atomic_t val, new_val;
1395
1396 do {
1397 val = atomic_get(&r->cq_count);
1398 new_val = (atomic_t)((uintptr_t)val + 1);
1399 } while (!atomic_cas(&r->cq_count, val, new_val));
1400
1401#ifdef CONFIG_RTIO_SUBMIT_SEM
1402 if (r->submit_count > 0) {
1403 r->submit_count--;
1404 if (r->submit_count == 0) {
1405 k_sem_give(r->submit_sem);
1406 }
1407 }
1408#endif
1409}
1410
1411#define __RTIO_MEMPOOL_GET_NUM_BLKS(num_bytes, blk_size) (((num_bytes) + (blk_size)-1) / (blk_size))
1412
1425static inline int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len,
1426 uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
1427{
1428 struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe;
1429
1430#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1431 if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) {
1432 struct rtio *r = iodev_sqe->r;
1433
1434 if (sqe->rx.buf != NULL) {
1435 if (sqe->rx.buf_len < min_buf_len) {
1436 return -ENOMEM;
1437 }
1438 *buf = sqe->rx.buf;
1439 *buf_len = sqe->rx.buf_len;
1440 return 0;
1441 }
1442
1443 int rc = rtio_block_pool_alloc(r, min_buf_len, max_buf_len, buf, buf_len);
1444 if (rc == 0) {
1445 sqe->rx.buf = *buf;
1446 sqe->rx.buf_len = *buf_len;
1447 return 0;
1448 }
1449
1450 return -ENOMEM;
1451 }
1452#else
1453 ARG_UNUSED(max_buf_len);
1454#endif
1455
1456 if (sqe->rx.buf_len < min_buf_len) {
1457 return -ENOMEM;
1458 }
1459
1460 *buf = sqe->rx.buf;
1461 *buf_len = sqe->rx.buf_len;
1462 return 0;
1463}
1464
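On the iodev side, rtio_sqe_rx_buf() is the usual way a submit handler obtains the destination buffer, whether the caller provided one or requested a mempool allocation. A sketch of a submit handler (illustrative, not a real driver):

static void my_iodev_submit(struct rtio_iodev_sqe *iodev_sqe)
{
	uint8_t *buf;
	uint32_t buf_len;
	int rc = rtio_sqe_rx_buf(iodev_sqe, 4, 64, &buf, &buf_len);

	if (rc != 0) {
		rtio_iodev_sqe_err(iodev_sqe, rc); /* no usable buffer */
		return;
	}

	/* ... fill buf with up to buf_len bytes from the device ... */

	rtio_iodev_sqe_ok(iodev_sqe, 0); /* report completion to the executor */
}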
1479__syscall void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len);
1480
1481static inline void z_impl_rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
1482{
1483#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1484 if (r == NULL || buff == NULL || r->block_pool == NULL || buff_len == 0) {
1485 return;
1486 }
1487
1488 rtio_block_pool_free(r, buff, buff_len);
1489#else
1490 ARG_UNUSED(r);
1491 ARG_UNUSED(buff);
1492 ARG_UNUSED(buff_len);
1493#endif
1494}
1495
1502static inline void rtio_access_grant(struct rtio *r, struct k_thread *t)
1503{
1504 k_object_access_grant(r, t);
1505
1506#ifdef CONFIG_RTIO_SUBMIT_SEM
1507 k_object_access_grant(r->submit_sem, t);
1508#endif
1509
1510#ifdef CONFIG_RTIO_CONSUME_SEM
1511 k_object_access_grant(r->consume_sem, t);
1512#endif
1513}
1514
1515
1522static inline void rtio_access_revoke(struct rtio *r, struct k_thread *t)
1523{
1525
1526#ifdef CONFIG_RTIO_SUBMIT_SEM
1527 k_object_access_revoke(r->submit_sem, t);
1528#endif
1529
1530#ifdef CONFIG_RTIO_CONSUME_SEM
1531 k_object_access_revoke(r->consume_sem, t);
1532#endif
1533}
1534
1545__syscall int rtio_sqe_cancel(struct rtio_sqe *sqe);
1546
1547static inline int z_impl_rtio_sqe_cancel(struct rtio_sqe *sqe)
1548{
1549 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);
1550
1551 do {
1552 iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED;
1553 iodev_sqe = rtio_iodev_sqe_next(iodev_sqe);
1554 } while (iodev_sqe != NULL);
1555
1556 return 0;
1557}
1558
1570__syscall void rtio_sqe_signal(struct rtio_sqe *sqe);
1571
1572static inline void z_impl_rtio_sqe_signal(struct rtio_sqe *sqe)
1573{
1574 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);
1575
1576 if (!atomic_cas(&iodev_sqe->sqe.await.ok, 0, 1)) {
1577 iodev_sqe->sqe.await.callback(iodev_sqe, iodev_sqe->sqe.await.userdata);
1578 }
1579}
1580
1591static inline void rtio_iodev_sqe_await_signal(struct rtio_iodev_sqe *iodev_sqe,
1592 rtio_signaled_t callback,
1593 void *userdata)
1594{
1595 iodev_sqe->sqe.await.callback = callback;
1596 iodev_sqe->sqe.await.userdata = userdata;
1597
1598 if (!atomic_cas(&iodev_sqe->sqe.await.ok, 0, 1)) {
1599 callback(iodev_sqe, userdata);
1600 }
1601}
1602
1618__syscall int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1619 struct rtio_sqe **handle, size_t sqe_count);
1620
1621static inline int z_impl_rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1622 struct rtio_sqe **handle,
1623 size_t sqe_count)
1624{
1625 struct rtio_sqe *sqe;
1626 uint32_t acquirable = rtio_sqe_acquirable(r);
1627
1628 if (acquirable < sqe_count) {
1629 return -ENOMEM;
1630 }
1631
1632 for (unsigned long i = 0; i < sqe_count; i++) {
1633 sqe = rtio_sqe_acquire(r);
1634 __ASSERT_NO_MSG(sqe != NULL);
1635 if (handle != NULL && i == 0) {
1636 *handle = sqe;
1637 }
1638 *sqe = sqes[i];
1639 }
1640
1641 return 0;
1642}
1643
1660static inline int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
1661{
1662 return rtio_sqe_copy_in_get_handles(r, sqes, NULL, sqe_count);
1663}
1664
1680__syscall int rtio_cqe_copy_out(struct rtio *r,
1681 struct rtio_cqe *cqes,
1682 size_t cqe_count,
1683 k_timeout_t timeout);
1684static inline int z_impl_rtio_cqe_copy_out(struct rtio *r,
1685 struct rtio_cqe *cqes,
1686 size_t cqe_count,
1687 k_timeout_t timeout)
1688{
1689 size_t copied = 0;
1690 struct rtio_cqe *cqe;
1691 k_timepoint_t end = sys_timepoint_calc(timeout);
1692
1693 do {
1694 cqe = K_TIMEOUT_EQ(timeout, K_FOREVER) ? rtio_cqe_consume_block(r)
1695 : rtio_cqe_consume(r);
1696 if (cqe == NULL) {
1697 Z_SPIN_DELAY(25);
1698 continue;
1699 }
1700 cqes[copied++] = *cqe;
1701 rtio_cqe_release(r, cqe);
1702 } while (copied < cqe_count && !sys_timepoint_expired(end));
1703
1704 return copied;
1705}
1706
1722__syscall int rtio_submit(struct rtio *r, uint32_t wait_count);
1723
1724#ifdef CONFIG_RTIO_SUBMIT_SEM
1725static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
1726{
1727 int res = 0;
1728
1729 if (wait_count > 0) {
1730 __ASSERT(!k_is_in_isr(),
1731 "expected rtio submit with wait count to be called from a thread");
1732
1733 k_sem_reset(r->submit_sem);
1734 r->submit_count = wait_count;
1735 }
1736
1737 rtio_executor_submit(r);
1738
1739 if (wait_count > 0) {
1740 res = k_sem_take(r->submit_sem, K_FOREVER);
1741 __ASSERT(res == 0,
1742 "semaphore was reset or timed out while waiting on completions!");
1743 }
1744
1745 return res;
1746}
1747#else
1748static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
1749{
1750
1751 int res = 0;
1752 uintptr_t cq_count = (uintptr_t)atomic_get(&r->cq_count);
1753 uintptr_t cq_complete_count = cq_count + wait_count;
1754 bool wraps = cq_complete_count < cq_count;
1755
1757
1758 if (wraps) {
1759 while ((uintptr_t)atomic_get(&r->cq_count) >= cq_count) {
1760 Z_SPIN_DELAY(10);
1761 k_yield();
1762 }
1763 }
1764
1765 while ((uintptr_t)atomic_get(&r->cq_count) < cq_complete_count) {
1766 Z_SPIN_DELAY(10);
1767 k_yield();
1768 }
1769
1770 return res;
1771}
1772#endif /* CONFIG_RTIO_SUBMIT_SEM */
1773
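A sketch of the submit-then-drain pattern with the ez_io context from above: block for one completion, then consume whatever else is already available without blocking.

static void drain_completions(void)
{
	struct rtio_cqe *cqe;

	rtio_submit(&ez_io, 1); /* wait for at least one completion */

	cqe = rtio_cqe_consume_block(&ez_io);
	/* ... inspect cqe->result and cqe->userdata ... */
	rtio_cqe_release(&ez_io, cqe);

	/* non-blocking drain of any remaining completions */
	while ((cqe = rtio_cqe_consume(&ez_io)) != NULL) {
		rtio_cqe_release(&ez_io, cqe);
	}
}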
1777struct rtio_pool {
1779 size_t pool_size; /**< Size of the pool */
1780
1782 struct rtio **contexts; /**< Array containing contexts of the pool */
1783
1785 atomic_t *used; /**< Atomic bitmap to signal a member is used/unused */
1786};
1787
1796__syscall struct rtio *rtio_pool_acquire(struct rtio_pool *pool);
1797
1798static inline struct rtio *z_impl_rtio_pool_acquire(struct rtio_pool *pool)
1799{
1800 struct rtio *r = NULL;
1801
1802 for (size_t i = 0; i < pool->pool_size; i++) {
1803 if (atomic_test_and_set_bit(pool->used, i) == 0) {
1804 r = pool->contexts[i];
1805 break;
1806 }
1807 }
1808
1809 if (r != NULL) {
1810 rtio_access_grant(r, k_current_get());
1811 }
1812
1813 return r;
1814}
1815
1822__syscall void rtio_pool_release(struct rtio_pool *pool, struct rtio *r);
1823
1824static inline void z_impl_rtio_pool_release(struct rtio_pool *pool, struct rtio *r)
1825{
1826
1827 if (k_is_user_context()) {
1828 rtio_access_revoke(r, k_current_get());
1829 }
1830
1831 for (size_t i = 0; i < pool->pool_size; i++) {
1832 if (pool->contexts[i] == r) {
1833 atomic_clear_bit(pool->used, i);
1834 break;
1835 }
1836 }
1837}
1838
1839/* clang-format off */
1840
1842
1843#define Z_RTIO_POOL_NAME_N(n, name) \
1844 name##_##n
1845
1846#define Z_RTIO_POOL_DEFINE_N(n, name, sq_sz, cq_sz) \
1847 RTIO_DEFINE(Z_RTIO_POOL_NAME_N(n, name), sq_sz, cq_sz)
1848
1849#define Z_RTIO_POOL_REF_N(n, name) \
1850 &Z_RTIO_POOL_NAME_N(n, name)
1851
1853
1862#define RTIO_POOL_DEFINE(name, pool_sz, sq_sz, cq_sz) \
1863 LISTIFY(pool_sz, Z_RTIO_POOL_DEFINE_N, (;), name, sq_sz, cq_sz); \
1864 static struct rtio *name##_contexts[] = { \
1865 LISTIFY(pool_sz, Z_RTIO_POOL_REF_N, (,), name) \
1866 }; \
1867 ATOMIC_DEFINE(name##_used, pool_sz); \
1868 STRUCT_SECTION_ITERABLE(rtio_pool, name) = { \
1869 .pool_size = pool_sz, \
1870 .contexts = name##_contexts, \
1871 .used = name##_used, \
1872 }
1873
1874/* clang-format on */
1875
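A sketch of the context-pool API for dynamically created threads (names illustrative): each worker borrows a context from the pool and returns it when done.

RTIO_POOL_DEFINE(worker_pool, 2, 8, 8); /* 2 contexts, each with 8 SQEs/CQEs */

static void worker_entry(void *p1, void *p2, void *p3)
{
	struct rtio *r = rtio_pool_acquire(&worker_pool);

	if (r == NULL) {
		return; /* all contexts in use */
	}

	/* ... acquire SQEs, rtio_submit(r, ...), consume CQEs ... */

	rtio_pool_release(&worker_pool, r);
}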
1879
1880#ifdef __cplusplus
1881}
1882#endif
1883
1884#include <zephyr/syscalls/rtio.h>
1885
1886#endif /* ZEPHYR_INCLUDE_RTIO_RTIO_H_ */