Zephyr API Documentation 4.3.99
A Scalable Open Source RTOS
Loading...
Searching...
No Matches
rtio.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2022 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
25
26#ifndef ZEPHYR_INCLUDE_RTIO_RTIO_H_
27#define ZEPHYR_INCLUDE_RTIO_RTIO_H_
28
29#include <string.h>
30
32#include <zephyr/device.h>
33#include <zephyr/kernel.h>
35#include <zephyr/sys/__assert.h>
36#include <zephyr/sys/atomic.h>
38#include <zephyr/sys/util.h>
41
42#ifdef __cplusplus
43extern "C" {
44#endif
45
46
55
62
66#define RTIO_PRIO_LOW 0U
67
71#define RTIO_PRIO_NORM 127U
72
76#define RTIO_PRIO_HIGH 255U
77
81
82
89
97#define RTIO_SQE_CHAINED BIT(0)
98
109#define RTIO_SQE_TRANSACTION BIT(1)
110
111
121#define RTIO_SQE_MEMPOOL_BUFFER BIT(2)
122
129#define RTIO_SQE_CANCELED BIT(3)
130
137#define RTIO_SQE_MULTISHOT BIT(4)
138
142#define RTIO_SQE_NO_RESPONSE BIT(5)
143
147
154
161#define RTIO_CQE_FLAG_MEMPOOL_BUFFER BIT(0)
162
163#define RTIO_CQE_FLAG_GET(flags) FIELD_GET(GENMASK(7, 0), (flags))
164
171#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags) FIELD_GET(GENMASK(19, 8), (flags))
172
179#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags) FIELD_GET(GENMASK(31, 20), (flags))
180
188#define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt) \
189 (FIELD_PREP(GENMASK(7, 0), RTIO_CQE_FLAG_MEMPOOL_BUFFER) | \
190 FIELD_PREP(GENMASK(19, 8), blk_idx) | FIELD_PREP(GENMASK(31, 20), blk_cnt))
191
195
199#define RTIO_IODEV_I2C_STOP BIT(1)
200
204#define RTIO_IODEV_I2C_RESTART BIT(2)
205
209#define RTIO_IODEV_I2C_10_BITS BIT(3)
210
214#define RTIO_IODEV_I3C_STOP BIT(1)
215
219#define RTIO_IODEV_I3C_RESTART BIT(2)
220
224#define RTIO_IODEV_I3C_HDR BIT(3)
225
229#define RTIO_IODEV_I3C_NBCH BIT(4)
230
234#define RTIO_IODEV_I3C_HDR_MODE_MASK GENMASK(15, 8)
235
239#define RTIO_IODEV_I3C_HDR_MODE_SET(flags) \
240 FIELD_PREP(RTIO_IODEV_I3C_HDR_MODE_MASK, flags)
241
245#define RTIO_IODEV_I3C_HDR_MODE_GET(flags) \
246 FIELD_GET(RTIO_IODEV_I3C_HDR_MODE_MASK, flags)
247
251#define RTIO_IODEV_I3C_HDR_CMD_CODE_MASK GENMASK(22, 16)
252
256#define RTIO_IODEV_I3C_HDR_CMD_CODE_SET(flags) \
257 FIELD_PREP(RTIO_IODEV_I3C_HDR_CMD_CODE_MASK, flags)
258
262#define RTIO_IODEV_I3C_HDR_CMD_CODE_GET(flags) \
263 FIELD_GET(RTIO_IODEV_I3C_HDR_CMD_CODE_MASK, flags)
264
266struct rtio;
267struct rtio_cqe;
268struct rtio_sqe;
269struct rtio_sqe_pool;
270struct rtio_cqe_pool;
271struct rtio_iodev;
272struct rtio_iodev_sqe;
274
283typedef void (*rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, int res, void *arg0);
284
291typedef void (*rtio_signaled_t)(struct rtio_iodev_sqe *iodev_sqe, void *userdata);
292
296struct rtio_sqe {
298
300
302
304
305 const struct rtio_iodev *iodev;
306
314 void *userdata;
315
316 union {
317
319 struct {
321 const uint8_t *buf;
322 } tx;
323
325 struct {
326 uint32_t buf_len;
328 } rx;
329
331 struct {
334 } tiny_tx;
335
337 struct {
339 void *arg0;
340 } callback;
341
343 struct {
344 uint32_t buf_len;
347 } txrx;
348
349#ifdef CONFIG_RTIO_OP_DELAY
351 struct {
352 k_timeout_t timeout;
353 struct _timeout to;
354 } delay;
355#endif
356
359
361 struct {
362 /* enum i3c_config_type type; */
363 int type;
364 void *config;
365 } i3c_config;
366
368 /* struct i3c_ccc_payload *ccc_payload; */
370
372 struct {
375 void *userdata;
376 } await;
377 };
378};
379
380
387 struct rtio_sqe sqe;
388 struct mpsc_node q;
390 struct rtio *r;
391};
392
393
395/* Ensure the rtio_iodev_sqe never grows beyond a common cacheline size of 64 bytes */
396#if CONFIG_RTIO_SQE_CACHELINE_CHECK
397#ifdef CONFIG_DCACHE_LINE_SIZE
398#define RTIO_CACHE_LINE_SIZE CONFIG_DCACHE_LINE_SIZE
399#else
400#define RTIO_CACHE_LINE_SIZE 64
401#endif
402BUILD_ASSERT(sizeof(struct rtio_iodev_sqe) <= RTIO_CACHE_LINE_SIZE,
403 "RTIO performs best when the submissions queue entries are less than a cache line")
404#endif
406
407
418
425
432
444struct rtio {
445#ifdef CONFIG_RTIO_SUBMIT_SEM
446 /* A wait semaphore which may suspend the calling thread
447 * to wait for some number of completions when calling submit
448 */
449 struct k_sem *submit_sem;
450
451 uint32_t submit_count;
452#endif
453
454#ifdef CONFIG_RTIO_CONSUME_SEM
455 /* A wait semaphore which may suspend the calling thread
456 * to wait for some number of completions while consuming
457 * them from the completion queue
458 */
459 struct k_sem *consume_sem;
460#endif
461
462 /* Total number of completions */
464
465 /* Number of completions that were unable to be submitted with results
466 * due to the cq spsc being full
467 */
469
470 /* Submission queue object pool with free list */
472
473 /* Complete queue object pool with free list */
475
476#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
477 /* Mem block pool */
478 struct sys_mem_blocks *block_pool;
479#endif
480
481 /* Submission queue */
482 struct mpsc sq;
483
484 /* Completion queue */
485 struct mpsc cq;
486};
487
489extern struct k_mem_partition rtio_partition;
490
498static inline size_t rtio_mempool_block_size(const struct rtio *r)
499{
500#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
501 ARG_UNUSED(r);
502 return 0;
503#else
504 if (r == NULL || r->block_pool == NULL) {
505 return 0;
506 }
507 return BIT(r->block_pool->info.blk_sz_shift);
508#endif
509}
510
518#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
519static inline uint16_t __rtio_compute_mempool_block_index(const struct rtio *r, const void *ptr)
520{
521 uintptr_t addr = (uintptr_t)ptr;
522 struct sys_mem_blocks *mem_pool = r->block_pool;
523 uint32_t block_size = rtio_mempool_block_size(r);
524
525 uintptr_t buff = (uintptr_t)mem_pool->buffer;
526 uint32_t buff_size = mem_pool->info.num_blocks * block_size;
527
528 if (addr < buff || addr >= buff + buff_size) {
529 return UINT16_MAX;
530 }
531 return (addr - buff) / block_size;
532}
533#endif
534
547 void (*submit)(struct rtio_iodev_sqe *iodev_sqe);
548};
549
554 /* Function pointer table */
555 const struct rtio_iodev_api *api;
556
557 /* Data associated with this iodev */
558 void *data;
559};
560
562#define RTIO_OP_NOP 0
563
565#define RTIO_OP_RX (RTIO_OP_NOP+1)
566
568#define RTIO_OP_TX (RTIO_OP_RX+1)
569
571#define RTIO_OP_TINY_TX (RTIO_OP_TX+1)
572
574#define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)
575
577#define RTIO_OP_TXRX (RTIO_OP_CALLBACK+1)
578
580#define RTIO_OP_DELAY (RTIO_OP_TXRX+1)
581
583#define RTIO_OP_I2C_RECOVER (RTIO_OP_DELAY+1)
584
586#define RTIO_OP_I2C_CONFIGURE (RTIO_OP_I2C_RECOVER+1)
587
589#define RTIO_OP_I3C_RECOVER (RTIO_OP_I2C_CONFIGURE+1)
590
592#define RTIO_OP_I3C_CONFIGURE (RTIO_OP_I3C_RECOVER+1)
593
595#define RTIO_OP_I3C_CCC (RTIO_OP_I3C_CONFIGURE+1)
596
598#define RTIO_OP_AWAIT (RTIO_OP_I3C_CCC+1)
599
603static inline void rtio_sqe_prep_nop(struct rtio_sqe *sqe,
604 const struct rtio_iodev *iodev,
605 void *userdata)
606{
607 memset(sqe, 0, sizeof(struct rtio_sqe));
608 sqe->op = RTIO_OP_NOP;
609 sqe->iodev = iodev;
610 sqe->userdata = userdata;
611}
612
616static inline void rtio_sqe_prep_read(struct rtio_sqe *sqe,
617 const struct rtio_iodev *iodev,
618 int8_t prio,
619 uint8_t *buf,
620 uint32_t len,
621 void *userdata)
622{
623 memset(sqe, 0, sizeof(struct rtio_sqe));
624 sqe->op = RTIO_OP_RX;
625 sqe->prio = prio;
626 sqe->iodev = iodev;
627 sqe->rx.buf_len = len;
628 sqe->rx.buf = buf;
629 sqe->userdata = userdata;
630}
631
637static inline void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe,
638 const struct rtio_iodev *iodev, int8_t prio,
639 void *userdata)
640{
641 rtio_sqe_prep_read(sqe, iodev, prio, NULL, 0, userdata);
643}
644
645static inline void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe,
646 const struct rtio_iodev *iodev, int8_t prio,
647 void *userdata)
648{
649 rtio_sqe_prep_read_with_pool(sqe, iodev, prio, userdata);
651}
652
656static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe,
657 const struct rtio_iodev *iodev,
658 int8_t prio,
659 const uint8_t *buf,
660 uint32_t len,
661 void *userdata)
662{
663 memset(sqe, 0, sizeof(struct rtio_sqe));
664 sqe->op = RTIO_OP_TX;
665 sqe->prio = prio;
666 sqe->iodev = iodev;
667 sqe->tx.buf_len = len;
668 sqe->tx.buf = buf;
669 sqe->userdata = userdata;
670}
671
682static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe,
683 const struct rtio_iodev *iodev,
684 int8_t prio,
685 const uint8_t *tiny_write_data,
686 uint8_t tiny_write_len,
687 void *userdata)
688{
689 __ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_tx.buf));
690
691 memset(sqe, 0, sizeof(struct rtio_sqe));
692 sqe->op = RTIO_OP_TINY_TX;
693 sqe->prio = prio;
694 sqe->iodev = iodev;
695 sqe->tiny_tx.buf_len = tiny_write_len;
696 memcpy(sqe->tiny_tx.buf, tiny_write_data, tiny_write_len);
697 sqe->userdata = userdata;
698}
699
708static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
709 rtio_callback_t callback,
710 void *arg0,
711 void *userdata)
712{
713 memset(sqe, 0, sizeof(struct rtio_sqe));
714 sqe->op = RTIO_OP_CALLBACK;
715 sqe->prio = 0;
716 sqe->iodev = NULL;
717 sqe->callback.callback = callback;
718 sqe->callback.arg0 = arg0;
719 sqe->userdata = userdata;
720}
721
732static inline void rtio_sqe_prep_callback_no_cqe(struct rtio_sqe *sqe,
733 rtio_callback_t callback,
734 void *arg0,
735 void *userdata)
736{
737 rtio_sqe_prep_callback(sqe, callback, arg0, userdata);
739}
740
744static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
745 const struct rtio_iodev *iodev,
746 int8_t prio,
747 const uint8_t *tx_buf,
748 uint8_t *rx_buf,
749 uint32_t buf_len,
750 void *userdata)
751{
752 memset(sqe, 0, sizeof(struct rtio_sqe));
753 sqe->op = RTIO_OP_TXRX;
754 sqe->prio = prio;
755 sqe->iodev = iodev;
756 sqe->txrx.buf_len = buf_len;
757 sqe->txrx.tx_buf = tx_buf;
758 sqe->txrx.rx_buf = rx_buf;
759 sqe->userdata = userdata;
760}
761
776static inline void rtio_sqe_prep_await(struct rtio_sqe *sqe,
777 const struct rtio_iodev *iodev,
778 int8_t prio,
779 void *userdata)
780{
781 memset(sqe, 0, sizeof(struct rtio_sqe));
782 sqe->op = RTIO_OP_AWAIT;
783 sqe->prio = prio;
784 sqe->iodev = iodev;
785 sqe->userdata = userdata;
786}
787
799static inline void rtio_sqe_prep_await_iodev(struct rtio_sqe *sqe, const struct rtio_iodev *iodev,
800 int8_t prio, void *userdata)
801{
802 __ASSERT_NO_MSG(iodev != NULL);
803 rtio_sqe_prep_await(sqe, iodev, prio, userdata);
804}
805
816static inline void rtio_sqe_prep_await_executor(struct rtio_sqe *sqe, int8_t prio, void *userdata)
817{
818 rtio_sqe_prep_await(sqe, NULL, prio, userdata);
819}
820
832#ifdef CONFIG_RTIO_OP_DELAY
833static inline void rtio_sqe_prep_delay(struct rtio_sqe *sqe,
834 k_timeout_t timeout,
835 void *userdata)
836{
837 memset(sqe, 0, sizeof(struct rtio_sqe));
838 sqe->op = RTIO_OP_DELAY;
839 sqe->prio = 0;
840 sqe->iodev = NULL;
841 sqe->delay.timeout = timeout;
842 sqe->userdata = userdata;
843}
844#else
845#define rtio_sqe_prep_delay(sqe, timeout, userdata) \
846 BUILD_ASSERT(false, "CONFIG_RTIO_OP_DELAY not enabled")
847#endif
848
849static inline struct rtio_iodev_sqe *rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
850{
851 struct mpsc_node *node = mpsc_pop(&pool->free_q);
852
853 if (node == NULL) {
854 return NULL;
855 }
856
857 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
858
859 pool->pool_free--;
860
861 return iodev_sqe;
862}
863
864static inline void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
865{
866 mpsc_push(&pool->free_q, &iodev_sqe->q);
867
868 pool->pool_free++;
869}
870
871static inline struct rtio_cqe *rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
872{
873 struct mpsc_node *node = mpsc_pop(&pool->free_q);
874
875 if (node == NULL) {
876 return NULL;
877 }
878
879 struct rtio_cqe *cqe = CONTAINER_OF(node, struct rtio_cqe, q);
880
881 memset(cqe, 0, sizeof(struct rtio_cqe));
882
883 pool->pool_free--;
884
885 return cqe;
886}
887
888static inline void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
889{
890 mpsc_push(&pool->free_q, &cqe->q);
891
892 pool->pool_free++;
893}
894
895static inline int rtio_block_pool_alloc(struct rtio *r, size_t min_sz,
896 size_t max_sz, uint8_t **buf, uint32_t *buf_len)
897{
898#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
899 ARG_UNUSED(r);
900 ARG_UNUSED(min_sz);
901 ARG_UNUSED(max_sz);
902 ARG_UNUSED(buf);
903 ARG_UNUSED(buf_len);
904 return -ENOTSUP;
905#else
906 const uint32_t block_size = rtio_mempool_block_size(r);
907 uint32_t bytes = max_sz;
908
909 /* Not every context has a block pool and the block size may return 0 in
910 * that case
911 */
912 if (block_size == 0) {
913 return -ENOMEM;
914 }
915
916 do {
917 size_t num_blks = DIV_ROUND_UP(bytes, block_size);
918 int rc = sys_mem_blocks_alloc_contiguous(r->block_pool, num_blks, (void **)buf);
919
920 if (rc == 0) {
921 *buf_len = num_blks * block_size;
922 return 0;
923 }
924
925 if (bytes <= block_size) {
926 break;
927 }
928
929 bytes -= block_size;
930 } while (bytes >= min_sz);
931
932 return -ENOMEM;
933#endif
934}
935
936static inline void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len)
937{
938#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
939 ARG_UNUSED(r);
940 ARG_UNUSED(buf);
941 ARG_UNUSED(buf_len);
942#else
943 size_t num_blks = buf_len >> r->block_pool->info.blk_sz_shift;
944
945 sys_mem_blocks_free_contiguous(r->block_pool, buf, num_blks);
946#endif
947}
948
949/* Do not try and reformat the macros */
950/* clang-format off */
951
959#define RTIO_IODEV_DEFINE(name, iodev_api, iodev_data) \
960 STRUCT_SECTION_ITERABLE(rtio_iodev, name) = { \
961 .api = (iodev_api), \
962 .data = (iodev_data), \
963 }
964
965#define Z_RTIO_SQE_POOL_DEFINE(name, sz) \
966 static struct rtio_iodev_sqe CONCAT(_sqe_pool_, name)[sz]; \
967 STRUCT_SECTION_ITERABLE(rtio_sqe_pool, name) = { \
968 .free_q = MPSC_INIT((name.free_q)), \
969 .pool_size = sz, \
970 .pool_free = sz, \
971 .pool = CONCAT(_sqe_pool_, name), \
972 }
973
974
975#define Z_RTIO_CQE_POOL_DEFINE(name, sz) \
976 static struct rtio_cqe CONCAT(_cqe_pool_, name)[sz]; \
977 STRUCT_SECTION_ITERABLE(rtio_cqe_pool, name) = { \
978 .free_q = MPSC_INIT((name.free_q)), \
979 .pool_size = sz, \
980 .pool_free = sz, \
981 .pool = CONCAT(_cqe_pool_, name), \
982 }
983
993#define RTIO_BMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_BMEM(rtio_partition) static), (static))
994
1004#define RTIO_DMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_DMEM(rtio_partition) static), (static))
1005
1006#define Z_RTIO_BLOCK_POOL_DEFINE(name, blk_sz, blk_cnt, blk_align) \
1007 RTIO_BMEM uint8_t __aligned(WB_UP(blk_align)) \
1008 CONCAT(_block_pool_, name)[blk_cnt*WB_UP(blk_sz)]; \
1009 _SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF(name, WB_UP(blk_sz), blk_cnt, \
1010 CONCAT(_block_pool_, name), RTIO_DMEM)
1011
1012#define Z_RTIO_DEFINE(name, _sqe_pool, _cqe_pool, _block_pool) \
1013 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, \
1014 (static K_SEM_DEFINE(CONCAT(_submit_sem_, name), 0, K_SEM_MAX_LIMIT))) \
1015 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, \
1016 (static K_SEM_DEFINE(CONCAT(_consume_sem_, name), 0, K_SEM_MAX_LIMIT))) \
1017 STRUCT_SECTION_ITERABLE(rtio, name) = { \
1018 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_sem = &CONCAT(_submit_sem_, name),)) \
1019 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_count = 0,)) \
1020 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, (.consume_sem = &CONCAT(_consume_sem_, name),))\
1021 .cq_count = ATOMIC_INIT(0), \
1022 .xcqcnt = ATOMIC_INIT(0), \
1023 .sqe_pool = _sqe_pool, \
1024 .cqe_pool = _cqe_pool, \
1025 IF_ENABLED(CONFIG_RTIO_SYS_MEM_BLOCKS, (.block_pool = _block_pool,)) \
1026 .sq = MPSC_INIT((name.sq)), \
1027 .cq = MPSC_INIT((name.cq)), \
1028 }
1029
1037#define RTIO_DEFINE(name, sq_sz, cq_sz) \
1038 Z_RTIO_SQE_POOL_DEFINE(CONCAT(name, _sqe_pool), sq_sz); \
1039 Z_RTIO_CQE_POOL_DEFINE(CONCAT(name, _cqe_pool), cq_sz); \
1040 Z_RTIO_DEFINE(name, &CONCAT(name, _sqe_pool), \
1041 &CONCAT(name, _cqe_pool), NULL)
1042
1043/* clang-format on */
1044
1055#define RTIO_DEFINE_WITH_MEMPOOL(name, sq_sz, cq_sz, num_blks, blk_size, balign) \
1056 Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz); \
1057 Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz); \
1058 Z_RTIO_BLOCK_POOL_DEFINE(name##_block_pool, blk_size, num_blks, balign); \
1059 Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, &name##_block_pool)
1060
1061/* clang-format on */
1062
1070static inline uint32_t rtio_sqe_acquirable(struct rtio *r)
1071{
1072 return r->sqe_pool->pool_free;
1073}
1074
1083static inline struct rtio_iodev_sqe *rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
1084{
1085 SYS_PORT_TRACING_FUNC_ENTER(rtio, txn_next, iodev_sqe->r, iodev_sqe);
1086 if (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) {
1087 SYS_PORT_TRACING_FUNC_EXIT(rtio, txn_next, iodev_sqe->r, iodev_sqe->next);
1088 return iodev_sqe->next;
1089 } else {
1090 SYS_PORT_TRACING_FUNC_EXIT(rtio, txn_next, iodev_sqe->r, NULL);
1091 return NULL;
1092 }
1093}
1094
1095
1104static inline struct rtio_iodev_sqe *rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
1105{
1106 SYS_PORT_TRACING_FUNC_ENTER(rtio, txn_next, iodev_sqe->r, iodev_sqe);
1107 if (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) {
1108 SYS_PORT_TRACING_FUNC_EXIT(rtio, txn_next, iodev_sqe->r, iodev_sqe->next);
1109 return iodev_sqe->next;
1110 } else {
1111 SYS_PORT_TRACING_FUNC_EXIT(rtio, txn_next, iodev_sqe->r, NULL);
1112 return NULL;
1113 }
1114}
1115
1124static inline struct rtio_iodev_sqe *rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
1125{
1126 return iodev_sqe->next;
1127}
1128
1137static inline struct rtio_sqe *rtio_sqe_acquire(struct rtio *r)
1138{
1139 SYS_PORT_TRACING_FUNC_ENTER(rtio, sqe_acquire, r);
1140 struct rtio_iodev_sqe *iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool);
1141
1142 if (iodev_sqe == NULL) {
1143 SYS_PORT_TRACING_FUNC_EXIT(rtio, sqe_acquire, r, NULL);
1144 return NULL;
1145 }
1146
1147 mpsc_push(&r->sq, &iodev_sqe->q);
1148
1149 SYS_PORT_TRACING_FUNC_EXIT(rtio, sqe_acquire, r, &iodev_sqe->sqe);
1150 return &iodev_sqe->sqe;
1151}
1152
1165static inline int rtio_sqe_acquire_array(struct rtio *r, size_t n, struct rtio_sqe **sqes)
1166{
1167 struct rtio_iodev_sqe *iodev_sqe;
1168 size_t i;
1169
1170 for (i = 0; i < n; i++) {
1171 iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool);
1172 if (iodev_sqe == NULL) {
1173 break;
1174 }
1175 sqes[i] = &iodev_sqe->sqe;
1176 }
1177
1178 /* Not enough SQEs in the pool */
1179 if (i < n) {
1180 while (i > 0) {
1181 i--;
1182 iodev_sqe = CONTAINER_OF(sqes[i], struct rtio_iodev_sqe, sqe);
1183 rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
1184 sqes[i] = NULL;
1185 }
1186
1187 return -ENOMEM;
1188 }
1189
1190 for (i = 0; i < n; i++) {
1191 iodev_sqe = CONTAINER_OF(sqes[i], struct rtio_iodev_sqe, sqe);
1192 mpsc_push(&r->sq, &iodev_sqe->q);
1193 }
1194
1195 return 0;
1196}
1197
1203static inline void rtio_sqe_drop_all(struct rtio *r)
1204{
1205 struct rtio_iodev_sqe *iodev_sqe;
1206 struct mpsc_node *node = mpsc_pop(&r->sq);
1207
1208 while (node != NULL) {
1209 iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
1210 rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
1211 node = mpsc_pop(&r->sq);
1212 }
1213}
1214
1218static inline struct rtio_cqe *rtio_cqe_acquire(struct rtio *r)
1219{
1220 SYS_PORT_TRACING_FUNC_ENTER(rtio, cqe_acquire, r);
1221 struct rtio_cqe *cqe = rtio_cqe_pool_alloc(r->cqe_pool);
1222
1223 if (cqe == NULL) {
1224 SYS_PORT_TRACING_FUNC_EXIT(rtio, cqe_acquire, r, NULL);
1225 return NULL;
1226 }
1227
1228 memset(cqe, 0, sizeof(struct rtio_cqe));
1229
1230 SYS_PORT_TRACING_FUNC_EXIT(rtio, cqe_acquire, r, cqe);
1231 return cqe;
1232}
1233
1237static inline void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
1238{
1239 mpsc_push(&r->cq, &cqe->q);
1240}
1241
1253static inline struct rtio_cqe *rtio_cqe_consume(struct rtio *r)
1254{
1255 SYS_PORT_TRACING_FUNC_ENTER(rtio, cqe_consume, r);
1256 struct mpsc_node *node;
1257 struct rtio_cqe *cqe = NULL;
1258
1259#ifdef CONFIG_RTIO_CONSUME_SEM
1260 if (k_sem_take(r->consume_sem, K_NO_WAIT) != 0) {
1261 SYS_PORT_TRACING_FUNC_EXIT(rtio, cqe_consume, r, NULL);
1262 return NULL;
1263 }
1264#endif
1265
1266 node = mpsc_pop(&r->cq);
1267 if (node == NULL) {
1268 SYS_PORT_TRACING_FUNC_EXIT(rtio, cqe_consume, r, NULL);
1269 return NULL;
1270 }
1271 cqe = CONTAINER_OF(node, struct rtio_cqe, q);
1272
1273 SYS_PORT_TRACING_FUNC_EXIT(rtio, cqe_consume, r, cqe);
1274 return cqe;
1275}
1276
1287static inline struct rtio_cqe *rtio_cqe_consume_block(struct rtio *r)
1288{
1289 struct mpsc_node *node;
1290 struct rtio_cqe *cqe;
1291
1292#ifdef CONFIG_RTIO_CONSUME_SEM
1293 k_sem_take(r->consume_sem, K_FOREVER);
1294#endif
1295 node = mpsc_pop(&r->cq);
1296 while (node == NULL) {
1297 Z_SPIN_DELAY(1);
1298 node = mpsc_pop(&r->cq);
1299 }
1300 cqe = CONTAINER_OF(node, struct rtio_cqe, q);
1301
1302 return cqe;
1303}
1304
1311static inline void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
1312{
1313 SYS_PORT_TRACING_FUNC(rtio, cqe_release, r, cqe);
1314 rtio_cqe_pool_free(r->cqe_pool, cqe);
1315}
1316
1325static inline int rtio_flush_completion_queue(struct rtio *r)
1326{
1327 struct rtio_cqe *cqe;
1328 int res = 0;
1329
1330 do {
1331 cqe = rtio_cqe_consume(r);
1332 if (cqe != NULL) {
1333 if ((cqe->result < 0) && (res == 0)) {
1334 res = cqe->result;
1335 }
1336 rtio_cqe_release(r, cqe);
1337 }
1338 } while (cqe != NULL);
1339
1340 return res;
1341}
1342
1349static inline uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
1350{
1351 uint32_t flags = 0;
1352
1353#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1354 if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) {
1355 struct rtio *r = iodev_sqe->r;
1356 struct sys_mem_blocks *mem_pool = r->block_pool;
1357 unsigned int blk_index = 0;
1358 unsigned int blk_count = 0;
1359
1360 if (iodev_sqe->sqe.rx.buf) {
1361 blk_index = (iodev_sqe->sqe.rx.buf - mem_pool->buffer) >>
1362 mem_pool->info.blk_sz_shift;
1363 blk_count = iodev_sqe->sqe.rx.buf_len >> mem_pool->info.blk_sz_shift;
1364 }
1365 flags = RTIO_CQE_FLAG_PREP_MEMPOOL(blk_index, blk_count);
1366 }
1367#else
1368 ARG_UNUSED(iodev_sqe);
1369#endif
1370
1371 return flags;
1372}
1373
1389__syscall int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1390 uint8_t **buff, uint32_t *buff_len);
1391
1392static inline int z_impl_rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1393 uint8_t **buff, uint32_t *buff_len)
1394{
1395#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1397 unsigned int blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags);
1398 unsigned int blk_count = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags);
1400
1401 *buff_len = blk_count * blk_size;
1402
1403 if (blk_count > 0) {
1404 *buff = r->block_pool->buffer + blk_idx * blk_size;
1405
1406 __ASSERT_NO_MSG(*buff >= r->block_pool->buffer);
1407 __ASSERT_NO_MSG(*buff <
1408 r->block_pool->buffer + blk_size * r->block_pool->info.num_blocks);
1409 } else {
1410 *buff = NULL;
1411 }
1412 return 0;
1413 }
1414 return -EINVAL;
1415#else
1416 ARG_UNUSED(r);
1417 ARG_UNUSED(cqe);
1418 ARG_UNUSED(buff);
1419 ARG_UNUSED(buff_len);
1420
1421 return -ENOTSUP;
1422#endif
1423}
1424
1426void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result);
1427void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result);
1428
1437static inline void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
1438{
1439 rtio_executor_ok(iodev_sqe, result);
1440}
1441
1450static inline void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
1451{
1452 rtio_executor_err(iodev_sqe, result);
1453}
1454
1466static inline void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
1467{
1468 SYS_PORT_TRACING_FUNC_ENTER(rtio, cqe_submit, r, result, flags);
1469 struct rtio_cqe *cqe = rtio_cqe_acquire(r);
1470
1471 if (cqe == NULL) {
1472 atomic_inc(&r->xcqcnt);
1473 } else {
1474 cqe->result = result;
1475 cqe->userdata = userdata;
1476 cqe->flags = flags;
1477 rtio_cqe_produce(r, cqe);
1478#ifdef CONFIG_RTIO_CONSUME_SEM
1479 k_sem_give(r->consume_sem);
1480#endif
1481 }
1482
1483 /* atomic_t isn't guaranteed to wrap correctly as it could be signed, so
1484 * we must resort to a cas loop.
1485 */
1486 atomic_t val, new_val;
1487
1488 do {
1489 val = atomic_get(&r->cq_count);
1490 new_val = (atomic_t)((uintptr_t)val + 1);
1491 } while (!atomic_cas(&r->cq_count, val, new_val));
1492
1493#ifdef CONFIG_RTIO_SUBMIT_SEM
1494 if (r->submit_count > 0) {
1495 r->submit_count--;
1496 if (r->submit_count == 0) {
1497 k_sem_give(r->submit_sem);
1498 }
1499 }
1500#endif
1501 SYS_PORT_TRACING_FUNC_EXIT(rtio, cqe_submit, r);
1502}
1503
1504#define __RTIO_MEMPOOL_GET_NUM_BLKS(num_bytes, blk_size) (((num_bytes) + (blk_size)-1) / (blk_size))
1505
1518static inline int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len,
1519 uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
1520{
1521 struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe;
1522
1523#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1524 if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) {
1525 struct rtio *r = iodev_sqe->r;
1526
1527 if (sqe->rx.buf != NULL) {
1528 if (sqe->rx.buf_len < min_buf_len) {
1529 return -ENOMEM;
1530 }
1531 *buf = sqe->rx.buf;
1532 *buf_len = sqe->rx.buf_len;
1533 return 0;
1534 }
1535
1536 int rc = rtio_block_pool_alloc(r, min_buf_len, max_buf_len, buf, buf_len);
1537 if (rc == 0) {
1538 sqe->rx.buf = *buf;
1539 sqe->rx.buf_len = *buf_len;
1540 return 0;
1541 }
1542
1543 return -ENOMEM;
1544 }
1545#else
1546 ARG_UNUSED(max_buf_len);
1547#endif
1548
1549 if (sqe->rx.buf_len < min_buf_len) {
1550 return -ENOMEM;
1551 }
1552
1553 *buf = sqe->rx.buf;
1554 *buf_len = sqe->rx.buf_len;
1555 return 0;
1556}
1557
1572__syscall void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len);
1573
1574static inline void z_impl_rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
1575{
1576#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1577 if (r == NULL || buff == NULL || r->block_pool == NULL || buff_len == 0) {
1578 return;
1579 }
1580
1581 rtio_block_pool_free(r, buff, buff_len);
1582#else
1583 ARG_UNUSED(r);
1584 ARG_UNUSED(buff);
1585 ARG_UNUSED(buff_len);
1586#endif
1587}
1588
1595static inline void rtio_access_grant(struct rtio *r, struct k_thread *t)
1596{
1598
1599#ifdef CONFIG_RTIO_SUBMIT_SEM
1600 k_object_access_grant(r->submit_sem, t);
1601#endif
1602
1603#ifdef CONFIG_RTIO_CONSUME_SEM
1604 k_object_access_grant(r->consume_sem, t);
1605#endif
1606}
1607
1608
1615static inline void rtio_access_revoke(struct rtio *r, struct k_thread *t)
1616{
1618
1619#ifdef CONFIG_RTIO_SUBMIT_SEM
1620 k_object_access_revoke(r->submit_sem, t);
1621#endif
1622
1623#ifdef CONFIG_RTIO_CONSUME_SEM
1624 k_object_access_revoke(r->consume_sem, t);
1625#endif
1626}
1627
1638__syscall int rtio_sqe_cancel(struct rtio_sqe *sqe);
1639
1640static inline int z_impl_rtio_sqe_cancel(struct rtio_sqe *sqe)
1641{
1642 SYS_PORT_TRACING_FUNC(rtio, sqe_cancel, sqe);
1643 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);
1644
1645 do {
1646 iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED;
1647 iodev_sqe = rtio_iodev_sqe_next(iodev_sqe);
1648 } while (iodev_sqe != NULL);
1649
1650 return 0;
1651}
1652
1664__syscall void rtio_sqe_signal(struct rtio_sqe *sqe);
1665
1666static inline void z_impl_rtio_sqe_signal(struct rtio_sqe *sqe)
1667{
1668 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);
1669
1670 if (!atomic_cas(&iodev_sqe->sqe.await.ok, 0, 1)) {
1671 iodev_sqe->sqe.await.callback(iodev_sqe, iodev_sqe->sqe.await.userdata);
1672 }
1673}
1674
1685static inline void rtio_iodev_sqe_await_signal(struct rtio_iodev_sqe *iodev_sqe,
1686 rtio_signaled_t callback,
1687 void *userdata)
1688{
1689 iodev_sqe->sqe.await.callback = callback;
1690 iodev_sqe->sqe.await.userdata = userdata;
1691
1692 if (!atomic_cas(&iodev_sqe->sqe.await.ok, 0, 1)) {
1693 callback(iodev_sqe, userdata);
1694 }
1695}
1696
1712__syscall int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1713 struct rtio_sqe **handle, size_t sqe_count);
1714
1715static inline int z_impl_rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1716 struct rtio_sqe **handle,
1717 size_t sqe_count)
1718{
1719 struct rtio_sqe *sqe;
1720 uint32_t acquirable = rtio_sqe_acquirable(r);
1721
1722 if (acquirable < sqe_count) {
1723 return -ENOMEM;
1724 }
1725
1726 for (unsigned long i = 0; i < sqe_count; i++) {
1727 sqe = rtio_sqe_acquire(r);
1728 __ASSERT_NO_MSG(sqe != NULL);
1729 if (handle != NULL && i == 0) {
1730 *handle = sqe;
1731 }
1732 *sqe = sqes[i];
1733 }
1734
1735 return 0;
1736}
1737
1754static inline int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
1755{
1756 return rtio_sqe_copy_in_get_handles(r, sqes, NULL, sqe_count);
1757}
1758
1774__syscall int rtio_cqe_copy_out(struct rtio *r,
1775 struct rtio_cqe *cqes,
1776 size_t cqe_count,
1777 k_timeout_t timeout);
1778static inline int z_impl_rtio_cqe_copy_out(struct rtio *r,
1779 struct rtio_cqe *cqes,
1780 size_t cqe_count,
1781 k_timeout_t timeout)
1782{
1783 size_t copied = 0;
1784 struct rtio_cqe *cqe;
1785 k_timepoint_t end = sys_timepoint_calc(timeout);
1786
1787 do {
1790 if (cqe == NULL) {
1791 Z_SPIN_DELAY(25);
1792 continue;
1793 }
1794 cqes[copied++] = *cqe;
1795 rtio_cqe_release(r, cqe);
1796 } while (copied < cqe_count && !sys_timepoint_expired(end));
1797
1798 return copied;
1799}
1800
1816__syscall int rtio_submit(struct rtio *r, uint32_t wait_count);
1817
1818#ifdef CONFIG_RTIO_SUBMIT_SEM
1819static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
1820{
1821 SYS_PORT_TRACING_FUNC_ENTER(rtio, submit, r, wait_count);
1822 int res = 0;
1823
1824 if (wait_count > 0) {
1825 __ASSERT(!k_is_in_isr(),
1826 "expected rtio submit with wait count to be called from a thread");
1827
1828 k_sem_reset(r->submit_sem);
1829 r->submit_count = wait_count;
1830 }
1831
1833
1834 if (wait_count > 0) {
1835 res = k_sem_take(r->submit_sem, K_FOREVER);
1836 __ASSERT(res == 0,
1837 "semaphore was reset or timed out while waiting on completions!");
1838 }
1839
1841 return res;
1842}
1843#else
1844static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
1845{
1846
1847 SYS_PORT_TRACING_FUNC_ENTER(rtio, submit, r, wait_count);
1848 int res = 0;
1849 uintptr_t cq_count = (uintptr_t)atomic_get(&r->cq_count);
1850 uintptr_t cq_complete_count = cq_count + wait_count;
1851 bool wraps = cq_complete_count < cq_count;
1852
1854
1855 if (wraps) {
1856 while ((uintptr_t)atomic_get(&r->cq_count) >= cq_count) {
1857 Z_SPIN_DELAY(10);
1858 k_yield();
1859 }
1860 }
1861
1862 while ((uintptr_t)atomic_get(&r->cq_count) < cq_complete_count) {
1863 Z_SPIN_DELAY(10);
1864 k_yield();
1865 }
1866
1868 return res;
1869}
1870#endif /* CONFIG_RTIO_SUBMIT_SEM */
1871
1878
1880 struct rtio **contexts;
1881
1884};
1885
1894__syscall struct rtio *rtio_pool_acquire(struct rtio_pool *pool);
1895
1896static inline struct rtio *z_impl_rtio_pool_acquire(struct rtio_pool *pool)
1897{
1898 struct rtio *r = NULL;
1899
1900 for (size_t i = 0; i < pool->pool_size; i++) {
1901 if (atomic_test_and_set_bit(pool->used, i) == 0) {
1902 r = pool->contexts[i];
1903 break;
1904 }
1905 }
1906
1907 if (r != NULL) {
1909 }
1910
1911 return r;
1912}
1913
1920__syscall void rtio_pool_release(struct rtio_pool *pool, struct rtio *r);
1921
1922static inline void z_impl_rtio_pool_release(struct rtio_pool *pool, struct rtio *r)
1923{
1924
1925 if (k_is_user_context()) {
1927 }
1928
1929 for (size_t i = 0; i < pool->pool_size; i++) {
1930 if (pool->contexts[i] == r) {
1931 atomic_clear_bit(pool->used, i);
1932 break;
1933 }
1934 }
1935}
1936
1937/* clang-format off */
1938
1940
1941#define Z_RTIO_POOL_NAME_N(n, name) \
1942 name##_##n
1943
1944#define Z_RTIO_POOL_DEFINE_N(n, name, sq_sz, cq_sz) \
1945 RTIO_DEFINE(Z_RTIO_POOL_NAME_N(n, name), sq_sz, cq_sz)
1946
1947#define Z_RTIO_POOL_REF_N(n, name) \
1948 &Z_RTIO_POOL_NAME_N(n, name)
1949
1951
1960#define RTIO_POOL_DEFINE(name, pool_sz, sq_sz, cq_sz) \
1961 LISTIFY(pool_sz, Z_RTIO_POOL_DEFINE_N, (;), name, sq_sz, cq_sz); \
1962 static struct rtio *name##_contexts[] = { \
1963 LISTIFY(pool_sz, Z_RTIO_POOL_REF_N, (,), name) \
1964 }; \
1965 ATOMIC_DEFINE(name##_used, pool_sz); \
1966 STRUCT_SECTION_ITERABLE(rtio_pool, name) = { \
1967 .pool_size = pool_sz, \
1968 .contexts = name##_contexts, \
1969 .used = name##_used, \
1970 }
1971
1972/* clang-format on */
1973
1977
1978#ifdef __cplusplus
1979}
1980#endif
1981
1982#include <zephyr/syscalls/rtio.h>
1983
1984#endif /* ZEPHYR_INCLUDE_RTIO_RTIO_H_ */
workaround assembler barfing for ST r
Definition asm-macro-32-bit-gnu.h:24
long atomic_t
Definition atomic_types.h:15
static _Bool atomic_test_and_set_bit(atomic_t *target, int bit)
Atomically set a bit and test it.
Definition atomic.h:172
static void atomic_clear_bit(atomic_t *target, int bit)
Atomically clear a bit.
Definition atomic.h:193
atomic_val_t atomic_get(const atomic_t *target)
Atomic get.
atomic_val_t atomic_inc(atomic_t *target)
Atomic increment.
_Bool atomic_cas(atomic_t *target, atomic_val_t old_value, atomic_val_t new_value)
Atomic compare-and-set.
#define K_FOREVER
Generate infinite timeout delay.
Definition kernel.h:1666
#define K_NO_WAIT
Generate null timeout delay.
Definition kernel.h:1556
k_timepoint_t sys_timepoint_calc(k_timeout_t timeout)
Calculate a timepoint value.
static bool sys_timepoint_expired(k_timepoint_t timepoint)
Indicates if timepoint is expired.
Definition clock.h:388
#define K_TIMEOUT_EQ(a, b)
Compare timeouts for equality.
Definition clock.h:80
bool k_is_in_isr(void)
Determine if code is running at interrupt level.
int sys_mem_blocks_free_contiguous(sys_mem_blocks_t *mem_block, void *block, size_t count)
Free contiguous multiple memory blocks.
int sys_mem_blocks_alloc_contiguous(sys_mem_blocks_t *mem_block, size_t count, void **out_block)
Allocate a contiguous set of memory blocks.
static ALWAYS_INLINE void mpsc_push(struct mpsc *q, struct mpsc_node *n)
Push a node.
Definition mpsc_lockfree.h:126
static struct mpsc_node * mpsc_pop(struct mpsc *q)
Pop a node off of the list.
Definition mpsc_lockfree.h:145
#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags)
Get the block count of a mempool flags.
Definition rtio.h:179
#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags)
Get the block index of a mempool flags.
Definition rtio.h:171
#define RTIO_CQE_FLAG_MEMPOOL_BUFFER
The entry's buffer was allocated from the RTIO's mempool.
Definition rtio.h:161
#define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt)
Prepare CQE flags for a mempool read.
Definition rtio.h:188
#define RTIO_CQE_FLAG_GET(flags)
Definition rtio.h:163
#define RTIO_SQE_MULTISHOT
The SQE should continue producing CQEs until canceled.
Definition rtio.h:137
#define RTIO_SQE_TRANSACTION
The next request in the queue is part of a transaction.
Definition rtio.h:109
#define RTIO_SQE_MEMPOOL_BUFFER
The buffer should be allocated by the RTIO mempool.
Definition rtio.h:121
#define RTIO_SQE_CANCELED
The SQE should not execute if possible.
Definition rtio.h:129
#define RTIO_SQE_NO_RESPONSE
The SQE does not produce a CQE.
Definition rtio.h:142
#define RTIO_SQE_CHAINED
The next request in the queue should wait on this one.
Definition rtio.h:97
void rtio_pool_release(struct rtio_pool *pool, struct rtio *r)
Return an RTIO context to a pool.
static void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, void *userdata)
Prepare a read op submission with context's mempool.
Definition rtio.h:637
void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result)
#define RTIO_OP_CALLBACK
An operation that calls a given function (callback).
Definition rtio.h:574
static uint32_t rtio_sqe_acquirable(struct rtio *r)
Count of acquirable submission queue events.
Definition rtio.h:1070
static void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
Definition rtio.h:888
static void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, const uint8_t *tiny_write_data, uint8_t tiny_write_len, void *userdata)
Prepare a tiny write op submission.
Definition rtio.h:682
struct rtio * rtio_pool_acquire(struct rtio_pool *pool)
Obtain an RTIO context from a pool.
static size_t rtio_mempool_block_size(const struct rtio *r)
Get the mempool block size of the RTIO context.
Definition rtio.h:498
static void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
Submit a completion queue event with a given result and userdata.
Definition rtio.h:1466
static void rtio_sqe_prep_nop(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, void *userdata)
Prepare a nop (no op) submission.
Definition rtio.h:603
void(* rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, int res, void *arg0)
Callback signature for RTIO_OP_CALLBACK.
Definition rtio.h:283
static void rtio_sqe_prep_await_iodev(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, void *userdata)
Prepare an await op submission which blocks an rtio_iodev until completion.
Definition rtio.h:799
void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
Release memory that was allocated by the RTIO's memory pool.
static int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
Copy an array of SQEs into the queue.
Definition rtio.h:1754
static void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
Produce a complete queue event if available.
Definition rtio.h:1237
#define RTIO_OP_TINY_TX
An operation that transmits tiny writes by copying the data to write.
Definition rtio.h:571
static uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
Compute the CQE flags from the rtio_iodev_sqe entry.
Definition rtio.h:1349
static void rtio_iodev_sqe_await_signal(struct rtio_iodev_sqe *iodev_sqe, rtio_signaled_t callback, void *userdata)
Await an AWAIT SQE signal from RTIO IODEV.
Definition rtio.h:1685
void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
static int rtio_block_pool_alloc(struct rtio *r, size_t min_sz, size_t max_sz, uint8_t **buf, uint32_t *buf_len)
Definition rtio.h:895
static void rtio_sqe_prep_write(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, const uint8_t *buf, uint32_t len, void *userdata)
Prepare a write op submission.
Definition rtio.h:656
int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes, struct rtio_sqe **handle, size_t sqe_count)
Copy an array of SQEs into the queue and get resulting handles back.
static struct rtio_cqe * rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
Definition rtio.h:871
struct k_mem_partition rtio_partition
The memory partition associated with all RTIO context information.
static void rtio_sqe_prep_read(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, uint8_t *buf, uint32_t len, void *userdata)
Prepare a read op submission.
Definition rtio.h:616
static struct rtio_sqe * rtio_sqe_acquire(struct rtio *r)
Acquire a single submission queue event if available.
Definition rtio.h:1137
#define RTIO_OP_TX
An operation that transmits (writes).
Definition rtio.h:568
static void rtio_sqe_drop_all(struct rtio *r)
Drop all previously acquired sqe.
Definition rtio.h:1203
static void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, void *userdata)
Definition rtio.h:645
int rtio_cqe_copy_out(struct rtio *r, struct rtio_cqe *cqes, size_t cqe_count, k_timeout_t timeout)
Copy an array of CQEs from the queue.
static int rtio_flush_completion_queue(struct rtio *r)
Flush completion queue.
Definition rtio.h:1325
static void rtio_access_revoke(struct rtio *r, struct k_thread *t)
Revoke access to an RTIO context from a user thread.
Definition rtio.h:1615
static void rtio_sqe_prep_callback(struct rtio_sqe *sqe, rtio_callback_t callback, void *arg0, void *userdata)
Prepare a callback op submission.
Definition rtio.h:708
static void rtio_access_grant(struct rtio *r, struct k_thread *t)
Grant access to an RTIO context to a user thread.
Definition rtio.h:1595
#define RTIO_OP_TXRX
An operation that transceives (reads and writes simultaneously).
Definition rtio.h:577
static void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
Release consumed completion queue event.
Definition rtio.h:1311
static void rtio_sqe_prep_await_executor(struct rtio_sqe *sqe, int8_t prio, void *userdata)
Prepare an await op submission which completes the sqe after being signaled.
Definition rtio.h:816
static int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len, uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
Get the buffer associate with the RX submission.
Definition rtio.h:1518
static void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
Inform the executor of a submissions completion with error.
Definition rtio.h:1450
void(* rtio_signaled_t)(struct rtio_iodev_sqe *iodev_sqe, void *userdata)
Callback signature for RTIO_OP_AWAIT signaled.
Definition rtio.h:291
static void rtio_sqe_prep_transceive(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, const uint8_t *tx_buf, uint8_t *rx_buf, uint32_t buf_len, void *userdata)
Prepare a transceive op submission.
Definition rtio.h:744
int rtio_sqe_cancel(struct rtio_sqe *sqe)
Attempt to cancel an SQE.
static void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
Definition rtio.h:864
static void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
Inform the executor of a submission completion with success.
Definition rtio.h:1437
#define rtio_sqe_prep_delay(sqe, timeout, userdata)
Prepare a delay operation submission which completes after the given timeout.
Definition rtio.h:845
#define RTIO_OP_NOP
An operation that does nothing and will complete immediately.
Definition rtio.h:562
#define RTIO_OP_AWAIT
An operation to await a signal while blocking the iodev (if one is provided).
Definition rtio.h:598
static struct rtio_cqe * rtio_cqe_acquire(struct rtio *r)
Acquire a complete queue event if available.
Definition rtio.h:1218
static struct rtio_iodev_sqe * rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
Get the next sqe in the chain.
Definition rtio.h:1104
static struct rtio_cqe * rtio_cqe_consume(struct rtio *r)
Consume a single completion queue event if available.
Definition rtio.h:1253
static struct rtio_iodev_sqe * rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
Definition rtio.h:849
void rtio_sqe_signal(struct rtio_sqe *sqe)
Signal an AWAIT SQE.
static struct rtio_iodev_sqe * rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
Get the next sqe in the chain or transaction.
Definition rtio.h:1124
static void rtio_sqe_prep_callback_no_cqe(struct rtio_sqe *sqe, rtio_callback_t callback, void *arg0, void *userdata)
Prepare a callback op submission that does not create a CQE.
Definition rtio.h:732
#define RTIO_OP_DELAY
An operation that takes a specified amount of time (asynchronously) before completing.
Definition rtio.h:580
int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe, uint8_t **buff, uint32_t *buff_len)
Retrieve the mempool buffer that was allocated for the CQE.
static struct rtio_iodev_sqe * rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
Get the next sqe in the transaction.
Definition rtio.h:1083
void rtio_executor_submit(struct rtio *r)
static struct rtio_cqe * rtio_cqe_consume_block(struct rtio *r)
Wait for and consume a single completion queue event.
Definition rtio.h:1287
static int rtio_sqe_acquire_array(struct rtio *r, size_t n, struct rtio_sqe **sqes)
Acquire a number of submission queue events if available.
Definition rtio.h:1165
static void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len)
Definition rtio.h:936
static void rtio_sqe_prep_await(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, void *userdata)
Prepare an await op submission.
Definition rtio.h:776
#define RTIO_OP_RX
An operation that receives (reads).
Definition rtio.h:565
int rtio_submit(struct rtio *r, uint32_t wait_count)
Submit I/O requests to the underlying executor.
void k_sem_reset(struct k_sem *sem)
Resets a semaphore's count to zero.
void k_sem_give(struct k_sem *sem)
Give a semaphore.
int k_sem_take(struct k_sem *sem, k_timeout_t timeout)
Take a semaphore.
#define SYS_PORT_TRACING_FUNC_ENTER(type, func,...)
Tracing macro for the entry into a function that might or might not return a value.
Definition tracing_macros.h:257
#define SYS_PORT_TRACING_FUNC_EXIT(type, func,...)
Tracing macro for when a function ends its execution.
Definition tracing_macros.h:283
#define SYS_PORT_TRACING_FUNC(type, func,...)
Tracing macro for function calls which are not directly associated with a specific type of object.
Definition tracing_macros.h:244
#define BIT(n)
Unsigned integer with bit position n set (signed in assembly language).
Definition util_macro.h:44
#define CONTAINER_OF(ptr, type, field)
Get a pointer to a structure containing the element.
Definition util.h:281
#define DIV_ROUND_UP(n, d)
Divide and round up.
Definition util.h:348
#define EINVAL
Invalid argument.
Definition errno.h:60
#define ENOMEM
Not enough core.
Definition errno.h:50
#define ENOTSUP
Unsupported value.
Definition errno.h:114
void k_yield(void)
Yield the current thread.
static __attribute_const__ k_tid_t k_current_get(void)
Get thread ID of the current thread.
Definition kernel.h:836
void k_object_access_grant(const void *object, struct k_thread *thread)
Grant a thread access to a kernel object.
void k_object_access_revoke(const void *object, struct k_thread *thread)
Revoke a thread's access to a kernel object.
#define NULL
Definition iar_missing_defs.h:20
#define BUILD_ASSERT(EXPR, MSG...)
Definition llvm.h:51
Public kernel APIs.
Memory Blocks Allocator.
A wait-free intrusive multi producer single consumer (MPSC) queue using a singly linked list.
flags
Definition parser.h:97
__UINT32_TYPE__ uint32_t
Definition stdint.h:90
__INT32_TYPE__ int32_t
Definition stdint.h:74
__UINT8_TYPE__ uint8_t
Definition stdint.h:88
#define UINT16_MAX
Definition stdint.h:28
__UINTPTR_TYPE__ uintptr_t
Definition stdint.h:105
__UINT16_TYPE__ uint16_t
Definition stdint.h:89
__INT8_TYPE__ int8_t
Definition stdint.h:72
void * memset(void *buf, int c, size_t n)
void * memcpy(void *ZRESTRICT d, const void *ZRESTRICT s, size_t n)
Memory Partition.
Definition mem_domain.h:55
Semaphore structure.
Definition kernel.h:3607
Thread Structure.
Definition thread.h:259
Kernel timeout type.
Definition clock.h:65
Kernel timepoint type.
Definition clock.h:291
Queue member.
Definition mpsc_lockfree.h:79
MPSC Queue.
Definition mpsc_lockfree.h:86
Definition rtio.h:426
struct rtio_cqe * pool
Definition rtio.h:430
struct mpsc free_q
Definition rtio.h:427
const uint16_t pool_size
Definition rtio.h:428
uint16_t pool_free
Definition rtio.h:429
A completion queue event.
Definition rtio.h:411
void * userdata
Associated userdata with operation.
Definition rtio.h:415
struct mpsc_node q
Definition rtio.h:412
uint32_t flags
Flags associated with the operation.
Definition rtio.h:416
int32_t result
Result from operation.
Definition rtio.h:414
Compute the mempool block index for a given pointer.
Definition rtio.h:538
void(* submit)(struct rtio_iodev_sqe *iodev_sqe)
Submit to the iodev an entry to work on.
Definition rtio.h:547
IO device submission queue entry.
Definition rtio.h:386
struct rtio_iodev_sqe * next
Definition rtio.h:389
struct rtio_sqe sqe
Definition rtio.h:387
struct rtio * r
Definition rtio.h:390
struct mpsc_node q
Definition rtio.h:388
An IO device with a function table for submitting requests.
Definition rtio.h:553
const struct rtio_iodev_api * api
Definition rtio.h:555
void * data
Definition rtio.h:558
Pool of RTIO contexts to use with dynamically created threads.
Definition rtio.h:1875
struct rtio ** contexts
Array containing contexts of the pool.
Definition rtio.h:1880
atomic_t * used
Atomic bitmap to signal a member is used/unused.
Definition rtio.h:1883
size_t pool_size
Size of the pool.
Definition rtio.h:1877
Definition rtio.h:419
struct rtio_iodev_sqe * pool
Definition rtio.h:423
const uint16_t pool_size
Definition rtio.h:421
struct mpsc free_q
Definition rtio.h:420
uint16_t pool_free
Definition rtio.h:422
A submission queue event.
Definition rtio.h:296
uint32_t i2c_config
OP_I2C_CONFIGURE.
Definition rtio.h:358
void * userdata
User provided data which is returned upon operation completion.
Definition rtio.h:314
const uint8_t * tx_buf
Buffer to write from.
Definition rtio.h:345
struct rtio_sqe::@346015370260157122324174060242055067274246076272::@156103043324343070111363144011175302340145332235 tiny_tx
OP_TINY_TX.
uint8_t op
Op code.
Definition rtio.h:297
struct rtio_sqe::@346015370260157122324174060242055067274246076272::@133276262235321357167246345002275273273102345261 rx
OP_RX.
void * arg0
Last argument given to callback.
Definition rtio.h:339
struct rtio_sqe::@346015370260157122324174060242055067274246076272::@004235137221060376063310265133374117312204154130 tx
OP_TX.
atomic_t ok
Definition rtio.h:373
uint8_t * rx_buf
Buffer to read into.
Definition rtio.h:346
uint8_t prio
Op priority.
Definition rtio.h:299
struct rtio_sqe::@346015370260157122324174060242055067274246076272::@324335144354230362066357340057163170350241057343 txrx
OP_TXRX.
uint32_t buf_len
Length of buffer.
Definition rtio.h:320
const struct rtio_iodev * iodev
Device to operation on.
Definition rtio.h:305
struct rtio_sqe::@346015370260157122324174060242055067274246076272::@027353315232031212075067017357062040237337145202 await
OP_AWAIT.
uint32_t iodev_flags
Op iodev flags.
Definition rtio.h:303
void * ccc_payload
OP_I3C_CCC.
Definition rtio.h:369
int type
Definition rtio.h:363
uint16_t flags
Op Flags.
Definition rtio.h:301
const uint8_t * buf
Buffer to write from.
Definition rtio.h:321
void * config
Definition rtio.h:364
rtio_callback_t callback
Definition rtio.h:338
An RTIO context containing what can be viewed as a pair of queues.
Definition rtio.h:444
struct rtio_cqe_pool * cqe_pool
Definition rtio.h:474
struct mpsc sq
Definition rtio.h:482
atomic_t cq_count
Definition rtio.h:463
struct rtio_sqe_pool * sqe_pool
Definition rtio.h:471
atomic_t xcqcnt
Definition rtio.h:468
struct mpsc cq
Definition rtio.h:485
Misc utilities.
static __pinned_func bool k_is_user_context(void)
Indicate whether the CPU is currently in user mode.
Definition syscall.h:115