Zephyr API Documentation 3.6.99
A Scalable Open Source RTOS
rtio.h
1/*
2 * Copyright (c) 2022 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
26#ifndef ZEPHYR_INCLUDE_RTIO_RTIO_H_
27#define ZEPHYR_INCLUDE_RTIO_RTIO_H_
28
29#include <string.h>
30
32#include <zephyr/device.h>
33#include <zephyr/kernel.h>
35#include <zephyr/sys/__assert.h>
36#include <zephyr/sys/atomic.h>
38#include <zephyr/sys/util.h>
40
41#ifdef __cplusplus
42extern "C" {
43#endif
44
45
65#define RTIO_PRIO_LOW 0U
66
70#define RTIO_PRIO_NORM 127U
71
75#define RTIO_PRIO_HIGH 255U
76
96#define RTIO_SQE_CHAINED BIT(0)
97
108#define RTIO_SQE_TRANSACTION BIT(1)
109
110
120#define RTIO_SQE_MEMPOOL_BUFFER BIT(2)
121
128#define RTIO_SQE_CANCELED BIT(3)
129
136#define RTIO_SQE_MULTISHOT BIT(4)
137
141#define RTIO_SQE_NO_RESPONSE BIT(5)
142
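These flags are set on an acquired SQE before submission. A minimal sketch of a chained register read follows; the iodev my_spi_iodev, the context my_rtio, and the omitted NULL checks are illustrative assumptions, not part of this header.

#include <zephyr/rtio/rtio.h>

extern struct rtio_iodev my_spi_iodev; /* hypothetical iodev */
extern struct rtio my_rtio;            /* hypothetical context */

static uint8_t reg_addr = 0x0F;
static uint8_t reg_val;

void read_reg_chained(void)
{
	struct rtio_sqe *wr = rtio_sqe_acquire(&my_rtio); /* NULL checks omitted for brevity */
	struct rtio_sqe *rd = rtio_sqe_acquire(&my_rtio);

	rtio_sqe_prep_write(wr, &my_spi_iodev, RTIO_PRIO_NORM, &reg_addr, 1, NULL);
	wr->flags |= RTIO_SQE_CHAINED; /* start the read only after the write succeeds */
	rtio_sqe_prep_read(rd, &my_spi_iodev, RTIO_PRIO_NORM, &reg_val, 1, NULL);

	rtio_submit(&my_rtio, 2); /* wait for both completions */
}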
160#define RTIO_CQE_FLAG_MEMPOOL_BUFFER BIT(0)
161
162#define RTIO_CQE_FLAG_GET(flags) FIELD_GET(GENMASK(7, 0), (flags))
163
170#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags) FIELD_GET(GENMASK(19, 8), (flags))
171
178#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags) FIELD_GET(GENMASK(31, 20), (flags))
179
187#define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt) \
188 (FIELD_PREP(GENMASK(7, 0), RTIO_CQE_FLAG_MEMPOOL_BUFFER) | \
189 FIELD_PREP(GENMASK(19, 8), blk_idx) | FIELD_PREP(GENMASK(31, 20), blk_cnt))
190
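The PREP macro and the two GET macros above pack and unpack the same 8/12/12-bit layout, so a round trip recovers the block index and count. An illustrative check (editor's sketch, not part of the header):

static inline void cqe_mempool_flags_example(void)
{
	uint32_t flags = RTIO_CQE_FLAG_PREP_MEMPOOL(3, 2);

	__ASSERT_NO_MSG(RTIO_CQE_FLAG_GET(flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER);
	__ASSERT_NO_MSG(RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags) == 3);
	__ASSERT_NO_MSG(RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags) == 2);
}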
198#define RTIO_IODEV_I2C_STOP BIT(1)
199
203#define RTIO_IODEV_I2C_RESTART BIT(2)
204
208#define RTIO_IODEV_I2C_10_BITS BIT(3)
209
211struct rtio;
212struct rtio_cqe;
213struct rtio_sqe;
214struct rtio_sqe_pool;
215struct rtio_cqe_pool;
216struct rtio_iodev;
217struct rtio_iodev_sqe;
227typedef void (*rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, void *arg0);
228
232struct rtio_sqe {
233 uint8_t op; /**< Op code */
235 uint8_t prio; /**< Op priority */
237 uint16_t flags; /**< Op Flags */
239 uint16_t iodev_flags; /**< Op iodev flags */
241 uint16_t _resv0;
242
243 const struct rtio_iodev *iodev; /**< Device to operation on */
252 void *userdata; /**< User provided data returned upon operation completion */
253
254 union {
255
256 /** OP_TX, OP_RX */
257 struct {
258 uint32_t buf_len; /**< Length of buffer */
259 uint8_t *buf; /**< Buffer to use */
260 };
261
262 /** OP_TINY_TX */
263 struct {
264 uint8_t tiny_buf_len; /**< Length of tiny buffer */
265 uint8_t tiny_buf[7]; /**< Tiny buffer */
266 };
267
268 /** OP_CALLBACK */
269 struct {
270 rtio_callback_t callback;
271 void *arg0; /**< Last argument given to callback */
272 };
273
274 /** OP_TXRX */
275 struct {
276 uint32_t txrx_buf_len;
277 uint8_t *tx_buf;
278 uint8_t *rx_buf;
279 };
280
281 /** OP_I2C_CONFIGURE */
282 uint32_t i2c_config;
283 };
284};
285
287/* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
288BUILD_ASSERT(sizeof(struct rtio_sqe) <= 64);
294struct rtio_cqe {
295 struct rtio_mpsc_node q;
296
297 int32_t result; /**< Result from operation */
298 void *userdata; /**< Associated userdata with operation */
299 uint32_t flags; /**< Flags associated with the operation */
300};
301
302struct rtio_sqe_pool {
303 struct rtio_mpsc free_q;
304 const uint16_t pool_size;
305 uint16_t pool_free;
306 struct rtio_iodev_sqe *pool;
307};
308
309struct rtio_cqe_pool {
310 struct rtio_mpsc free_q;
311 const uint16_t pool_size;
312 uint16_t pool_free;
313 struct rtio_cqe *pool;
314};
315
327struct rtio {
328#ifdef CONFIG_RTIO_SUBMIT_SEM
329 /* A wait semaphore which may suspend the calling thread
330 * to wait for some number of completions when calling submit
331 */
332 struct k_sem *submit_sem;
333
334 uint32_t submit_count;
335#endif
336
337#ifdef CONFIG_RTIO_CONSUME_SEM
338 /* A wait semaphore which may suspend the calling thread
339 * to wait for some number of completions while consuming
340 * them from the completion queue
341 */
342 struct k_sem *consume_sem;
343#endif
344
345 /* Total number of completions */
346 atomic_t cq_count;
347
348 /* Number of completions that were unable to be submitted with results
349 * due to the cq being full
350 */
351 atomic_t xcqcnt;
352
353 /* Submission queue object pool with free list */
354 struct rtio_sqe_pool *sqe_pool;
355
356 /* Complete queue object pool with free list */
357 struct rtio_cqe_pool *cqe_pool;
358
359#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
360 /* Mem block pool */
361 struct sys_mem_blocks *block_pool;
362#endif
363
364 /* Submission queue */
365 struct rtio_mpsc sq;
366
367 /* Completion queue */
368 struct rtio_mpsc cq;
369};
370
372extern struct k_mem_partition rtio_partition;
373
381static inline size_t rtio_mempool_block_size(const struct rtio *r)
382{
383#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
384 ARG_UNUSED(r);
385 return 0;
386#else
387 if (r == NULL || r->block_pool == NULL) {
388 return 0;
389 }
390 return BIT(r->block_pool->info.blk_sz_shift);
391#endif
392}
393
401#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
402static inline uint16_t __rtio_compute_mempool_block_index(const struct rtio *r, const void *ptr)
403{
404 uintptr_t addr = (uintptr_t)ptr;
405 struct sys_mem_blocks *mem_pool = r->block_pool;
406 uint32_t block_size = rtio_mempool_block_size(r);
407
408 uintptr_t buff = (uintptr_t)mem_pool->buffer;
409 uint32_t buff_size = mem_pool->info.num_blocks * block_size;
410
411 if (addr < buff || addr >= buff + buff_size) {
412 return UINT16_MAX;
413 }
414 return (addr - buff) / block_size;
415}
416#endif
417
423struct rtio_iodev_sqe {
424 struct rtio_sqe sqe;
425 struct rtio_mpsc_node q;
426 struct rtio_iodev_sqe *next;
427 struct rtio *r;
428};
429
433struct rtio_iodev_api {
442 void (*submit)(struct rtio_iodev_sqe *iodev_sqe);
443};
444
448struct rtio_iodev {
449 /* Function pointer table */
450 const struct rtio_iodev_api *api;
451
452 /* Queue of RTIO contexts with requests */
453 struct rtio_mpsc iodev_sq;
454
455 /* Data associated with this iodev */
456 void *data;
457};
458
460#define RTIO_OP_NOP 0
461
463#define RTIO_OP_RX (RTIO_OP_NOP+1)
464
466#define RTIO_OP_TX (RTIO_OP_RX+1)
467
469#define RTIO_OP_TINY_TX (RTIO_OP_TX+1)
470
472#define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)
473
475#define RTIO_OP_TXRX (RTIO_OP_CALLBACK+1)
476
478#define RTIO_OP_I2C_RECOVER (RTIO_OP_TXRX+1)
479
481#define RTIO_OP_I2C_CONFIGURE (RTIO_OP_I2C_RECOVER+1)
482
486static inline void rtio_sqe_prep_nop(struct rtio_sqe *sqe,
487 const struct rtio_iodev *iodev,
488 void *userdata)
489{
490 memset(sqe, 0, sizeof(struct rtio_sqe));
491 sqe->op = RTIO_OP_NOP;
492 sqe->iodev = iodev;
493 sqe->userdata = userdata;
494}
495
499static inline void rtio_sqe_prep_read(struct rtio_sqe *sqe,
500 const struct rtio_iodev *iodev,
501 int8_t prio,
502 uint8_t *buf,
503 uint32_t len,
504 void *userdata)
505{
506 memset(sqe, 0, sizeof(struct rtio_sqe));
507 sqe->op = RTIO_OP_RX;
508 sqe->prio = prio;
509 sqe->iodev = iodev;
510 sqe->buf_len = len;
511 sqe->buf = buf;
512 sqe->userdata = userdata;
513}
514
520static inline void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe,
521 const struct rtio_iodev *iodev, int8_t prio,
522 void *userdata)
523{
524 rtio_sqe_prep_read(sqe, iodev, prio, NULL, 0, userdata);
525 sqe->flags = RTIO_SQE_MEMPOOL_BUFFER;
526}
527
528static inline void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe,
529 const struct rtio_iodev *iodev, int8_t prio,
530 void *userdata)
531{
532 rtio_sqe_prep_read_with_pool(sqe, iodev, prio, userdata);
533 sqe->flags |= RTIO_SQE_MULTISHOT;
534}
535
539static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe,
540 const struct rtio_iodev *iodev,
541 int8_t prio,
542 uint8_t *buf,
543 uint32_t len,
544 void *userdata)
545{
546 memset(sqe, 0, sizeof(struct rtio_sqe));
547 sqe->op = RTIO_OP_TX;
548 sqe->prio = prio;
549 sqe->iodev = iodev;
550 sqe->buf_len = len;
551 sqe->buf = buf;
552 sqe->userdata = userdata;
553}
554
565static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe,
566 const struct rtio_iodev *iodev,
567 int8_t prio,
568 const uint8_t *tiny_write_data,
569 uint8_t tiny_write_len,
570 void *userdata)
571{
572 __ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_buf));
573
574 memset(sqe, 0, sizeof(struct rtio_sqe));
575 sqe->op = RTIO_OP_TINY_TX;
576 sqe->prio = prio;
577 sqe->iodev = iodev;
578 sqe->tiny_buf_len = tiny_write_len;
579 memcpy(sqe->tiny_buf, tiny_write_data, tiny_write_len);
580 sqe->userdata = userdata;
581}
582
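Because rtio_sqe_prep_tiny_write() copies the payload into the SQE itself, the source buffer may live on the stack. A sketch of a two-byte register write to a hypothetical I2C iodev (names are illustrative, not part of this header):

#include <zephyr/rtio/rtio.h>

extern struct rtio_iodev my_i2c_iodev; /* hypothetical iodev */
extern struct rtio my_rtio;            /* hypothetical context */

void write_reg(uint8_t reg, uint8_t val)
{
	uint8_t cmd[2] = {reg, val};
	struct rtio_sqe *sqe = rtio_sqe_acquire(&my_rtio); /* NULL check omitted for brevity */

	/* cmd[] is copied into sqe->tiny_buf, so it need not outlive this function */
	rtio_sqe_prep_tiny_write(sqe, &my_i2c_iodev, RTIO_PRIO_NORM, cmd, sizeof(cmd), NULL);
	rtio_submit(&my_rtio, 1);
}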
591static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
592 rtio_callback_t callback,
593 void *arg0,
594 void *userdata)
595{
596 memset(sqe, 0, sizeof(struct rtio_sqe));
597 sqe->op = RTIO_OP_CALLBACK;
598 sqe->prio = 0;
599 sqe->iodev = NULL;
600 sqe->callback = callback;
601 sqe->arg0 = arg0;
602 sqe->userdata = userdata;
603}
604
608static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
609 const struct rtio_iodev *iodev,
610 int8_t prio,
611 uint8_t *tx_buf,
612 uint8_t *rx_buf,
613 uint32_t buf_len,
614 void *userdata)
615{
616 memset(sqe, 0, sizeof(struct rtio_sqe));
617 sqe->op = RTIO_OP_TXRX;
618 sqe->prio = prio;
619 sqe->iodev = iodev;
620 sqe->txrx_buf_len = buf_len;
621 sqe->tx_buf = tx_buf;
622 sqe->rx_buf = rx_buf;
623 sqe->userdata = userdata;
624}
625
626static inline struct rtio_iodev_sqe *rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
627{
628 struct rtio_mpsc_node *node = rtio_mpsc_pop(&pool->free_q);
629
630 if (node == NULL) {
631 return NULL;
632 }
633
634 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
635
636 pool->pool_free--;
637
638 return iodev_sqe;
639}
640
641static inline void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
642{
643 rtio_mpsc_push(&pool->free_q, &iodev_sqe->q);
644
645 pool->pool_free++;
646}
647
648static inline struct rtio_cqe *rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
649{
650 struct rtio_mpsc_node *node = rtio_mpsc_pop(&pool->free_q);
651
652 if (node == NULL) {
653 return NULL;
654 }
655
656 struct rtio_cqe *cqe = CONTAINER_OF(node, struct rtio_cqe, q);
657
658 memset(cqe, 0, sizeof(struct rtio_cqe));
659
660 pool->pool_free--;
661
662 return cqe;
663}
664
665static inline void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
666{
667 rtio_mpsc_push(&pool->free_q, &cqe->q);
668
669 pool->pool_free++;
670}
671
672static inline int rtio_block_pool_alloc(struct rtio *r, size_t min_sz,
673 size_t max_sz, uint8_t **buf, uint32_t *buf_len)
674{
675#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
676 ARG_UNUSED(r);
677 ARG_UNUSED(min_sz);
678 ARG_UNUSED(max_sz);
679 ARG_UNUSED(buf);
680 ARG_UNUSED(buf_len);
681 return -ENOTSUP;
682#else
683 const uint32_t block_size = rtio_mempool_block_size(r);
684 uint32_t bytes = max_sz;
685
686 /* Not every context has a block pool and the block size may return 0 in
687 * that case
688 */
689 if (block_size == 0) {
690 return -ENOMEM;
691 }
692
693 do {
694 size_t num_blks = DIV_ROUND_UP(bytes, block_size);
695 int rc = sys_mem_blocks_alloc_contiguous(r->block_pool, num_blks, (void **)buf);
696
697 if (rc == 0) {
698 *buf_len = num_blks * block_size;
699 return 0;
700 }
701
702 bytes -= block_size;
703 } while (bytes >= min_sz);
704
705 return -ENOMEM;
706#endif
707}
708
709static inline void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len)
710{
711#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
712 ARG_UNUSED(r);
713 ARG_UNUSED(buf);
714 ARG_UNUSED(buf_len);
715#else
716 size_t num_blks = buf_len >> r->block_pool->info.blk_sz_shift;
717
718 sys_mem_blocks_free_contiguous(r->block_pool, buf, num_blks);
719#endif
720}
721
722/* Do not try and reformat the macros */
723/* clang-format off */
724
732#define RTIO_IODEV_DEFINE(name, iodev_api, iodev_data) \
733 STRUCT_SECTION_ITERABLE(rtio_iodev, name) = { \
734 .api = (iodev_api), \
735 .iodev_sq = RTIO_MPSC_INIT((name.iodev_sq)), \
736 .data = (iodev_data), \
737 }
738
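RTIO_IODEV_DEFINE() binds an iodev to its API table. A minimal skeleton, assuming a device that can complete synchronously (names are illustrative; a real driver typically completes from an ISR or work item):

#include <zephyr/rtio/rtio.h>

static void my_iodev_submit(struct rtio_iodev_sqe *iodev_sqe)
{
	/* Start or perform the requested operation here, then report the
	 * outcome; use rtio_iodev_sqe_err() with a negative errno on failure.
	 */
	rtio_iodev_sqe_ok(iodev_sqe, 0);
}

static const struct rtio_iodev_api my_iodev_api = {
	.submit = my_iodev_submit,
};

RTIO_IODEV_DEFINE(my_iodev, &my_iodev_api, NULL);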
739#define Z_RTIO_SQE_POOL_DEFINE(name, sz) \
740 static struct rtio_iodev_sqe CONCAT(_sqe_pool_, name)[sz]; \
741 STRUCT_SECTION_ITERABLE(rtio_sqe_pool, name) = { \
742 .free_q = RTIO_MPSC_INIT((name.free_q)), \
743 .pool_size = sz, \
744 .pool_free = sz, \
745 .pool = CONCAT(_sqe_pool_, name), \
746 }
747
748
749#define Z_RTIO_CQE_POOL_DEFINE(name, sz) \
750 static struct rtio_cqe CONCAT(_cqe_pool_, name)[sz]; \
751 STRUCT_SECTION_ITERABLE(rtio_cqe_pool, name) = { \
752 .free_q = RTIO_MPSC_INIT((name.free_q)), \
753 .pool_size = sz, \
754 .pool_free = sz, \
755 .pool = CONCAT(_cqe_pool_, name), \
756 }
757
767#define RTIO_BMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_BMEM(rtio_partition) static), (static))
768
778#define RTIO_DMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_DMEM(rtio_partition) static), (static))
779
780#define Z_RTIO_BLOCK_POOL_DEFINE(name, blk_sz, blk_cnt, blk_align) \
781 RTIO_BMEM uint8_t __aligned(WB_UP(blk_align)) \
782 CONCAT(_block_pool_, name)[blk_cnt*WB_UP(blk_sz)]; \
783 _SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF(name, WB_UP(blk_sz), blk_cnt, \
784 CONCAT(_block_pool_, name), RTIO_DMEM)
785
786#define Z_RTIO_DEFINE(name, _sqe_pool, _cqe_pool, _block_pool) \
787 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, \
788 (static K_SEM_DEFINE(CONCAT(_submit_sem_, name), 0, K_SEM_MAX_LIMIT))) \
789 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, \
790 (static K_SEM_DEFINE(CONCAT(_consume_sem_, name), 0, K_SEM_MAX_LIMIT))) \
791 STRUCT_SECTION_ITERABLE(rtio, name) = { \
792 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_sem = &CONCAT(_submit_sem_, name),)) \
793 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_count = 0,)) \
794 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, (.consume_sem = &CONCAT(_consume_sem_, name),))\
795 .cq_count = ATOMIC_INIT(0), \
796 .xcqcnt = ATOMIC_INIT(0), \
797 .sqe_pool = _sqe_pool, \
798 .cqe_pool = _cqe_pool, \
799 IF_ENABLED(CONFIG_RTIO_SYS_MEM_BLOCKS, (.block_pool = _block_pool,)) \
800 .sq = RTIO_MPSC_INIT((name.sq)), \
801 .cq = RTIO_MPSC_INIT((name.cq)), \
802 }
803
811#define RTIO_DEFINE(name, sq_sz, cq_sz) \
812 Z_RTIO_SQE_POOL_DEFINE(CONCAT(name, _sqe_pool), sq_sz); \
813 Z_RTIO_CQE_POOL_DEFINE(CONCAT(name, _cqe_pool), cq_sz); \
814 Z_RTIO_DEFINE(name, &CONCAT(name, _sqe_pool), \
815 &CONCAT(name, _cqe_pool), NULL)
816
817/* clang-format on */
818
829#define RTIO_DEFINE_WITH_MEMPOOL(name, sq_sz, cq_sz, num_blks, blk_size, balign) \
830 Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz); \
831 Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz); \
832 Z_RTIO_BLOCK_POOL_DEFINE(name##_block_pool, blk_size, num_blks, balign); \
833 Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, &name##_block_pool)
834
835/* clang-format on */
836
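Typical instantiations of the two context flavors (the names and sizes are illustrative):

#include <zephyr/rtio/rtio.h>

/* 8 submission and 8 completion queue entries, no memory pool */
RTIO_DEFINE(my_rtio, 8, 8);

/* Same queue depths plus 16 blocks of 64 bytes (4-byte aligned) backing
 * RTIO_SQE_MEMPOOL_BUFFER reads
 */
RTIO_DEFINE_WITH_MEMPOOL(my_rtio_mp, 8, 8, 16, 64, 4);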
844static inline uint32_t rtio_sqe_acquirable(struct rtio *r)
845{
846 return r->sqe_pool->pool_free;
847}
848
857static inline struct rtio_iodev_sqe *rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
858{
859 if (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) {
860 return iodev_sqe->next;
861 } else {
862 return NULL;
863 }
864}
865
866
875static inline struct rtio_iodev_sqe *rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
876{
877 if (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) {
878 return iodev_sqe->next;
879 } else {
880 return NULL;
881 }
882}
883
892static inline struct rtio_iodev_sqe *rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
893{
894 return iodev_sqe->next;
895}
896
905static inline struct rtio_sqe *rtio_sqe_acquire(struct rtio *r)
906{
907 struct rtio_iodev_sqe *iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool);
908
909 if (iodev_sqe == NULL) {
910 return NULL;
911 }
912
913 rtio_mpsc_push(&r->sq, &iodev_sqe->q);
914
915 return &iodev_sqe->sqe;
916}
917
923static inline void rtio_sqe_drop_all(struct rtio *r)
924{
925 struct rtio_iodev_sqe *iodev_sqe;
926 struct rtio_mpsc_node *node = rtio_mpsc_pop(&r->sq);
927
928 while (node != NULL) {
929 iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
930 rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
931 node = rtio_mpsc_pop(&r->sq);
932 }
933}
934
938static inline struct rtio_cqe *rtio_cqe_acquire(struct rtio *r)
939{
940 struct rtio_cqe *cqe = rtio_cqe_pool_alloc(r->cqe_pool);
941
942 if (cqe == NULL) {
943 return NULL;
944 }
945
946 memset(cqe, 0, sizeof(struct rtio_cqe));
947
948 return cqe;
949}
950
954static inline void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
955{
956 rtio_mpsc_push(&r->cq, &cqe->q);
957}
958
970static inline struct rtio_cqe *rtio_cqe_consume(struct rtio *r)
971{
972 struct rtio_mpsc_node *node;
973 struct rtio_cqe *cqe = NULL;
974
975#ifdef CONFIG_RTIO_CONSUME_SEM
976 if (k_sem_take(r->consume_sem, K_NO_WAIT) != 0) {
977 return NULL;
978 }
979#endif
980
981 node = rtio_mpsc_pop(&r->cq);
982 if (node == NULL) {
983 return NULL;
984 }
985 cqe = CONTAINER_OF(node, struct rtio_cqe, q);
986
987 return cqe;
988}
989
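A non-blocking drain loop built on rtio_cqe_consume(); every consumed CQE must be handed back with rtio_cqe_release(). A sketch (the error handling is illustrative):

void drain_completions(struct rtio *r)
{
	struct rtio_cqe *cqe;

	while ((cqe = rtio_cqe_consume(r)) != NULL) {
		if (cqe->result < 0) {
			/* handle the error; cqe->userdata identifies the request */
		}
		rtio_cqe_release(r, cqe);
	}
}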
1000static inline struct rtio_cqe *rtio_cqe_consume_block(struct rtio *r)
1001{
1002 struct rtio_mpsc_node *node;
1003 struct rtio_cqe *cqe;
1004
1005#ifdef CONFIG_RTIO_CONSUME_SEM
1006 k_sem_take(r->consume_sem, K_FOREVER);
1007#endif
1008 node = rtio_mpsc_pop(&r->cq);
1009 while (node == NULL) {
1010 Z_SPIN_DELAY(1);
1011 node = rtio_mpsc_pop(&r->cq);
1012 }
1013 cqe = CONTAINER_OF(node, struct rtio_cqe, q);
1014
1015 return cqe;
1016}
1017
1024static inline void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
1025{
1026 rtio_cqe_pool_free(r->cqe_pool, cqe);
1027}
1028
1035static inline uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
1036{
1037 uint32_t flags = 0;
1038
1039#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1040 if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) {
1041 struct rtio *r = iodev_sqe->r;
1042 struct sys_mem_blocks *mem_pool = r->block_pool;
1043 int blk_index = (iodev_sqe->sqe.buf - mem_pool->buffer) >>
1044 mem_pool->info.blk_sz_shift;
1045 int blk_count = iodev_sqe->sqe.buf_len >> mem_pool->info.blk_sz_shift;
1046
1047 flags = RTIO_CQE_FLAG_PREP_MEMPOOL(blk_index, blk_count);
1048 }
1049#else
1050 ARG_UNUSED(iodev_sqe);
1051#endif
1052
1053 return flags;
1054}
1055
1071__syscall int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1072 uint8_t **buff, uint32_t *buff_len);
1073
1074static inline int z_impl_rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1075 uint8_t **buff, uint32_t *buff_len)
1076{
1077#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1078 if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) {
1079 int blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags);
1080 int blk_count = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags);
1081 uint32_t blk_size = rtio_mempool_block_size(r);
1082
1083 *buff = r->block_pool->buffer + blk_idx * blk_size;
1084 *buff_len = blk_count * blk_size;
1085 __ASSERT_NO_MSG(*buff >= r->block_pool->buffer);
1086 __ASSERT_NO_MSG(*buff <
1087 r->block_pool->buffer + blk_size * r->block_pool->info.num_blocks);
1088 return 0;
1089 }
1090 return -EINVAL;
1091#else
1092 ARG_UNUSED(r);
1093 ARG_UNUSED(cqe);
1094 ARG_UNUSED(buff);
1095 ARG_UNUSED(buff_len);
1096
1097 return -ENOTSUP;
1098#endif
1099}
1100
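An end-to-end sketch of a mempool-backed read: the context is defined with RTIO_DEFINE_WITH_MEMPOOL, the buffer is recovered from the CQE flags and then returned with rtio_release_buffer(). The iodev name and the sizes are illustrative assumptions:

#include <zephyr/rtio/rtio.h>

extern struct rtio_iodev my_sensor_iodev; /* hypothetical iodev */

RTIO_DEFINE_WITH_MEMPOOL(my_rtio_mp, 4, 4, 16, 64, 4);

int read_sample(void)
{
	struct rtio_sqe *sqe = rtio_sqe_acquire(&my_rtio_mp); /* NULL check omitted */
	struct rtio_cqe *cqe;
	uint8_t *buf;
	uint32_t buf_len;
	int rc;

	rtio_sqe_prep_read_with_pool(sqe, &my_sensor_iodev, RTIO_PRIO_NORM, NULL);
	rtio_submit(&my_rtio_mp, 1);

	cqe = rtio_cqe_consume_block(&my_rtio_mp);
	rc = rtio_cqe_get_mempool_buffer(&my_rtio_mp, cqe, &buf, &buf_len);
	if (rc == 0) {
		/* ... use buf[0..buf_len) ... */
		rtio_release_buffer(&my_rtio_mp, buf, buf_len);
	}
	rtio_cqe_release(&my_rtio_mp, cqe);

	return rc;
}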
1101void rtio_executor_submit(struct rtio *r);
1102void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result);
1103void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result);
1104
1113static inline void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
1114{
1115 rtio_executor_ok(iodev_sqe, result);
1116}
1117
1126static inline void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
1127{
1128 rtio_executor_err(iodev_sqe, result);
1129}
1130
1136static inline void rtio_iodev_cancel_all(struct rtio_iodev *iodev)
1137{
1138 /* Clear pending requests as -ENODATA */
1139 struct rtio_mpsc_node *node = rtio_mpsc_pop(&iodev->iodev_sq);
1140
1141 while (node != NULL) {
1142 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
1143
1144 rtio_iodev_sqe_err(iodev_sqe, -ECANCELED);
1145 node = rtio_mpsc_pop(&iodev->iodev_sq);
1146 }
1147}
1148
1160static inline void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
1161{
1162 struct rtio_cqe *cqe = rtio_cqe_acquire(r);
1163
1164 if (cqe == NULL) {
1165 atomic_inc(&r->xcqcnt);
1166 } else {
1167 cqe->result = result;
1168 cqe->userdata = userdata;
1169 cqe->flags = flags;
1170 rtio_cqe_produce(r, cqe);
1171 }
1172
1173 atomic_inc(&r->cq_count);
1174#ifdef CONFIG_RTIO_SUBMIT_SEM
1175 if (r->submit_count > 0) {
1176 r->submit_count--;
1177 if (r->submit_count == 0) {
1178 k_sem_give(r->submit_sem);
1179 }
1180 }
1181#endif
1182#ifdef CONFIG_RTIO_CONSUME_SEM
1183 k_sem_give(r->consume_sem);
1184#endif
1185}
1186
1187#define __RTIO_MEMPOOL_GET_NUM_BLKS(num_bytes, blk_size) (((num_bytes) + (blk_size)-1) / (blk_size))
1188
1201static inline int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len,
1202 uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
1203{
1204 struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe;
1205
1206#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1207 if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) {
1208 struct rtio *r = iodev_sqe->r;
1209
1210 if (sqe->buf != NULL) {
1211 if (sqe->buf_len < min_buf_len) {
1212 return -ENOMEM;
1213 }
1214 *buf = sqe->buf;
1215 *buf_len = sqe->buf_len;
1216 return 0;
1217 }
1218
1219 int rc = rtio_block_pool_alloc(r, min_buf_len, max_buf_len, buf, buf_len);
1220 if (rc == 0) {
1221 sqe->buf = *buf;
1222 sqe->buf_len = *buf_len;
1223 return 0;
1224 }
1225
1226 return -ENOMEM;
1227 }
1228#else
1229 ARG_UNUSED(max_buf_len);
1230#endif
1231
1232 if (sqe->buf_len < min_buf_len) {
1233 return -ENOMEM;
1234 }
1235
1236 *buf = sqe->buf;
1237 *buf_len = sqe->buf_len;
1238 return 0;
1239}
1240
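Within an iodev's submit handler, rtio_sqe_rx_buf() hides whether the RX buffer was supplied by the caller or must come from the context's mempool. A sketch of a driver-side handler (the sizes and the fill step are illustrative):

static void my_iodev_handle_rx(struct rtio_iodev_sqe *iodev_sqe)
{
	uint8_t *buf;
	uint32_t buf_len;
	/* need at least 6 bytes, can use up to 16 if the mempool provides them */
	int rc = rtio_sqe_rx_buf(iodev_sqe, 6, 16, &buf, &buf_len);

	if (rc != 0) {
		rtio_iodev_sqe_err(iodev_sqe, rc);
		return;
	}

	/* ... fill buf with up to buf_len bytes of data ... */
	rtio_iodev_sqe_ok(iodev_sqe, 0);
}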
1255__syscall void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len);
1256
1257static inline void z_impl_rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
1258{
1259#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1260 if (r == NULL || buff == NULL || r->block_pool == NULL || buff_len == 0) {
1261 return;
1262 }
1263
1264 rtio_block_pool_free(r, buff, buff_len);
1265#else
1266 ARG_UNUSED(r);
1267 ARG_UNUSED(buff);
1268 ARG_UNUSED(buff_len);
1269#endif
1270}
1271
1275static inline void rtio_access_grant(struct rtio *r, struct k_thread *t)
1276{
1277 k_object_access_grant(r, t);
1278
1279#ifdef CONFIG_RTIO_SUBMIT_SEM
1280 k_object_access_grant(r->submit_sem, t);
1281#endif
1282
1283#ifdef CONFIG_RTIO_CONSUME_SEM
1284 k_object_access_grant(r->consume_sem, t);
1285#endif
1286}
1287
1298__syscall int rtio_sqe_cancel(struct rtio_sqe *sqe);
1299
1300static inline int z_impl_rtio_sqe_cancel(struct rtio_sqe *sqe)
1301{
1302 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);
1303
1304 do {
1305 iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED;
1306 iodev_sqe = rtio_iodev_sqe_next(iodev_sqe);
1307 } while (iodev_sqe != NULL);
1308
1309 return 0;
1310}
1311
1327__syscall int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1328 struct rtio_sqe **handle, size_t sqe_count);
1329
1330static inline int z_impl_rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1331 struct rtio_sqe **handle,
1332 size_t sqe_count)
1333{
1334 struct rtio_sqe *sqe;
1335 uint32_t acquirable = rtio_sqe_acquirable(r);
1336
1337 if (acquirable < sqe_count) {
1338 return -ENOMEM;
1339 }
1340
1341 for (unsigned long i = 0; i < sqe_count; i++) {
1342 sqe = rtio_sqe_acquire(r);
1343 __ASSERT_NO_MSG(sqe != NULL);
1344 if (handle != NULL && i == 0) {
1345 *handle = sqe;
1346 }
1347 *sqe = sqes[i];
1348 }
1349
1350 return 0;
1351}
1352
1369static inline int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
1370{
1371 return rtio_sqe_copy_in_get_handles(r, sqes, NULL, sqe_count);
1372}
1373
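rtio_sqe_copy_in() is the syscall-friendly path for user threads: SQEs are staged in ordinary memory and copied into the context in one call. A sketch pairing a write and a read (iodev, context and buffers are illustrative assumptions):

#include <zephyr/rtio/rtio.h>

extern struct rtio_iodev my_iodev; /* hypothetical iodev */
extern struct rtio my_rtio;        /* hypothetical context */

int submit_two(uint8_t *tx, uint8_t *rx, uint32_t len)
{
	struct rtio_sqe sqes[2];
	int rc;

	rtio_sqe_prep_write(&sqes[0], &my_iodev, RTIO_PRIO_NORM, tx, len, NULL);
	rtio_sqe_prep_read(&sqes[1], &my_iodev, RTIO_PRIO_NORM, rx, len, NULL);

	rc = rtio_sqe_copy_in(&my_rtio, sqes, 2);
	if (rc != 0) {
		return rc;
	}

	return rtio_submit(&my_rtio, 2);
}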
1389__syscall int rtio_cqe_copy_out(struct rtio *r,
1390 struct rtio_cqe *cqes,
1391 size_t cqe_count,
1392 k_timeout_t timeout);
1393static inline int z_impl_rtio_cqe_copy_out(struct rtio *r,
1394 struct rtio_cqe *cqes,
1395 size_t cqe_count,
1396 k_timeout_t timeout)
1397{
1398 size_t copied = 0;
1399 struct rtio_cqe *cqe;
1400 k_timepoint_t end = sys_timepoint_calc(timeout);
1401
1402 do {
1403 cqe = K_TIMEOUT_EQ(timeout, K_FOREVER) ? rtio_cqe_consume_block(r)
1404 : rtio_cqe_consume(r);
1405 if (cqe == NULL) {
1406#ifdef CONFIG_BOARD_NATIVE_POSIX
1407 /* Native posix fakes the clock and only moves it forward when sleeping. */
1408 k_sleep(K_TICKS(1));
1409#else
1410 Z_SPIN_DELAY(1);
1411#endif
1412 continue;
1413 }
1414 cqes[copied++] = *cqe;
1415 rtio_cqe_release(r, cqe);
1416 } while (copied < cqe_count && !sys_timepoint_expired(end));
1417
1418 return copied;
1419}
1420
1434__syscall int rtio_submit(struct rtio *r, uint32_t wait_count);
1435
1436static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
1437{
1438 int res = 0;
1439
1440#ifdef CONFIG_RTIO_SUBMIT_SEM
1441 /* TODO undefined behavior if another thread calls submit of course
1442 */
1443 if (wait_count > 0) {
1444 __ASSERT(!k_is_in_isr(),
1445 "expected rtio submit with wait count to be called from a thread");
1446
1447 k_sem_reset(r->submit_sem);
1448 r->submit_count = wait_count;
1449 }
1450#else
1451 uintptr_t cq_count = (uintptr_t)atomic_get(&r->cq_count) + wait_count;
1452#endif
1453
1454 /* Submit the queue to the executor which consumes submissions
1455 * and produces completions through ISR chains or other means.
1456 */
1457 rtio_executor_submit(r);
1458
1459
1460 /* TODO could be nicer if we could suspend the thread and not
1461 * wake up on each completion here.
1462 */
1463#ifdef CONFIG_RTIO_SUBMIT_SEM
1464
1465 if (wait_count > 0) {
1466 res = k_sem_take(r->submit_sem, K_FOREVER);
1467 __ASSERT(res == 0,
1468 "semaphore was reset or timed out while waiting on completions!");
1469 }
1470#else
1471 while ((uintptr_t)atomic_get(&r->cq_count) < cq_count) {
1472 Z_SPIN_DELAY(10);
1473 k_yield();
1474 }
1475#endif
1476
1477 return res;
1478}
1479
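The wait_count argument selects how many completions rtio_submit() blocks for: 0 makes the call effectively asynchronous, while a non-zero count waits on the submit semaphore or polls cq_count, depending on CONFIG_RTIO_SUBMIT_SEM. A short sketch (the context name is illustrative):

extern struct rtio my_rtio; /* hypothetical context */

void submit_examples(void)
{
	/* Fire and forget: return immediately, consume completions later */
	rtio_submit(&my_rtio, 0);

	/* Block until three of the previously queued submissions have completed */
	rtio_submit(&my_rtio, 3);
}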
1484#ifdef __cplusplus
1485}
1486#endif
1487
1488#include <syscalls/rtio.h>
1489
1490#endif /* ZEPHYR_INCLUDE_RTIO_RTIO_H_ */
long atomic_t
Definition: atomic_types.h:15
atomic_val_t atomic_get(const atomic_t *target)
Atomic get.
atomic_val_t atomic_inc(atomic_t *target)
Atomic increment.
#define K_FOREVER
Generate infinite timeout delay.
Definition: kernel.h:1363
#define K_NO_WAIT
Generate null timeout delay.
Definition: kernel.h:1253
k_timepoint_t sys_timepoint_calc(k_timeout_t timeout)
Calculate a timepoint value.
static bool sys_timepoint_expired(k_timepoint_t timepoint)
Indicates if timepoint is expired.
Definition: sys_clock.h:327
#define K_TIMEOUT_EQ(a, b)
Compare timeouts for equality.
Definition: sys_clock.h:80
#define K_TICKS(t)
Generate timeout delay from system ticks.
Definition: kernel.h:1305
bool k_is_in_isr(void)
Determine if code is running at interrupt level.
int sys_mem_blocks_free_contiguous(sys_mem_blocks_t *mem_block, void *block, size_t count)
Free contiguous multiple memory blocks.
int sys_mem_blocks_alloc_contiguous(sys_mem_blocks_t *mem_block, size_t count, void **out_block)
Allocate a contiguous set of memory blocks.
#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags)
Get the block count of a mempool flags.
Definition: rtio.h:178
#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags)
Get the block index of a mempool flags.
Definition: rtio.h:170
#define RTIO_CQE_FLAG_MEMPOOL_BUFFER
The entry's buffer was allocated from the RTIO's mempool.
Definition: rtio.h:160
#define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt)
Prepare CQE flags for a mempool read.
Definition: rtio.h:187
#define RTIO_CQE_FLAG_GET(flags)
Definition: rtio.h:162
static struct rtio_mpsc_node * rtio_mpsc_pop(struct rtio_mpsc *q)
Pop a node off of the list.
Definition: rtio_mpsc.h:146
static ALWAYS_INLINE void rtio_mpsc_push(struct rtio_mpsc *q, struct rtio_mpsc_node *n)
Push a node.
Definition: rtio_mpsc.h:127
#define RTIO_SQE_MULTISHOT
The SQE should continue producing CQEs until canceled.
Definition: rtio.h:136
#define RTIO_SQE_TRANSACTION
The next request in the queue is part of a transaction.
Definition: rtio.h:108
#define RTIO_SQE_MEMPOOL_BUFFER
The buffer should be allocated by the RTIO mempool.
Definition: rtio.h:120
#define RTIO_SQE_CANCELED
The SQE should not execute if possible.
Definition: rtio.h:128
#define RTIO_SQE_CHAINED
The next request in the queue should wait on this one.
Definition: rtio.h:96
static void rtio_sqe_prep_transceive(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, uint8_t *tx_buf, uint8_t *rx_buf, uint32_t buf_len, void *userdata)
Prepare a transceive op submission.
Definition: rtio.h:608
static void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, void *userdata)
Prepare a read op submission with context's mempool.
Definition: rtio.h:520
void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result)
#define RTIO_OP_CALLBACK
An operation that calls a given function (callback)
Definition: rtio.h:472
static uint32_t rtio_sqe_acquirable(struct rtio *r)
Count of acquirable submission queue events.
Definition: rtio.h:844
static void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
Definition: rtio.h:665
static void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, const uint8_t *tiny_write_data, uint8_t tiny_write_len, void *userdata)
Prepare a tiny write op submission.
Definition: rtio.h:565
static void rtio_iodev_cancel_all(struct rtio_iodev *iodev)
Cancel all requests that are pending for the iodev.
Definition: rtio.h:1136
static size_t rtio_mempool_block_size(const struct rtio *r)
Get the mempool block size of the RTIO context.
Definition: rtio.h:381
static void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
Submit a completion queue event with a given result and userdata.
Definition: rtio.h:1160
static void rtio_sqe_prep_nop(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, void *userdata)
Prepare a nop (no op) submission.
Definition: rtio.h:486
void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
Release memory that was allocated by the RTIO's memory pool.
static int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
Copy an array of SQEs into the queue.
Definition: rtio.h:1369
static void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
Produce a complete queue event if available.
Definition: rtio.h:954
#define RTIO_OP_TINY_TX
An operation that transmits tiny writes by copying the data to write.
Definition: rtio.h:469
static uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
Compute the CQE flags from the rtio_iodev_sqe entry.
Definition: rtio.h:1035
void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
static int rtio_block_pool_alloc(struct rtio *r, size_t min_sz, size_t max_sz, uint8_t **buf, uint32_t *buf_len)
Definition: rtio.h:672
int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes, struct rtio_sqe **handle, size_t sqe_count)
Copy an array of SQEs into the queue and get resulting handles back.
static struct rtio_cqe * rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
Definition: rtio.h:648
struct k_mem_partition rtio_partition
The memory partition associated with all RTIO context information.
static void rtio_sqe_prep_read(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, uint8_t *buf, uint32_t len, void *userdata)
Prepare a read op submission.
Definition: rtio.h:499
static struct rtio_sqe * rtio_sqe_acquire(struct rtio *r)
Acquire a single submission queue event if available.
Definition: rtio.h:905
#define RTIO_OP_TX
An operation that transmits (writes)
Definition: rtio.h:466
static void rtio_sqe_drop_all(struct rtio *r)
Drop all previously acquired sqe.
Definition: rtio.h:923
static void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, void *userdata)
Definition: rtio.h:528
int rtio_cqe_copy_out(struct rtio *r, struct rtio_cqe *cqes, size_t cqe_count, k_timeout_t timeout)
Copy an array of CQEs from the queue.
static void rtio_sqe_prep_callback(struct rtio_sqe *sqe, rtio_callback_t callback, void *arg0, void *userdata)
Prepare a callback op submission.
Definition: rtio.h:591
static void rtio_access_grant(struct rtio *r, struct k_thread *t)
Grant access to an RTIO context to a user thread.
Definition: rtio.h:1275
#define RTIO_OP_TXRX
An operation that transceives (reads and writes simultaneously)
Definition: rtio.h:475
static void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
Release consumed completion queue event.
Definition: rtio.h:1024
static int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len, uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
Get the buffer associate with the RX submission.
Definition: rtio.h:1201
static void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
Inform the executor of a submissions completion with error.
Definition: rtio.h:1126
static void rtio_sqe_prep_write(struct rtio_sqe *sqe, const struct rtio_iodev *iodev, int8_t prio, uint8_t *buf, uint32_t len, void *userdata)
Prepare a write op submission.
Definition: rtio.h:539
void(* rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, void *arg0)
Callback signature for RTIO_OP_CALLBACK.
Definition: rtio.h:227
int rtio_sqe_cancel(struct rtio_sqe *sqe)
Attempt to cancel an SQE.
static void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
Definition: rtio.h:641
static void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
Inform the executor of a submission completion with success.
Definition: rtio.h:1113
#define RTIO_OP_NOP
An operation that does nothing and will complete immediately.
Definition: rtio.h:460
static struct rtio_cqe * rtio_cqe_acquire(struct rtio *r)
Acquire a complete queue event if available.
Definition: rtio.h:938
static struct rtio_iodev_sqe * rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
Get the next sqe in the chain.
Definition: rtio.h:875
static struct rtio_cqe * rtio_cqe_consume(struct rtio *r)
Consume a single completion queue event if available.
Definition: rtio.h:970
static struct rtio_iodev_sqe * rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
Definition: rtio.h:626
static struct rtio_iodev_sqe * rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
Get the next sqe in the chain or transaction.
Definition: rtio.h:892
int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe, uint8_t **buff, uint32_t *buff_len)
Retrieve the mempool buffer that was allocated for the CQE.
static struct rtio_iodev_sqe * rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
Get the next sqe in the transaction.
Definition: rtio.h:857
void rtio_executor_submit(struct rtio *r)
static struct rtio_cqe * rtio_cqe_consume_block(struct rtio *r)
Wait for and consume a single completion queue event.
Definition: rtio.h:1000
static void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len)
Definition: rtio.h:709
#define RTIO_OP_RX
An operation that receives (reads)
Definition: rtio.h:463
int rtio_submit(struct rtio *r, uint32_t wait_count)
Submit I/O requests to the underlying executor.
void k_sem_reset(struct k_sem *sem)
Resets a semaphore's count to zero.
void k_sem_give(struct k_sem *sem)
Give a semaphore.
int k_sem_take(struct k_sem *sem, k_timeout_t timeout)
Take a semaphore.
#define BIT(n)
Unsigned integer with bit position n set (signed in assembly language).
Definition: util_macro.h:44
#define CONTAINER_OF(ptr, type, field)
Get a pointer to a structure containing the element.
Definition: util.h:268
#define DIV_ROUND_UP(n, d)
Divide and round up.
Definition: util.h:326
#define EINVAL
Invalid argument.
Definition: errno.h:61
#define ENOMEM
Not enough core.
Definition: errno.h:51
#define ENOTSUP
Unsupported value.
Definition: errno.h:115
#define ECANCELED
Operation canceled.
Definition: errno.h:118
void k_yield(void)
Yield the current thread.
int32_t k_sleep(k_timeout_t timeout)
Put the current thread to sleep.
void k_object_access_grant(const void *object, struct k_thread *thread)
Grant a thread access to a kernel object.
Public kernel APIs.
Memory Blocks Allocator.
A wait-free intrusive multi producer single consumer (MPSC) queue using a singly linked list.
__UINT32_TYPE__ uint32_t
Definition: stdint.h:90
__INT32_TYPE__ int32_t
Definition: stdint.h:74
__UINT8_TYPE__ uint8_t
Definition: stdint.h:88
#define UINT16_MAX
Definition: stdint.h:28
__UINTPTR_TYPE__ uintptr_t
Definition: stdint.h:105
__UINT16_TYPE__ uint16_t
Definition: stdint.h:89
__INT8_TYPE__ int8_t
Definition: stdint.h:72
void * memset(void *buf, int c, size_t n)
void * memcpy(void *ZRESTRICT d, const void *ZRESTRICT s, size_t n)
Memory Partition.
Definition: mem_domain.h:55
Thread Structure.
Definition: thread.h:259
Kernel timeout type.
Definition: sys_clock.h:65
Kernel timepoint type.
Definition: sys_clock.h:219
Definition: rtio.h:309
struct rtio_cqe * pool
Definition: rtio.h:313
const uint16_t pool_size
Definition: rtio.h:311
uint16_t pool_free
Definition: rtio.h:312
struct rtio_mpsc free_q
Definition: rtio.h:310
A completion queue event.
Definition: rtio.h:294
void * userdata
Associated userdata with operation.
Definition: rtio.h:298
struct rtio_mpsc_node q
Definition: rtio.h:295
uint32_t flags
Flags associated with the operation.
Definition: rtio.h:299
int32_t result
Result from operation.
Definition: rtio.h:297
API that an RTIO IO device should implement.
Definition: rtio.h:433
void(* submit)(struct rtio_iodev_sqe *iodev_sqe)
Submit to the iodev an entry to work on.
Definition: rtio.h:442
An IO device submission queue entry.
Definition: rtio.h:423
struct rtio_mpsc_node q
Definition: rtio.h:425
struct rtio_iodev_sqe * next
Definition: rtio.h:426
struct rtio_sqe sqe
Definition: rtio.h:424
struct rtio * r
Definition: rtio.h:427
An IO device with a function table for submitting requests.
Definition: rtio.h:448
struct rtio_mpsc iodev_sq
Definition: rtio.h:453
const struct rtio_iodev_api * api
Definition: rtio.h:450
void * data
Definition: rtio.h:456
Queue member.
Definition: rtio_mpsc.h:80
MPSC Queue.
Definition: rtio_mpsc.h:87
Definition: rtio.h:302
struct rtio_mpsc free_q
Definition: rtio.h:303
struct rtio_iodev_sqe * pool
Definition: rtio.h:306
const uint16_t pool_size
Definition: rtio.h:304
uint16_t pool_free
Definition: rtio.h:305
A submission queue event.
Definition: rtio.h:232
uint32_t i2c_config
OP_I2C_CONFIGURE.
Definition: rtio.h:282
void * userdata
User provided data which is returned upon operation completion.
Definition: rtio.h:252
uint8_t * tx_buf
Definition: rtio.h:277
uint8_t op
Op code.
Definition: rtio.h:233
void * arg0
Last argument given to callback.
Definition: rtio.h:271
uint8_t * rx_buf
Definition: rtio.h:278
uint8_t prio
Op priority.
Definition: rtio.h:235
uint8_t * buf
Buffer to use.
Definition: rtio.h:259
uint32_t buf_len
Length of buffer.
Definition: rtio.h:258
const struct rtio_iodev * iodev
Device to operation on.
Definition: rtio.h:243
uint16_t flags
Op Flags.
Definition: rtio.h:237
uint8_t tiny_buf_len
Length of tiny buffer.
Definition: rtio.h:264
uint32_t txrx_buf_len
Definition: rtio.h:276
uint8_t tiny_buf[7]
Tiny buffer.
Definition: rtio.h:265
rtio_callback_t callback
Definition: rtio.h:270
uint16_t iodev_flags
Op iodev flags.
Definition: rtio.h:239
An RTIO context containing what can be viewed as a pair of queues.
Definition: rtio.h:327
struct rtio_cqe_pool * cqe_pool
Definition: rtio.h:357
struct rtio_mpsc cq
Definition: rtio.h:368
struct rtio_mpsc sq
Definition: rtio.h:365
atomic_t cq_count
Definition: rtio.h:346
struct rtio_sqe_pool * sqe_pool
Definition: rtio.h:354
atomic_t xcqcnt
Definition: rtio.h:351
Misc utilities.