LCOV - code coverage report
Current view: top level - zephyr/rtio - rtio.h Coverage Total Hit
Test: new.info Lines: 76.2 % 172 131
Test Date: 2025-09-25 19:22:35

            Line data    Source code
       1            1 : /*
       2              :  * Copyright (c) 2022 Intel Corporation
       3              :  *
       4              :  * SPDX-License-Identifier: Apache-2.0
       5              :  */
       6              : 
       7              : /**
       8              :  * @file
       9              :  * @brief Real-Time IO device API for moving bytes with low effort
      10              :  *
      11              :  * RTIO is a context for asynchronous batch operations using a submission and completion queue.
      12              :  *
      13              :  * Asynchronous I/O operation are setup in a submission queue. Each entry in the queue describes
      14              :  * the operation it wishes to perform with some understood semantics.
      15              :  *
      16              :  * These operations may be chained in a such a way that only when the current
      17              :  * operation is complete the next will be executed. If the current operation fails
      18              :  * all chained operations will also fail.
      19              :  *
      20              :  * Operations may also be submitted as a transaction where a set of operations are considered
      21              :  * to be one operation.
      22              :  *
      23              :  * The completion of these operations typically provide one or more completion queue events.
      24              :  */
      25              : 
      26              : #ifndef ZEPHYR_INCLUDE_RTIO_RTIO_H_
      27              : #define ZEPHYR_INCLUDE_RTIO_RTIO_H_
      28              : 
      29              : #include <string.h>
      30              : 
      31              : #include <zephyr/app_memory/app_memdomain.h>
      32              : #include <zephyr/device.h>
      33              : #include <zephyr/kernel.h>
      34              : #include <zephyr/kernel_structs.h>
      35              : #include <zephyr/sys/__assert.h>
      36              : #include <zephyr/sys/atomic.h>
      37              : #include <zephyr/sys/mem_blocks.h>
      38              : #include <zephyr/sys/util.h>
      39              : #include <zephyr/sys/iterable_sections.h>
      40              : #include <zephyr/sys/mpsc_lockfree.h>
      41              : 
      42              : #ifdef __cplusplus
      43              : extern "C" {
      44              : #endif
      45              : 
      46              : 
      47              : /**
      48              :  * @brief RTIO
      49              :  * @defgroup rtio RTIO
      50              :  * @since 3.2
      51              :  * @version 0.2.0
      52              :  * @ingroup os_services
      53              :  * @{
      54              :  */
      55              : 
      56              : /**
      57              :  * @brief RTIO Predefined Priorities
      58              :  * @defgroup rtio_sqe_prio RTIO Priorities
      59              :  * @ingroup rtio
      60              :  * @{
      61              :  */
      62              : 
      63              : /**
      64              :  * @brief Low priority
      65              :  */
      66            1 : #define RTIO_PRIO_LOW 0U
      67              : 
      68              : /**
      69              :  * @brief Normal priority
      70              :  */
      71            1 : #define RTIO_PRIO_NORM 127U
      72              : 
      73              : /**
      74              :  * @brief High priority
      75              :  */
      76            1 : #define RTIO_PRIO_HIGH 255U
      77              : 
      78              : /**
      79              :  * @}
      80              :  */
      81              : 
      82              : 
      83              : /**
      84              :  * @brief RTIO SQE Flags
      85              :  * @defgroup rtio_sqe_flags RTIO SQE Flags
      86              :  * @ingroup rtio
      87              :  * @{
      88              :  */
      89              : 
      90              : /**
      91              :  * @brief The next request in the queue should wait on this one.
      92              :  *
      93              :  * Chained SQEs are individual units of work describing patterns of
      94              :  * ordering and failure cascading. A chained SQE must be started only
      95              :  * after the one before it. They are given to the iodevs one after another.
      96              :  */
      97            1 : #define RTIO_SQE_CHAINED BIT(0)
      98              : 
      99              : /**
     100              :  * @brief The next request in the queue is part of a transaction.
     101              :  *
     102              :  * Transactional SQEs are sequential parts of a unit of work.
     103              :  * Only the first transactional SQE is submitted to an iodev, the
     104              :  * remaining SQEs are never individually submitted but instead considered
     105              :  * to be part of the transaction to the single iodev. The first sqe in the
     106              :  * sequence holds the iodev that will be used and the last holds the userdata
     107              :  * that will be returned in a single completion on failure/success.
     108              :  */
     109            1 : #define RTIO_SQE_TRANSACTION BIT(1)
     110              : 
     111              : 
     112              : /**
     113              :  * @brief The buffer should be allocated by the RTIO mempool
     114              :  *
     115              :  * This flag can only exist if the CONFIG_RTIO_SYS_MEM_BLOCKS Kconfig was
     116              :  * enabled and the RTIO context was created via the RTIO_DEFINE_WITH_MEMPOOL()
     117              :  * macro. If set, the buffer associated with the entry was allocated by the
     118              :  * internal memory pool and should be released as soon as it is no longer
     119              :  * needed via a call to rtio_release_mempool().
     120              :  */
     121            1 : #define RTIO_SQE_MEMPOOL_BUFFER BIT(2)
     122              : 
     123              : /**
     124              :  * @brief The SQE should not execute if possible
     125              :  *
     126              :  * If possible (not yet executed), the SQE should be canceled by flagging it as failed and returning
     127              :  * -ECANCELED as the result.
     128              :  */
     129            1 : #define RTIO_SQE_CANCELED BIT(3)
     130              : 
     131              : /**
     132              :  * @brief The SQE should continue producing CQEs until canceled
     133              :  *
     134              :  * This flag must exist along @ref RTIO_SQE_MEMPOOL_BUFFER and signals that when a read is
     135              :  * complete. It should be placed back in queue until canceled.
     136              :  */
     137            1 : #define RTIO_SQE_MULTISHOT BIT(4)
     138              : 
     139              : /**
     140              :  * @brief The SQE does not produce a CQE.
     141              :  */
     142            1 : #define RTIO_SQE_NO_RESPONSE BIT(5)
     143              : 
     144              : /**
     145              :  * @}
     146              :  */
     147              : 
     148              : /**
     149              :  * @brief RTIO CQE Flags
     150              :  * @defgroup rtio_cqe_flags RTIO CQE Flags
     151              :  * @ingroup rtio
     152              :  * @{
     153              :  */
     154              : 
     155              : /**
     156              :  * @brief The entry's buffer was allocated from the RTIO's mempool
     157              :  *
     158              :  * If this bit is set, the buffer was allocated from the memory pool and should be recycled as
     159              :  * soon as the application is done with it.
     160              :  */
     161            1 : #define RTIO_CQE_FLAG_MEMPOOL_BUFFER BIT(0)
     162              : 
     163            0 : #define RTIO_CQE_FLAG_GET(flags) FIELD_GET(GENMASK(7, 0), (flags))
     164              : 
     165              : /**
     166              :  * @brief Get the block index of a mempool flags
     167              :  *
     168              :  * @param flags The CQE flags value
     169              :  * @return The block index portion of the flags field.
     170              :  */
     171            1 : #define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags) FIELD_GET(GENMASK(19, 8), (flags))
     172              : 
     173              : /**
     174              :  * @brief Get the block count of a mempool flags
     175              :  *
     176              :  * @param flags The CQE flags value
     177              :  * @return The block count portion of the flags field.
     178              :  */
     179            1 : #define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags) FIELD_GET(GENMASK(31, 20), (flags))
     180              : 
     181              : /**
     182              :  * @brief Prepare CQE flags for a mempool read.
     183              :  *
     184              :  * @param blk_idx The mempool block index
     185              :  * @param blk_cnt The mempool block count
     186              :  * @return A shifted and masked value that can be added to the flags field with an OR operator.
     187              :  */
     188            1 : #define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt)                                               \
     189              :         (FIELD_PREP(GENMASK(7, 0), RTIO_CQE_FLAG_MEMPOOL_BUFFER) |                                 \
     190              :          FIELD_PREP(GENMASK(19, 8), blk_idx) | FIELD_PREP(GENMASK(31, 20), blk_cnt))
     191              : 
     192              : /**
     193              :  * @}
     194              :  */
     195              : 
     196              : /**
     197              :  * @brief Equivalent to the I2C_MSG_STOP flag
     198              :  */
     199            1 : #define RTIO_IODEV_I2C_STOP BIT(1)
     200              : 
     201              : /**
     202              :  * @brief Equivalent to the I2C_MSG_RESTART flag
     203              :  */
     204            1 : #define RTIO_IODEV_I2C_RESTART BIT(2)
     205              : 
     206              : /**
     207              :  * @brief Equivalent to the I2C_MSG_ADDR_10_BITS
     208              :  */
     209            1 : #define RTIO_IODEV_I2C_10_BITS BIT(3)
     210              : 
     211              : /**
     212              :  * @brief Equivalent to the I3C_MSG_STOP flag
     213              :  */
     214            1 : #define RTIO_IODEV_I3C_STOP BIT(1)
     215              : 
     216              : /**
     217              :  * @brief Equivalent to the I3C_MSG_RESTART flag
     218              :  */
     219            1 : #define RTIO_IODEV_I3C_RESTART BIT(2)
     220              : 
     221              : /**
     222              :  * @brief Equivalent to the I3C_MSG_HDR
     223              :  */
     224            1 : #define RTIO_IODEV_I3C_HDR BIT(3)
     225              : 
     226              : /**
     227              :  * @brief Equivalent to the I3C_MSG_NBCH
     228              :  */
     229            1 : #define RTIO_IODEV_I3C_NBCH BIT(4)
     230              : 
     231              : /**
     232              :  * @brief I3C HDR Mode Mask
     233              :  */
     234            1 : #define RTIO_IODEV_I3C_HDR_MODE_MASK GENMASK(15, 8)
     235              : 
     236              : /**
     237              :  * @brief I3C HDR Mode Mask
     238              :  */
     239            1 : #define RTIO_IODEV_I3C_HDR_MODE_SET(flags) \
     240              :         FIELD_PREP(RTIO_IODEV_I3C_HDR_MODE_MASK, flags)
     241              : 
     242              : /**
     243              :  * @brief I3C HDR Mode Mask
     244              :  */
     245            1 : #define RTIO_IODEV_I3C_HDR_MODE_GET(flags) \
     246              :         FIELD_GET(RTIO_IODEV_I3C_HDR_MODE_MASK, flags)
     247              : 
     248              : /**
     249              :  * @brief I3C HDR 7b Command Code
     250              :  */
     251            1 : #define RTIO_IODEV_I3C_HDR_CMD_CODE_MASK GENMASK(22, 16)
     252              : 
     253              : /**
     254              :  * @brief I3C HDR 7b Command Code
     255              :  */
     256            1 : #define RTIO_IODEV_I3C_HDR_CMD_CODE_SET(flags) \
     257              :         FIELD_PREP(RTIO_IODEV_I3C_HDR_CMD_CODE_MASK, flags)
     258              : 
     259              : /**
     260              :  * @brief I3C HDR 7b Command Code
     261              :  */
     262            1 : #define RTIO_IODEV_I3C_HDR_CMD_CODE_GET(flags) \
     263              :         FIELD_GET(RTIO_IODEV_I3C_HDR_CMD_CODE_MASK, flags)
     264              : 
     265              : /** @cond ignore */
     266              : struct rtio;
     267              : struct rtio_cqe;
     268              : struct rtio_sqe;
     269              : struct rtio_sqe_pool;
     270              : struct rtio_cqe_pool;
     271              : struct rtio_iodev;
     272              : struct rtio_iodev_sqe;
     273              : /** @endcond */
     274              : 
     275              : /**
     276              :  * @typedef rtio_callback_t
     277              :  * @brief Callback signature for RTIO_OP_CALLBACK
     278              :  * @param r RTIO context being used with the callback
     279              :  * @param sqe Submission for the callback op
     280              :  * @param res Result of the previously linked submission.
     281              :  * @param arg0 Argument option as part of the sqe
     282              :  */
     283            1 : typedef void (*rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, int res, void *arg0);
     284              : 
     285              : /**
     286              :  * @typedef rtio_signaled_t
     287              :  * @brief Callback signature for RTIO_OP_AWAIT signaled
     288              :  * @param iodev_sqe IODEV submission for the await op
     289              :  * @param userdata Userdata
     290              :  */
     291            1 : typedef void (*rtio_signaled_t)(struct rtio_iodev_sqe *iodev_sqe, void *userdata);
     292              : 
     293              : /**
     294              :  * @brief A submission queue event
     295              :  */
     296            1 : struct rtio_sqe {
     297            1 :         uint8_t op; /**< Op code */
     298              : 
     299            1 :         uint8_t prio; /**< Op priority */
     300              : 
     301            1 :         uint16_t flags; /**< Op Flags */
     302              : 
     303            1 :         uint32_t iodev_flags; /**< Op iodev flags */
     304              : 
     305            1 :         const struct rtio_iodev *iodev; /**< Device to operation on */
     306              : 
     307              :         /**
     308              :          * User provided data which is returned upon operation completion. Could be a pointer or
     309              :          * integer.
     310              :          *
     311              :          * If unique identification of completions is desired this should be
     312              :          * unique as well.
     313              :          */
     314            1 :         void *userdata;
     315              : 
     316              :         union {
     317              : 
     318              :                 /** OP_TX */
     319              :                 struct {
     320            1 :                         uint32_t buf_len; /**< Length of buffer */
     321            1 :                         const uint8_t *buf; /**< Buffer to write from */
     322            1 :                 } tx;
     323              : 
     324              :                 /** OP_RX */
     325              :                 struct {
     326              :                         uint32_t buf_len; /**< Length of buffer */
     327            1 :                         uint8_t *buf; /**< Buffer to read into */
     328            1 :                 } rx;
     329              : 
     330              :                 /** OP_TINY_TX */
     331              :                 struct {
     332            1 :                         uint8_t buf_len; /**< Length of tiny buffer */
     333            1 :                         uint8_t buf[7]; /**< Tiny buffer */
     334            1 :                 } tiny_tx;
     335              : 
     336              :                 /** OP_CALLBACK */
     337              :                 struct {
     338            0 :                         rtio_callback_t callback;
     339            1 :                         void *arg0; /**< Last argument given to callback */
     340            1 :                 } callback;
     341              : 
     342              :                 /** OP_TXRX */
     343              :                 struct {
     344              :                         uint32_t buf_len; /**< Length of tx and rx buffers */
     345            1 :                         const uint8_t *tx_buf; /**< Buffer to write from */
     346            1 :                         uint8_t *rx_buf; /**< Buffer to read into */
     347            1 :                 } txrx;
     348              : 
     349              :                 /** OP_DELAY */
     350              :                 struct {
     351            1 :                         k_timeout_t timeout; /**< Delay timeout. */
     352            1 :                         struct _timeout to; /**< Timeout struct. Used internally. */
     353            1 :                 } delay;
     354              : 
     355              :                 /** OP_I2C_CONFIGURE */
     356            1 :                 uint32_t i2c_config;
     357              : 
     358              :                 /** OP_I3C_CONFIGURE */
     359              :                 struct {
     360              :                         /* enum i3c_config_type type; */
     361            0 :                         int type;
     362            0 :                         void *config;
     363            1 :                 } i3c_config;
     364              : 
     365              :                 /** OP_I3C_CCC */
     366              :                 /* struct i3c_ccc_payload *ccc_payload; */
     367            1 :                 void *ccc_payload;
     368              : 
     369              :                 /** OP_AWAIT */
     370              :                 struct {
     371            0 :                         atomic_t ok;
     372            0 :                         rtio_signaled_t callback;
     373              :                         void *userdata;
     374            1 :                 } await;
     375            0 :         };
     376              : };
     377              : 
     378              : /** @cond ignore */
     379              : /* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
     380              : BUILD_ASSERT(sizeof(struct rtio_sqe) <= 64);
     381              : /** @endcond */
     382              : 
     383              : /**
     384              :  * @brief A completion queue event
     385              :  */
     386            1 : struct rtio_cqe {
     387            0 :         struct mpsc_node q;
     388              : 
     389            1 :         int32_t result; /**< Result from operation */
     390            1 :         void *userdata; /**< Associated userdata with operation */
     391            1 :         uint32_t flags; /**< Flags associated with the operation */
     392              : };
     393              : 
     394            0 : struct rtio_sqe_pool {
     395            0 :         struct mpsc free_q;
     396            0 :         const uint16_t pool_size;
     397            0 :         uint16_t pool_free;
     398            0 :         struct rtio_iodev_sqe *pool;
     399              : };
     400              : 
     401            0 : struct rtio_cqe_pool {
     402            0 :         struct mpsc free_q;
     403            0 :         const uint16_t pool_size;
     404            0 :         uint16_t pool_free;
     405            0 :         struct rtio_cqe *pool;
     406              : };
     407              : 
     408              : /**
     409              :  * @brief An RTIO context containing what can be viewed as a pair of queues.
     410              :  *
     411              :  * A queue for submissions (available and in queue to be produced) as well as a queue
     412              :  * of completions (available and ready to be consumed).
     413              :  *
     414              :  * The rtio executor along with any objects implementing the rtio_iodev interface are
     415              :  * the consumers of submissions and producers of completions.
     416              :  *
     417              :  * No work is started until rtio_submit() is called.
     418              :  */
     419            1 : struct rtio {
     420              : #ifdef CONFIG_RTIO_SUBMIT_SEM
     421              :         /* A wait semaphore which may suspend the calling thread
     422              :          * to wait for some number of completions when calling submit
     423              :          */
     424              :         struct k_sem *submit_sem;
     425              : 
     426              :         uint32_t submit_count;
     427              : #endif
     428              : 
     429              : #ifdef CONFIG_RTIO_CONSUME_SEM
     430              :         /* A wait semaphore which may suspend the calling thread
     431              :          * to wait for some number of completions while consuming
     432              :          * them from the completion queue
     433              :          */
     434              :         struct k_sem *consume_sem;
     435              : #endif
     436              : 
     437              :         /* Total number of completions */
     438            0 :         atomic_t cq_count;
     439              : 
     440              :         /* Number of completions that were unable to be submitted with results
     441              :          * due to the cq spsc being full
     442              :          */
     443            0 :         atomic_t xcqcnt;
     444              : 
     445              :         /* Submission queue object pool with free list */
     446            0 :         struct rtio_sqe_pool *sqe_pool;
     447              : 
     448              :         /* Complete queue object pool with free list */
     449            0 :         struct rtio_cqe_pool *cqe_pool;
     450              : 
     451              : #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
     452              :         /* Mem block pool */
     453              :         struct sys_mem_blocks *block_pool;
     454              : #endif
     455              : 
     456              :         /* Submission queue */
     457            0 :         struct mpsc sq;
     458              : 
     459              :         /* Completion queue */
     460            0 :         struct mpsc cq;
     461              : };
     462              : 
     463              : /** The memory partition associated with all RTIO context information */
     464            1 : extern struct k_mem_partition rtio_partition;
     465              : 
     466              : /**
     467              :  * @brief Get the mempool block size of the RTIO context
     468              :  *
     469              :  * @param[in] r The RTIO context
     470              :  * @return The size of each block in the context's mempool
     471              :  * @return 0 if the context doesn't have a mempool
     472              :  */
     473            1 : static inline size_t rtio_mempool_block_size(const struct rtio *r)
     474              : {
     475              : #ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
     476              :         ARG_UNUSED(r);
     477              :         return 0;
     478              : #else
     479              :         if (r == NULL || r->block_pool == NULL) {
     480              :                 return 0;
     481              :         }
     482              :         return BIT(r->block_pool->info.blk_sz_shift);
     483              : #endif
     484              : }
     485              : 
     486              : /**
     487              :  * @brief Compute the mempool block index for a given pointer
     488              :  *
     489              :  * @param[in] r RTIO context
     490              :  * @param[in] ptr Memory pointer in the mempool
     491              :  * @return Index of the mempool block associated with the pointer. Or UINT16_MAX if invalid.
     492              :  */
     493              : #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
     494              : static inline uint16_t __rtio_compute_mempool_block_index(const struct rtio *r, const void *ptr)
     495              : {
     496              :         uintptr_t addr = (uintptr_t)ptr;
     497              :         struct sys_mem_blocks *mem_pool = r->block_pool;
     498              :         uint32_t block_size = rtio_mempool_block_size(r);
     499              : 
     500              :         uintptr_t buff = (uintptr_t)mem_pool->buffer;
     501              :         uint32_t buff_size = mem_pool->info.num_blocks * block_size;
     502              : 
     503              :         if (addr < buff || addr >= buff + buff_size) {
     504              :                 return UINT16_MAX;
     505              :         }
     506              :         return (addr - buff) / block_size;
     507              : }
     508              : #endif
     509              : 
     510              : /**
     511              :  * @brief IO device submission queue entry
     512              :  *
     513              :  * May be cast safely to and from a rtio_sqe as they occupy the same memory provided by the pool
     514              :  */
     515            1 : struct rtio_iodev_sqe {
     516            0 :         struct rtio_sqe sqe;
     517            0 :         struct mpsc_node q;
     518            0 :         struct rtio_iodev_sqe *next;
     519            0 :         struct rtio *r;
     520              : };
     521              : 
     522              : /**
     523              :  * @brief API that an RTIO IO device should implement
     524              :  */
     525            1 : struct rtio_iodev_api {
     526              :         /**
     527              :          * @brief Submit to the iodev an entry to work on
     528              :          *
     529              :          * This call should be short in duration and most likely
     530              :          * either enqueue or kick off an entry with the hardware.
     531              :          *
     532              :          * @param iodev_sqe Submission queue entry
     533              :          */
     534            1 :         void (*submit)(struct rtio_iodev_sqe *iodev_sqe);
     535              : };
     536              : 
     537              : /**
     538              :  * @brief An IO device with a function table for submitting requests
     539              :  */
     540            1 : struct rtio_iodev {
     541              :         /* Function pointer table */
     542            0 :         const struct rtio_iodev_api *api;
     543              : 
     544              :         /* Data associated with this iodev */
     545            0 :         void *data;
     546              : };
     547              : 
     548              : /** An operation that does nothing and will complete immediately */
     549            1 : #define RTIO_OP_NOP 0
     550              : 
     551              : /** An operation that receives (reads) */
     552            1 : #define RTIO_OP_RX (RTIO_OP_NOP+1)
     553              : 
     554              : /** An operation that transmits (writes) */
     555            1 : #define RTIO_OP_TX (RTIO_OP_RX+1)
     556              : 
     557              : /** An operation that transmits tiny writes by copying the data to write */
     558            1 : #define RTIO_OP_TINY_TX (RTIO_OP_TX+1)
     559              : 
     560              : /** An operation that calls a given function (callback) */
     561            1 : #define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)
     562              : 
     563              : /** An operation that transceives (reads and writes simultaneously) */
     564            1 : #define RTIO_OP_TXRX (RTIO_OP_CALLBACK+1)
     565              : 
     566              : /** An operation that takes a specified amount of time (asynchronously) before completing */
     567            1 : #define RTIO_OP_DELAY (RTIO_OP_TXRX+1)
     568              : 
     569              : /** An operation to recover I2C buses */
     570            1 : #define RTIO_OP_I2C_RECOVER (RTIO_OP_DELAY+1)
     571              : 
     572              : /** An operation to configure I2C buses */
     573            1 : #define RTIO_OP_I2C_CONFIGURE (RTIO_OP_I2C_RECOVER+1)
     574              : 
     575              : /** An operation to recover I3C buses */
     576            1 : #define RTIO_OP_I3C_RECOVER (RTIO_OP_I2C_CONFIGURE+1)
     577              : 
     578              : /** An operation to configure I3C buses */
     579            1 : #define RTIO_OP_I3C_CONFIGURE (RTIO_OP_I3C_RECOVER+1)
     580              : 
     581              : /** An operation to sends I3C CCC */
     582            1 : #define RTIO_OP_I3C_CCC (RTIO_OP_I3C_CONFIGURE+1)
     583              : 
     584              : /** An operation to await a signal while blocking the iodev (if one is provided) */
     585            1 : #define RTIO_OP_AWAIT (RTIO_OP_I3C_CCC+1)
     586              : 
     587              : /**
     588              :  * @brief Prepare a nop (no op) submission
     589              :  */
     590            1 : static inline void rtio_sqe_prep_nop(struct rtio_sqe *sqe,
     591              :                                 const struct rtio_iodev *iodev,
     592              :                                 void *userdata)
     593              : {
     594              :         memset(sqe, 0, sizeof(struct rtio_sqe));
     595              :         sqe->op = RTIO_OP_NOP;
     596              :         sqe->iodev = iodev;
     597              :         sqe->userdata = userdata;
     598              : }
     599              : 
     600              : /**
     601              :  * @brief Prepare a read op submission
     602              :  */
     603            1 : static inline void rtio_sqe_prep_read(struct rtio_sqe *sqe,
     604              :                                       const struct rtio_iodev *iodev,
     605              :                                       int8_t prio,
     606              :                                       uint8_t *buf,
     607              :                                       uint32_t len,
     608              :                                       void *userdata)
     609              : {
     610              :         memset(sqe, 0, sizeof(struct rtio_sqe));
     611              :         sqe->op = RTIO_OP_RX;
     612              :         sqe->prio = prio;
     613              :         sqe->iodev = iodev;
     614              :         sqe->rx.buf_len = len;
     615              :         sqe->rx.buf = buf;
     616              :         sqe->userdata = userdata;
     617              : }
     618              : 
     619              : /**
     620              :  * @brief Prepare a read op submission with context's mempool
     621              :  *
     622              :  * @see rtio_sqe_prep_read()
     623              :  */
     624            1 : static inline void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe,
     625              :                                                 const struct rtio_iodev *iodev, int8_t prio,
     626              :                                                 void *userdata)
     627              : {
     628              :         rtio_sqe_prep_read(sqe, iodev, prio, NULL, 0, userdata);
     629              :         sqe->flags = RTIO_SQE_MEMPOOL_BUFFER;
     630              : }
     631              : 
     632            0 : static inline void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe,
     633              :                                                 const struct rtio_iodev *iodev, int8_t prio,
     634              :                                                 void *userdata)
     635              : {
     636              :         rtio_sqe_prep_read_with_pool(sqe, iodev, prio, userdata);
     637              :         sqe->flags |= RTIO_SQE_MULTISHOT;
     638              : }
     639              : 
     640              : /**
     641              :  * @brief Prepare a write op submission
     642              :  */
     643            1 : static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe,
     644              :                                        const struct rtio_iodev *iodev,
     645              :                                        int8_t prio,
     646              :                                        const uint8_t *buf,
     647              :                                        uint32_t len,
     648              :                                        void *userdata)
     649              : {
     650              :         memset(sqe, 0, sizeof(struct rtio_sqe));
     651              :         sqe->op = RTIO_OP_TX;
     652              :         sqe->prio = prio;
     653              :         sqe->iodev = iodev;
     654              :         sqe->tx.buf_len = len;
     655              :         sqe->tx.buf = buf;
     656              :         sqe->userdata = userdata;
     657              : }
     658              : 
     659              : /**
     660              :  * @brief Prepare a tiny write op submission
     661              :  *
     662              :  * Unlike the normal write operation where the source buffer must outlive the call
     663              :  * the tiny write data in this case is copied to the sqe. It must be tiny to fit
     664              :  * within the specified size of a rtio_sqe.
     665              :  *
     666              :  * This is useful in many scenarios with RTL logic where a write of the register to
     667              :  * subsequently read must be done.
     668              :  */
     669            1 : static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe,
     670              :                                             const struct rtio_iodev *iodev,
     671              :                                             int8_t prio,
     672              :                                             const uint8_t *tiny_write_data,
     673              :                                             uint8_t tiny_write_len,
     674              :                                             void *userdata)
     675              : {
     676              :         __ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_tx.buf));
     677              : 
     678              :         memset(sqe, 0, sizeof(struct rtio_sqe));
     679              :         sqe->op = RTIO_OP_TINY_TX;
     680              :         sqe->prio = prio;
     681              :         sqe->iodev = iodev;
     682              :         sqe->tiny_tx.buf_len = tiny_write_len;
     683              :         memcpy(sqe->tiny_tx.buf, tiny_write_data, tiny_write_len);
     684              :         sqe->userdata = userdata;
     685              : }
     686              : 
     687              : /**
     688              :  * @brief Prepare a callback op submission
     689              :  *
     690              :  * A somewhat special operation in that it may only be done in kernel mode.
     691              :  *
     692              :  * Used where general purpose logic is required in a queue of io operations to do
     693              :  * transforms or logic.
     694              :  */
     695            1 : static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
     696              :                                           rtio_callback_t callback,
     697              :                                           void *arg0,
     698              :                                           void *userdata)
     699              : {
     700              :         memset(sqe, 0, sizeof(struct rtio_sqe));
     701              :         sqe->op = RTIO_OP_CALLBACK;
     702              :         sqe->prio = 0;
     703              :         sqe->iodev = NULL;
     704              :         sqe->callback.callback = callback;
     705              :         sqe->callback.arg0 = arg0;
     706              :         sqe->userdata = userdata;
     707              : }
     708              : 
     709              : /**
     710              :  * @brief Prepare a callback op submission that does not create a CQE
     711              :  *
     712              :  * Similar to @ref rtio_sqe_prep_callback, but the @ref RTIO_SQE_NO_RESPONSE
     713              :  * flag is set on the SQE to prevent the generation of a CQE upon completion.
     714              :  *
     715              :  * This can be useful when the callback is the last operation in a sequence
     716              :  * whose job is to clean up all the previous CQE's. Without @ref RTIO_SQE_NO_RESPONSE
     717              :  * the completion itself will result in a CQE that cannot be consumed in the callback.
     718              :  */
     719            1 : static inline void rtio_sqe_prep_callback_no_cqe(struct rtio_sqe *sqe,
     720              :                                                  rtio_callback_t callback,
     721              :                                                  void *arg0,
     722              :                                                  void *userdata)
     723              : {
     724              :         rtio_sqe_prep_callback(sqe, callback, arg0, userdata);
     725              :         sqe->flags |= RTIO_SQE_NO_RESPONSE;
     726              : }
     727              : 
     728              : /**
     729              :  * @brief Prepare a transceive op submission
     730              :  */
     731            1 : static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
     732              :                                             const struct rtio_iodev *iodev,
     733              :                                             int8_t prio,
     734              :                                             const uint8_t *tx_buf,
     735              :                                             uint8_t *rx_buf,
     736              :                                             uint32_t buf_len,
     737              :                                             void *userdata)
     738              : {
     739              :         memset(sqe, 0, sizeof(struct rtio_sqe));
     740              :         sqe->op = RTIO_OP_TXRX;
     741              :         sqe->prio = prio;
     742              :         sqe->iodev = iodev;
     743              :         sqe->txrx.buf_len = buf_len;
     744              :         sqe->txrx.tx_buf = tx_buf;
     745              :         sqe->txrx.rx_buf = rx_buf;
     746              :         sqe->userdata = userdata;
     747              : }
     748              : 
     749              : /**
     750              :  * @brief Prepare an await op submission
     751              :  *
     752              :  * The await operation will await the completion signal before the sqe completes.
     753              :  *
     754              :  * If an rtio_iodev is provided then it will be blocked while awaiting. This facilitates a
     755              :  * low-latency continuation of the rtio sequence, a sort of "critical section" during a bus
     756              :  * operation if you will.
     757              :  * Note that it is the responsibility of the rtio_iodev driver to properly block during the
     758              :  * operation.
     759              :  *
     760              :  * See @ref rtio_sqe_prep_await_iodev for a helper, where an rtio_iodev is blocked.
     761              :  * See @ref rtio_sqe_prep_await_executor for a helper, where no rtio_iodev is blocked.
     762              :  */
     763            1 : static inline void rtio_sqe_prep_await(struct rtio_sqe *sqe,
     764              :                                        const struct rtio_iodev *iodev,
     765              :                                        int8_t prio,
     766              :                                        void *userdata)
     767              : {
     768              :         memset(sqe, 0, sizeof(struct rtio_sqe));
     769              :         sqe->op = RTIO_OP_AWAIT;
     770              :         sqe->prio = prio;
     771              :         sqe->iodev = iodev;
     772              :         sqe->userdata = userdata;
     773              : }
     774              : 
     775              : /**
     776              :  * @brief Prepare an await op submission which blocks an rtio_iodev until completion
     777              :  *
     778              :  * This variant can be useful if the await op is part of a sequence which must run within a tight
     779              :  * time window as it effectively keeps the underlying bus locked while awaiting completion.
     780              :  * Note that it is the responsibility of the rtio_iodev driver to properly block during the
     781              :  * operation.
     782              :  *
     783              :  * See @ref rtio_sqe_prep_await for details.
     784              :  * See @ref rtio_sqe_prep_await_executor for a counterpart where no rtio_iodev is blocked.
     785              :  */
     786            1 : static inline void rtio_sqe_prep_await_iodev(struct rtio_sqe *sqe, const struct rtio_iodev *iodev,
     787              :                                              int8_t prio, void *userdata)
     788              : {
     789              :         __ASSERT_NO_MSG(iodev != NULL);
     790              :         rtio_sqe_prep_await(sqe, iodev, prio, userdata);
     791              : }
     792              : 
     793              : /**
     794              :  * @brief Prepare an await op submission which completes the sqe after being signaled
     795              :  *
     796              :  * This variant can be useful when the await op serves as a logical piece of a sequence without
     797              :  * requirements for a low-latency continuation of the sequence upon completion, or if the await
     798              :  * op is expected to take "a long time" to complete.
     799              :  *
     800              :  * See @ref rtio_sqe_prep_await for details.
     801              :  * See @ref rtio_sqe_prep_await_iodev for a counterpart where an rtio_iodev is blocked.
     802              :  */
     803            1 : static inline void rtio_sqe_prep_await_executor(struct rtio_sqe *sqe, int8_t prio, void *userdata)
     804              : {
     805              :         rtio_sqe_prep_await(sqe, NULL, prio, userdata);
     806              : }
     807              : 
     808            0 : static inline void rtio_sqe_prep_delay(struct rtio_sqe *sqe,
     809              :                                        k_timeout_t timeout,
     810              :                                        void *userdata)
     811              : {
     812              :         memset(sqe, 0, sizeof(struct rtio_sqe));
     813              :         sqe->op = RTIO_OP_DELAY;
     814              :         sqe->prio = 0;
     815              :         sqe->iodev = NULL;
     816              :         sqe->delay.timeout = timeout;
     817              :         sqe->userdata = userdata;
     818              : }
     819              : 
     820            0 : static inline struct rtio_iodev_sqe *rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
     821              : {
     822              :         struct mpsc_node *node = mpsc_pop(&pool->free_q);
     823              : 
     824              :         if (node == NULL) {
     825              :                 return NULL;
     826              :         }
     827              : 
     828              :         struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
     829              : 
     830              :         pool->pool_free--;
     831              : 
     832              :         return iodev_sqe;
     833              : }
     834              : 
     835            0 : static inline void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
     836              : {
     837              :         mpsc_push(&pool->free_q, &iodev_sqe->q);
     838              : 
     839              :         pool->pool_free++;
     840              : }
     841              : 
     842            0 : static inline struct rtio_cqe *rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
     843              : {
     844              :         struct mpsc_node *node = mpsc_pop(&pool->free_q);
     845              : 
     846              :         if (node == NULL) {
     847              :                 return NULL;
     848              :         }
     849              : 
     850              :         struct rtio_cqe *cqe = CONTAINER_OF(node, struct rtio_cqe, q);
     851              : 
     852              :         memset(cqe, 0, sizeof(struct rtio_cqe));
     853              : 
     854              :         pool->pool_free--;
     855              : 
     856              :         return cqe;
     857              : }
     858              : 
     859            0 : static inline void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
     860              : {
     861              :         mpsc_push(&pool->free_q, &cqe->q);
     862              : 
     863              :         pool->pool_free++;
     864              : }
     865              : 
     866            0 : static inline int rtio_block_pool_alloc(struct rtio *r, size_t min_sz,
     867              :                                           size_t max_sz, uint8_t **buf, uint32_t *buf_len)
     868              : {
     869              : #ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
     870              :         ARG_UNUSED(r);
     871              :         ARG_UNUSED(min_sz);
     872              :         ARG_UNUSED(max_sz);
     873              :         ARG_UNUSED(buf);
     874              :         ARG_UNUSED(buf_len);
     875              :         return -ENOTSUP;
     876              : #else
     877              :         const uint32_t block_size = rtio_mempool_block_size(r);
     878              :         uint32_t bytes = max_sz;
     879              : 
     880              :         /* Not every context has a block pool and the block size may return 0 in
     881              :          * that case
     882              :          */
     883              :         if (block_size == 0) {
     884              :                 return -ENOMEM;
     885              :         }
     886              : 
     887              :         do {
     888              :                 size_t num_blks = DIV_ROUND_UP(bytes, block_size);
     889              :                 int rc = sys_mem_blocks_alloc_contiguous(r->block_pool, num_blks, (void **)buf);
     890              : 
     891              :                 if (rc == 0) {
     892              :                         *buf_len = num_blks * block_size;
     893              :                         return 0;
     894              :                 }
     895              : 
     896              :                 if (bytes <= block_size) {
     897              :                         break;
     898              :                 }
     899              : 
     900              :                 bytes -= block_size;
     901              :         } while (bytes >= min_sz);
     902              : 
     903              :         return -ENOMEM;
     904              : #endif
     905              : }
     906              : 
     907            0 : static inline void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len)
     908              : {
     909              : #ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
     910              :         ARG_UNUSED(r);
     911              :         ARG_UNUSED(buf);
     912              :         ARG_UNUSED(buf_len);
     913              : #else
     914              :         size_t num_blks = buf_len >> r->block_pool->info.blk_sz_shift;
     915              : 
     916              :         sys_mem_blocks_free_contiguous(r->block_pool, buf, num_blks);
     917              : #endif
     918              : }
     919              : 
     920              : /* Do not try and reformat the macros */
     921              : /* clang-format off */
     922              : 
     923              : /**
     924              :  * @brief Statically define and initialize an RTIO IODev
     925              :  *
     926              :  * @param name Name of the iodev
     927              :  * @param iodev_api Pointer to struct rtio_iodev_api
     928              :  * @param iodev_data Data pointer
     929              :  */
     930            1 : #define RTIO_IODEV_DEFINE(name, iodev_api, iodev_data)          \
     931              :         STRUCT_SECTION_ITERABLE(rtio_iodev, name) = {           \
     932              :                 .api = (iodev_api),                             \
     933              :                 .data = (iodev_data),                           \
     934              :         }
     935              : 
     936              : #define Z_RTIO_SQE_POOL_DEFINE(name, sz)                        \
     937              :         static struct rtio_iodev_sqe CONCAT(_sqe_pool_, name)[sz];      \
     938              :         STRUCT_SECTION_ITERABLE(rtio_sqe_pool, name) = {        \
     939              :                 .free_q = MPSC_INIT((name.free_q)),     \
     940              :                 .pool_size = sz,                                \
     941              :                 .pool_free = sz,                                \
     942              :                 .pool = CONCAT(_sqe_pool_, name),               \
     943              :         }
     944              : 
     945              : 
     946              : #define Z_RTIO_CQE_POOL_DEFINE(name, sz)                        \
     947              :         static struct rtio_cqe CONCAT(_cqe_pool_, name)[sz];    \
     948              :         STRUCT_SECTION_ITERABLE(rtio_cqe_pool, name) = {        \
     949              :                 .free_q = MPSC_INIT((name.free_q)),     \
     950              :                 .pool_size = sz,                                \
     951              :                 .pool_free = sz,                                \
     952              :                 .pool = CONCAT(_cqe_pool_, name),               \
     953              :         }
     954              : 
     955              : /**
     956              :  * @brief Allocate to bss if available
     957              :  *
     958              :  * If CONFIG_USERSPACE is selected, allocate to the rtio_partition bss. Maps to:
     959              :  *   K_APP_BMEM(rtio_partition) static
     960              :  *
     961              :  * If CONFIG_USERSPACE is disabled, allocate as plain static:
     962              :  *   static
     963              :  */
     964            1 : #define RTIO_BMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_BMEM(rtio_partition) static), (static))
     965              : 
     966              : /**
     967              :  * @brief Allocate as initialized memory if available
     968              :  *
     969              :  * If CONFIG_USERSPACE is selected, allocate to the rtio_partition init. Maps to:
     970              :  *   K_APP_DMEM(rtio_partition) static
     971              :  *
     972              :  * If CONFIG_USERSPACE is disabled, allocate as plain static:
     973              :  *   static
     974              :  */
     975            1 : #define RTIO_DMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_DMEM(rtio_partition) static), (static))
     976              : 
     977              : #define Z_RTIO_BLOCK_POOL_DEFINE(name, blk_sz, blk_cnt, blk_align)                                 \
     978              :         RTIO_BMEM uint8_t __aligned(WB_UP(blk_align))                                              \
     979              :         CONCAT(_block_pool_, name)[blk_cnt*WB_UP(blk_sz)];                                         \
     980              :         _SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF(name, WB_UP(blk_sz), blk_cnt,                          \
     981              :                                             CONCAT(_block_pool_, name), RTIO_DMEM)
     982              : 
     983              : #define Z_RTIO_DEFINE(name, _sqe_pool, _cqe_pool, _block_pool)                                     \
     984              :         IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM,                                                         \
     985              :                    (static K_SEM_DEFINE(CONCAT(_submit_sem_, name), 0, K_SEM_MAX_LIMIT)))          \
     986              :         IF_ENABLED(CONFIG_RTIO_CONSUME_SEM,                                                        \
     987              :                    (static K_SEM_DEFINE(CONCAT(_consume_sem_, name), 0, K_SEM_MAX_LIMIT)))         \
     988              :         STRUCT_SECTION_ITERABLE(rtio, name) = {                                                    \
     989              :                 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_sem = &CONCAT(_submit_sem_, name),))   \
     990              :                 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_count = 0,))                           \
     991              :                 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, (.consume_sem = &CONCAT(_consume_sem_, name),))\
     992              :                 .cq_count = ATOMIC_INIT(0),                                                        \
     993              :                 .xcqcnt = ATOMIC_INIT(0),                                                          \
     994              :                 .sqe_pool = _sqe_pool,                                                             \
     995              :                 .cqe_pool = _cqe_pool,                                                             \
     996              :                 IF_ENABLED(CONFIG_RTIO_SYS_MEM_BLOCKS, (.block_pool = _block_pool,))               \
     997              :                 .sq = MPSC_INIT((name.sq)),                                                        \
     998              :                 .cq = MPSC_INIT((name.cq)),                                                        \
     999              :         }
    1000              : 
    1001              : /**
    1002              :  * @brief Statically define and initialize an RTIO context
    1003              :  *
    1004              :  * @param name Name of the RTIO
    1005              :  * @param sq_sz Size of the submission queue entry pool
    1006              :  * @param cq_sz Size of the completion queue entry pool
    1007              :  */
    1008            1 : #define RTIO_DEFINE(name, sq_sz, cq_sz)                                         \
    1009              :         Z_RTIO_SQE_POOL_DEFINE(CONCAT(name, _sqe_pool), sq_sz);                 \
    1010              :         Z_RTIO_CQE_POOL_DEFINE(CONCAT(name, _cqe_pool), cq_sz);                 \
    1011              :         Z_RTIO_DEFINE(name, &CONCAT(name, _sqe_pool),                               \
    1012              :                       &CONCAT(name, _cqe_pool), NULL)
    1013              : 
    1014              : /* clang-format on */
    1015              : 
    1016              : /**
    1017              :  * @brief Statically define and initialize an RTIO context
    1018              :  *
    1019              :  * @param name Name of the RTIO
    1020              :  * @param sq_sz Size of the submission queue, must be power of 2
    1021              :  * @param cq_sz Size of the completion queue, must be power of 2
    1022              :  * @param num_blks Number of blocks in the memory pool
    1023              :  * @param blk_size The number of bytes in each block
    1024              :  * @param balign The block alignment
    1025              :  */
    1026            1 : #define RTIO_DEFINE_WITH_MEMPOOL(name, sq_sz, cq_sz, num_blks, blk_size, balign) \
    1027              :         Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz);         \
    1028              :         Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz);                 \
    1029              :         Z_RTIO_BLOCK_POOL_DEFINE(name##_block_pool, blk_size, num_blks, balign); \
    1030              :         Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, &name##_block_pool)
    1031              : 
    1032              : /* clang-format on */
    1033              : 
    1034              : /**
    1035              :  * @brief Count of acquirable submission queue events
    1036              :  *
    1037              :  * @param r RTIO context
    1038              :  *
    1039              :  * @return Count of acquirable submission queue events
    1040              :  */
    1041            1 : static inline uint32_t rtio_sqe_acquirable(struct rtio *r)
    1042              : {
    1043              :         return r->sqe_pool->pool_free;
    1044              : }
    1045              : 
    1046              : /**
    1047              :  * @brief Get the next sqe in the transaction
    1048              :  *
    1049              :  * @param iodev_sqe Submission queue entry
    1050              :  *
    1051              :  * @retval NULL if current sqe is last in transaction
    1052              :  * @retval struct rtio_sqe * if available
    1053              :  */
    1054            1 : static inline struct rtio_iodev_sqe *rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
    1055              : {
    1056              :         if (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) {
    1057              :                 return iodev_sqe->next;
    1058              :         } else {
    1059              :                 return NULL;
    1060              :         }
    1061              : }
    1062              : 
    1063              : 
    1064              : /**
    1065              :  * @brief Get the next sqe in the chain
    1066              :  *
    1067              :  * @param iodev_sqe Submission queue entry
    1068              :  *
    1069              :  * @retval NULL if current sqe is last in chain
    1070              :  * @retval struct rtio_sqe * if available
    1071              :  */
    1072            1 : static inline struct rtio_iodev_sqe *rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
    1073              : {
    1074              :         if (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) {
    1075              :                 return iodev_sqe->next;
    1076              :         } else {
    1077              :                 return NULL;
    1078              :         }
    1079              : }
    1080              : 
    1081              : /**
    1082              :  * @brief Get the next sqe in the chain or transaction
    1083              :  *
    1084              :  * @param iodev_sqe Submission queue entry
    1085              :  *
    1086              :  * @retval NULL if current sqe is last in chain
    1087              :  * @retval struct rtio_iodev_sqe * if available
    1088              :  */
    1089            1 : static inline struct rtio_iodev_sqe *rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
    1090              : {
    1091              :         return iodev_sqe->next;
    1092              : }
    1093              : 
    1094              : /**
    1095              :  * @brief Acquire a single submission queue event if available
    1096              :  *
    1097              :  * @param r RTIO context
    1098              :  *
    1099              :  * @retval sqe A valid submission queue event acquired from the submission queue
    1100              :  * @retval NULL No subsmission queue event available
    1101              :  */
    1102            1 : static inline struct rtio_sqe *rtio_sqe_acquire(struct rtio *r)
    1103              : {
    1104              :         struct rtio_iodev_sqe *iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool);
    1105              : 
    1106              :         if (iodev_sqe == NULL) {
    1107              :                 return NULL;
    1108              :         }
    1109              : 
    1110              :         mpsc_push(&r->sq, &iodev_sqe->q);
    1111              : 
    1112              :         return &iodev_sqe->sqe;
    1113              : }
    1114              : 
    1115              : /**
    1116              :  * @brief Drop all previously acquired sqe
    1117              :  *
    1118              :  * @param r RTIO context
    1119              :  */
    1120            1 : static inline void rtio_sqe_drop_all(struct rtio *r)
    1121              : {
    1122              :         struct rtio_iodev_sqe *iodev_sqe;
    1123              :         struct mpsc_node *node = mpsc_pop(&r->sq);
    1124              : 
    1125              :         while (node != NULL) {
    1126              :                 iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
    1127              :                 rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
    1128              :                 node = mpsc_pop(&r->sq);
    1129              :         }
    1130              : }
    1131              : 
    1132              : /**
    1133              :  * @brief Acquire a complete queue event if available
    1134              :  */
    1135            1 : static inline struct rtio_cqe *rtio_cqe_acquire(struct rtio *r)
    1136              : {
    1137              :         struct rtio_cqe *cqe = rtio_cqe_pool_alloc(r->cqe_pool);
    1138              : 
    1139              :         if (cqe == NULL) {
    1140              :                 return NULL;
    1141              :         }
    1142              : 
    1143              :         memset(cqe, 0, sizeof(struct rtio_cqe));
    1144              : 
    1145              :         return cqe;
    1146              : }
    1147              : 
    1148              : /**
    1149              :  * @brief Produce a complete queue event if available
    1150              :  */
    1151            1 : static inline void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
    1152              : {
    1153              :         mpsc_push(&r->cq, &cqe->q);
    1154              : }
    1155              : 
    1156              : /**
    1157              :  * @brief Consume a single completion queue event if available
    1158              :  *
    1159              :  * If a completion queue event is returned rtio_cq_release(r) must be called
    1160              :  * at some point to release the cqe spot for the cqe producer.
    1161              :  *
    1162              :  * @param r RTIO context
    1163              :  *
    1164              :  * @retval cqe A valid completion queue event consumed from the completion queue
    1165              :  * @retval NULL No completion queue event available
    1166              :  */
    1167            1 : static inline struct rtio_cqe *rtio_cqe_consume(struct rtio *r)
    1168              : {
    1169              :         struct mpsc_node *node;
    1170              :         struct rtio_cqe *cqe = NULL;
    1171              : 
    1172              : #ifdef CONFIG_RTIO_CONSUME_SEM
    1173              :         if (k_sem_take(r->consume_sem, K_NO_WAIT) != 0) {
    1174              :                 return NULL;
    1175              :         }
    1176              : #endif
    1177              : 
    1178              :         node = mpsc_pop(&r->cq);
    1179              :         if (node == NULL) {
    1180              :                 return NULL;
    1181              :         }
    1182              :         cqe = CONTAINER_OF(node, struct rtio_cqe, q);
    1183              : 
    1184              :         return cqe;
    1185              : }
    1186              : 
    1187              : /**
    1188              :  * @brief Wait for and consume a single completion queue event
    1189              :  *
    1190              :  * If a completion queue event is returned rtio_cq_release(r) must be called
    1191              :  * at some point to release the cqe spot for the cqe producer.
    1192              :  *
    1193              :  * @param r RTIO context
    1194              :  *
    1195              :  * @retval cqe A valid completion queue event consumed from the completion queue
    1196              :  */
    1197            1 : static inline struct rtio_cqe *rtio_cqe_consume_block(struct rtio *r)
    1198              : {
    1199              :         struct mpsc_node *node;
    1200              :         struct rtio_cqe *cqe;
    1201              : 
    1202              : #ifdef CONFIG_RTIO_CONSUME_SEM
    1203              :         k_sem_take(r->consume_sem, K_FOREVER);
    1204              : #endif
    1205              :         node = mpsc_pop(&r->cq);
    1206              :         while (node == NULL) {
    1207              :                 Z_SPIN_DELAY(1);
    1208              :                 node = mpsc_pop(&r->cq);
    1209              :         }
    1210              :         cqe = CONTAINER_OF(node, struct rtio_cqe, q);
    1211              : 
    1212              :         return cqe;
    1213              : }
    1214              : 
    1215              : /**
    1216              :  * @brief Release consumed completion queue event
    1217              :  *
    1218              :  * @param r RTIO context
    1219              :  * @param cqe Completion queue entry
    1220              :  */
    1221            1 : static inline void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
    1222              : {
    1223              :         rtio_cqe_pool_free(r->cqe_pool, cqe);
    1224              : }
    1225              : 
    1226              : /**
    1227              :  * @brief Flush completion queue
    1228              :  *
    1229              :  * @param r RTIO context
    1230              :  * @return The operation completion result
    1231              :  * @retval 0 if the queued operations completed with no error
    1232              :  * @retval <0 on error
    1233              :  */
    1234            1 : static inline int rtio_flush_completion_queue(struct rtio *r)
    1235              : {
    1236              :         struct rtio_cqe *cqe;
    1237              :         int res = 0;
    1238              : 
    1239              :         do {
    1240              :                 cqe = rtio_cqe_consume(r);
    1241              :                 if (cqe != NULL) {
    1242              :                         if ((cqe->result < 0) && (res == 0)) {
    1243              :                                 res = cqe->result;
    1244              :                         }
    1245              :                         rtio_cqe_release(r, cqe);
    1246              :                 }
    1247              :         } while (cqe != NULL);
    1248              : 
    1249              :         return res;
    1250              : }
    1251              : 
    1252              : /**
    1253              :  * @brief Compute the CQE flags from the rtio_iodev_sqe entry
    1254              :  *
    1255              :  * @param iodev_sqe The SQE entry in question.
    1256              :  * @return The value that should be set for the CQE's flags field.
    1257              :  */
    1258            1 : static inline uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
    1259              : {
    1260              :         uint32_t flags = 0;
    1261              : 
    1262              : #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
    1263              :         if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) {
    1264              :                 struct rtio *r = iodev_sqe->r;
    1265              :                 struct sys_mem_blocks *mem_pool = r->block_pool;
    1266              :                 unsigned int blk_index = 0;
    1267              :                 unsigned int blk_count = 0;
    1268              : 
    1269              :                 if (iodev_sqe->sqe.rx.buf) {
    1270              :                         blk_index = (iodev_sqe->sqe.rx.buf - mem_pool->buffer) >>
    1271              :                                     mem_pool->info.blk_sz_shift;
    1272              :                         blk_count = iodev_sqe->sqe.rx.buf_len >> mem_pool->info.blk_sz_shift;
    1273              :                 }
    1274              :                 flags = RTIO_CQE_FLAG_PREP_MEMPOOL(blk_index, blk_count);
    1275              :         }
    1276              : #else
    1277              :         ARG_UNUSED(iodev_sqe);
    1278              : #endif
    1279              : 
    1280              :         return flags;
    1281              : }
    1282              : 
    1283              : /**
    1284              :  * @brief Retrieve the mempool buffer that was allocated for the CQE.
    1285              :  *
    1286              :  * If the RTIO context contains a memory pool, and the SQE was created by calling
    1287              :  * rtio_sqe_read_with_pool(), this function can be used to retrieve the memory associated with the
    1288              :  * read. Once processing is done, it should be released by calling rtio_release_buffer().
    1289              :  *
    1290              :  * @param[in] r RTIO context
    1291              :  * @param[in] cqe The CQE handling the event.
    1292              :  * @param[out] buff Pointer to the mempool buffer
    1293              :  * @param[out] buff_len Length of the allocated buffer
    1294              :  * @return 0 on success
    1295              :  * @return -EINVAL if the buffer wasn't allocated for this cqe
    1296              :  * @return -ENOTSUP if memory blocks are disabled
    1297              :  */
    1298            1 : __syscall int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
    1299              :                                           uint8_t **buff, uint32_t *buff_len);
    1300              : 
    1301              : static inline int z_impl_rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
    1302              :                                                      uint8_t **buff, uint32_t *buff_len)
    1303              : {
    1304              : #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
    1305              :         if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) {
    1306              :                 unsigned int blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags);
    1307              :                 unsigned int blk_count = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags);
    1308              :                 uint32_t blk_size = rtio_mempool_block_size(r);
    1309              : 
    1310              :                 *buff_len = blk_count * blk_size;
    1311              : 
    1312              :                 if (blk_count > 0) {
    1313              :                         *buff = r->block_pool->buffer + blk_idx * blk_size;
    1314              : 
    1315              :                         __ASSERT_NO_MSG(*buff >= r->block_pool->buffer);
    1316              :                         __ASSERT_NO_MSG(*buff <
    1317              :                                 r->block_pool->buffer + blk_size * r->block_pool->info.num_blocks);
    1318              :                 } else {
    1319              :                         *buff = NULL;
    1320              :                 }
    1321              :                 return 0;
    1322              :         }
    1323              :         return -EINVAL;
    1324              : #else
    1325              :         ARG_UNUSED(r);
    1326              :         ARG_UNUSED(cqe);
    1327              :         ARG_UNUSED(buff);
    1328              :         ARG_UNUSED(buff_len);
    1329              : 
    1330              :         return -ENOTSUP;
    1331              : #endif
    1332              : }
    1333              : 
    1334            0 : void rtio_executor_submit(struct rtio *r);
    1335            0 : void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result);
    1336            0 : void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result);
    1337              : 
    1338              : /**
    1339              :  * @brief Inform the executor of a submission completion with success
    1340              :  *
    1341              :  * This may start the next asynchronous request if one is available.
    1342              :  *
    1343              :  * @param iodev_sqe IODev Submission that has succeeded
    1344              :  * @param result Result of the request
    1345              :  */
    1346            1 : static inline void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
    1347              : {
    1348              :         rtio_executor_ok(iodev_sqe, result);
    1349              : }
    1350              : 
    1351              : /**
    1352              :  * @brief Inform the executor of a submissions completion with error
    1353              :  *
    1354              :  * This SHALL fail the remaining submissions in the chain.
    1355              :  *
    1356              :  * @param iodev_sqe Submission that has failed
    1357              :  * @param result Result of the request
    1358              :  */
    1359            1 : static inline void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
    1360              : {
    1361              :         rtio_executor_err(iodev_sqe, result);
    1362              : }
    1363              : 
    1364              : /**
    1365              :  * Submit a completion queue event with a given result and userdata
    1366              :  *
    1367              :  * Called by the executor to produce a completion queue event, no inherent
    1368              :  * locking is performed and this is not safe to do from multiple callers.
    1369              :  *
    1370              :  * @param r RTIO context
    1371              :  * @param result Integer result code (could be -errno)
    1372              :  * @param userdata Userdata to pass along to completion
    1373              :  * @param flags Flags to use for the CEQ see RTIO_CQE_FLAG_*
    1374              :  */
    1375            1 : static inline void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
    1376              : {
    1377              :         struct rtio_cqe *cqe = rtio_cqe_acquire(r);
    1378              : 
    1379              :         if (cqe == NULL) {
    1380              :                 atomic_inc(&r->xcqcnt);
    1381              :         } else {
    1382              :                 cqe->result = result;
    1383              :                 cqe->userdata = userdata;
    1384              :                 cqe->flags = flags;
    1385              :                 rtio_cqe_produce(r, cqe);
    1386              : #ifdef CONFIG_RTIO_CONSUME_SEM
    1387              :                 k_sem_give(r->consume_sem);
    1388              : #endif
    1389              :         }
    1390              : 
    1391              :         /* atomic_t isn't guaranteed to wrap correctly as it could be signed, so
    1392              :          * we must resort to a cas loop.
    1393              :          */
    1394              :         atomic_t val, new_val;
    1395              : 
    1396              :         do {
    1397              :                 val = atomic_get(&r->cq_count);
    1398              :                 new_val = (atomic_t)((uintptr_t)val + 1);
    1399              :         } while (!atomic_cas(&r->cq_count, val, new_val));
    1400              : 
    1401              : #ifdef CONFIG_RTIO_SUBMIT_SEM
    1402              :         if (r->submit_count > 0) {
    1403              :                 r->submit_count--;
    1404              :                 if (r->submit_count == 0) {
    1405              :                         k_sem_give(r->submit_sem);
    1406              :                 }
    1407              :         }
    1408              : #endif
    1409              : }
    1410              : 
    1411              : #define __RTIO_MEMPOOL_GET_NUM_BLKS(num_bytes, blk_size) (((num_bytes) + (blk_size)-1) / (blk_size))
    1412              : 
    1413              : /**
    1414              :  * @brief Get the buffer associate with the RX submission
    1415              :  *
    1416              :  * @param[in] iodev_sqe   The submission to probe
    1417              :  * @param[in] min_buf_len The minimum number of bytes needed for the operation
    1418              :  * @param[in] max_buf_len The maximum number of bytes needed for the operation
    1419              :  * @param[out] buf        Where to store the pointer to the buffer
    1420              :  * @param[out] buf_len    Where to store the size of the buffer
    1421              :  *
    1422              :  * @return 0 if @p buf and @p buf_len were successfully filled
    1423              :  * @return -ENOMEM Not enough memory for @p min_buf_len
    1424              :  */
    1425            1 : static inline int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len,
    1426              :                                   uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
    1427              : {
    1428              :         struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe;
    1429              : 
    1430              : #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
    1431              :         if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) {
    1432              :                 struct rtio *r = iodev_sqe->r;
    1433              : 
    1434              :                 if (sqe->rx.buf != NULL) {
    1435              :                         if (sqe->rx.buf_len < min_buf_len) {
    1436              :                                 return -ENOMEM;
    1437              :                         }
    1438              :                         *buf = sqe->rx.buf;
    1439              :                         *buf_len = sqe->rx.buf_len;
    1440              :                         return 0;
    1441              :                 }
    1442              : 
    1443              :                 int rc = rtio_block_pool_alloc(r, min_buf_len, max_buf_len, buf, buf_len);
    1444              :                 if (rc == 0) {
    1445              :                         sqe->rx.buf = *buf;
    1446              :                         sqe->rx.buf_len = *buf_len;
    1447              :                         return 0;
    1448              :                 }
    1449              : 
    1450              :                 return -ENOMEM;
    1451              :         }
    1452              : #else
    1453              :         ARG_UNUSED(max_buf_len);
    1454              : #endif
    1455              : 
    1456              :         if (sqe->rx.buf_len < min_buf_len) {
    1457              :                 return -ENOMEM;
    1458              :         }
    1459              : 
    1460              :         *buf = sqe->rx.buf;
    1461              :         *buf_len = sqe->rx.buf_len;
    1462              :         return 0;
    1463              : }
    1464              : 
    1465              : /**
    1466              :  * @brief Release memory that was allocated by the RTIO's memory pool
    1467              :  *
    1468              :  * If the RTIO context was created by a call to RTIO_DEFINE_WITH_MEMPOOL(), then the cqe data might
    1469              :  * contain a buffer that's owned by the RTIO context. In those cases (if the read request was
    1470              :  * configured via rtio_sqe_read_with_pool()) the buffer must be returned back to the pool.
    1471              :  *
    1472              :  * Call this function when processing is complete. This function will validate that the memory
    1473              :  * actually belongs to the RTIO context and will ignore invalid arguments.
    1474              :  *
    1475              :  * @param r RTIO context
    1476              :  * @param buff Pointer to the buffer to be released.
    1477              :  * @param buff_len Number of bytes to free (will be rounded up to nearest memory block).
    1478              :  */
    1479            1 : __syscall void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len);
    1480              : 
    1481              : static inline void z_impl_rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
    1482              : {
    1483              : #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
    1484              :         if (r == NULL || buff == NULL || r->block_pool == NULL || buff_len == 0) {
    1485              :                 return;
    1486              :         }
    1487              : 
    1488              :         rtio_block_pool_free(r, buff, buff_len);
    1489              : #else
    1490              :         ARG_UNUSED(r);
    1491              :         ARG_UNUSED(buff);
    1492              :         ARG_UNUSED(buff_len);
    1493              : #endif
    1494              : }
    1495              : 
    1496              : /**
    1497              :  * Grant access to an RTIO context to a user thread
    1498              :  *
    1499              :  * @param r RTIO context
    1500              :  * @param t Thread to grant permissions to
    1501              :  */
    1502            1 : static inline void rtio_access_grant(struct rtio *r, struct k_thread *t)
    1503              : {
    1504              :         k_object_access_grant(r, t);
    1505              : 
    1506              : #ifdef CONFIG_RTIO_SUBMIT_SEM
    1507              :         k_object_access_grant(r->submit_sem, t);
    1508              : #endif
    1509              : 
    1510              : #ifdef CONFIG_RTIO_CONSUME_SEM
    1511              :         k_object_access_grant(r->consume_sem, t);
    1512              : #endif
    1513              : }
    1514              : 
    1515              : 
    1516              : /**
    1517              :  * Revoke access to an RTIO context from a user thread
    1518              :  *
    1519              :  * @param r RTIO context
    1520              :  * @param t Thread to revoke permissions from
    1521              :  */
    1522            1 : static inline void rtio_access_revoke(struct rtio *r, struct k_thread *t)
    1523              : {
    1524              :         k_object_access_revoke(r, t);
    1525              : 
    1526              : #ifdef CONFIG_RTIO_SUBMIT_SEM
    1527              :         k_object_access_revoke(r->submit_sem, t);
    1528              : #endif
    1529              : 
    1530              : #ifdef CONFIG_RTIO_CONSUME_SEM
    1531              :         k_object_access_revoke(r->consume_sem, t);
    1532              : #endif
    1533              : }
    1534              : 
    1535              : /**
    1536              :  * @brief Attempt to cancel an SQE
    1537              :  *
    1538              :  * If possible (not currently executing), cancel an SQE and generate a failure with -ECANCELED
    1539              :  * result.
    1540              :  *
    1541              :  * @param[in] sqe The SQE to cancel
    1542              :  * @return 0 if the SQE was flagged for cancellation
    1543              :  * @return <0 on error
    1544              :  */
    1545            1 : __syscall int rtio_sqe_cancel(struct rtio_sqe *sqe);
    1546              : 
    1547              : static inline int z_impl_rtio_sqe_cancel(struct rtio_sqe *sqe)
    1548              : {
    1549              :         struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);
    1550              : 
    1551              :         do {
    1552              :                 iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED;
    1553              :                 iodev_sqe = rtio_iodev_sqe_next(iodev_sqe);
    1554              :         } while (iodev_sqe != NULL);
    1555              : 
    1556              :         return 0;
    1557              : }
    1558              : 
    1559              : /**
    1560              :  * @brief Signal an AWAIT SQE
    1561              :  *
    1562              :  * If the SQE is currently blocking execution, execution is unblocked. If the SQE is not
    1563              :  * currently blocking execution, The SQE will be skipped.
    1564              :  *
    1565              :  * @note To await the AWAIT SQE blocking execution, chain a nop or callback SQE before
    1566              :  * the await SQE.
    1567              :  *
    1568              :  * @param[in] sqe The SQE to signal
    1569              :  */
    1570            1 : __syscall void rtio_sqe_signal(struct rtio_sqe *sqe);
    1571              : 
    1572              : static inline void z_impl_rtio_sqe_signal(struct rtio_sqe *sqe)
    1573              : {
    1574              :         struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);
    1575              : 
    1576              :         if (!atomic_cas(&iodev_sqe->sqe.await.ok, 0, 1)) {
    1577              :                 iodev_sqe->sqe.await.callback(iodev_sqe, iodev_sqe->sqe.await.userdata);
    1578              :         }
    1579              : }
    1580              : 
    1581              : /**
    1582              :  * @brief Await an AWAIT SQE signal from RTIO IODEV
    1583              :  *
    1584              :  * If the SQE is already signaled, the callback is called immediately. Otherwise the
    1585              :  * callback will be called once the AWAIT SQE is signaled.
    1586              :  *
    1587              :  * @param[in] iodev_sqe The IODEV SQE to await signaled
    1588              :  * @param[in] callback Callback called when SQE is signaled
    1589              :  * @param[in] userdata User data passed to callback
    1590              :  */
    1591            1 : static inline void rtio_iodev_sqe_await_signal(struct rtio_iodev_sqe *iodev_sqe,
    1592              :                                                rtio_signaled_t callback,
    1593              :                                                void *userdata)
    1594              : {
    1595              :         iodev_sqe->sqe.await.callback = callback;
    1596              :         iodev_sqe->sqe.await.userdata = userdata;
    1597              : 
    1598              :         if (!atomic_cas(&iodev_sqe->sqe.await.ok, 0, 1)) {
    1599              :                 callback(iodev_sqe, userdata);
    1600              :         }
    1601              : }
    1602              : 
    1603              : /**
    1604              :  * @brief Copy an array of SQEs into the queue and get resulting handles back
    1605              :  *
    1606              :  * Copies one or more SQEs into the RTIO context and optionally returns their generated SQE handles.
    1607              :  * Handles can be used to cancel events via the rtio_sqe_cancel() call.
    1608              :  *
    1609              :  * @param[in]  r RTIO context
    1610              :  * @param[in]  sqes Pointer to an array of SQEs
    1611              :  * @param[out] handle Optional pointer to @ref rtio_sqe pointer to store the handle of the
    1612              :  *             first generated SQE. Use NULL to ignore.
    1613              :  * @param[in]  sqe_count Count of sqes in array
    1614              :  *
    1615              :  * @retval 0 success
    1616              :  * @retval -ENOMEM not enough room in the queue
    1617              :  */
    1618            1 : __syscall int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
    1619              :                                            struct rtio_sqe **handle, size_t sqe_count);
    1620              : 
    1621              : static inline int z_impl_rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
    1622              :                                                       struct rtio_sqe **handle,
    1623              :                                                       size_t sqe_count)
    1624              : {
    1625              :         struct rtio_sqe *sqe;
    1626              :         uint32_t acquirable = rtio_sqe_acquirable(r);
    1627              : 
    1628              :         if (acquirable < sqe_count) {
    1629              :                 return -ENOMEM;
    1630              :         }
    1631              : 
    1632              :         for (unsigned long i = 0; i < sqe_count; i++) {
    1633              :                 sqe = rtio_sqe_acquire(r);
    1634              :                 __ASSERT_NO_MSG(sqe != NULL);
    1635              :                 if (handle != NULL && i == 0) {
    1636              :                         *handle = sqe;
    1637              :                 }
    1638              :                 *sqe = sqes[i];
    1639              :         }
    1640              : 
    1641              :         return 0;
    1642              : }
    1643              : 
    1644              : /**
    1645              :  * @brief Copy an array of SQEs into the queue
    1646              :  *
    1647              :  * Useful if a batch of submissions is stored in ROM or
    1648              :  * RTIO is used from user mode where a copy must be made.
    1649              :  *
    1650              :  * Partial copying is not done as chained SQEs need to be submitted
    1651              :  * as a whole set.
    1652              :  *
    1653              :  * @param r RTIO context
    1654              :  * @param sqes Pointer to an array of SQEs
    1655              :  * @param sqe_count Count of sqes in array
    1656              :  *
    1657              :  * @retval 0 success
    1658              :  * @retval -ENOMEM not enough room in the queue
    1659              :  */
    1660            1 : static inline int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
    1661              : {
    1662              :         return rtio_sqe_copy_in_get_handles(r, sqes, NULL, sqe_count);
    1663              : }
    1664              : 
    1665              : /**
    1666              :  * @brief Copy an array of CQEs from the queue
    1667              :  *
    1668              :  * Copies from the RTIO context and its queue completion queue
    1669              :  * events, waiting for the given time period to gather the number
    1670              :  * of completions requested.
    1671              :  *
    1672              :  * @param r RTIO context
    1673              :  * @param cqes Pointer to an array of SQEs
    1674              :  * @param cqe_count Count of sqes in array
    1675              :  * @param timeout Timeout to wait for each completion event. Total wait time is
    1676              :  *                potentially timeout*cqe_count at maximum.
    1677              :  *
    1678              :  * @retval copy_count Count of copied CQEs (0 to cqe_count)
    1679              :  */
    1680            1 : __syscall int rtio_cqe_copy_out(struct rtio *r,
    1681              :                                 struct rtio_cqe *cqes,
    1682              :                                 size_t cqe_count,
    1683              :                                 k_timeout_t timeout);
    1684              : static inline int z_impl_rtio_cqe_copy_out(struct rtio *r,
    1685              :                                            struct rtio_cqe *cqes,
    1686              :                                            size_t cqe_count,
    1687              :                                            k_timeout_t timeout)
    1688              : {
    1689              :         size_t copied = 0;
    1690              :         struct rtio_cqe *cqe;
    1691              :         k_timepoint_t end = sys_timepoint_calc(timeout);
    1692              : 
    1693              :         do {
    1694              :                 cqe = K_TIMEOUT_EQ(timeout, K_FOREVER) ? rtio_cqe_consume_block(r)
    1695              :                                                        : rtio_cqe_consume(r);
    1696              :                 if (cqe == NULL) {
    1697              :                         Z_SPIN_DELAY(25);
    1698              :                         continue;
    1699              :                 }
    1700              :                 cqes[copied++] = *cqe;
    1701              :                 rtio_cqe_release(r, cqe);
    1702              :         } while (copied < cqe_count && !sys_timepoint_expired(end));
    1703              : 
    1704              :         return copied;
    1705              : }
    1706              : 
    1707              : /**
    1708              :  * @brief Submit I/O requests to the underlying executor
    1709              :  *
    1710              :  * Submits the queue of submission queue events to the executor.
    1711              :  * The executor will do the work of managing tasks representing each
    1712              :  * submission chain, freeing submission queue events when done, and
    1713              :  * producing completion queue events as submissions are completed.
    1714              :  *
    1715              :  * @warning It is undefined behavior to have re-entrant calls to submit
    1716              :  *
    1717              :  * @param r RTIO context
    1718              :  * @param wait_count Number of submissions to wait for completion of.
    1719              :  *
    1720              :  * @retval 0 On success
    1721              :  */
    1722            1 : __syscall int rtio_submit(struct rtio *r, uint32_t wait_count);
    1723              : 
    1724              : #ifdef CONFIG_RTIO_SUBMIT_SEM
    1725              : static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
    1726              : {
    1727              :         int res = 0;
    1728              : 
    1729              :         if (wait_count > 0) {
    1730              :                 __ASSERT(!k_is_in_isr(),
    1731              :                          "expected rtio submit with wait count to be called from a thread");
    1732              : 
    1733              :                 k_sem_reset(r->submit_sem);
    1734              :                 r->submit_count = wait_count;
    1735              :         }
    1736              : 
    1737              :         rtio_executor_submit(r);
    1738              : 
    1739              :         if (wait_count > 0) {
    1740              :                 res = k_sem_take(r->submit_sem, K_FOREVER);
    1741              :                 __ASSERT(res == 0,
    1742              :                          "semaphore was reset or timed out while waiting on completions!");
    1743              :         }
    1744              : 
    1745              :         return res;
    1746              : }
    1747              : #else
    1748              : static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
    1749              : {
    1750              : 
    1751              :         int res = 0;
    1752              :         uintptr_t cq_count = (uintptr_t)atomic_get(&r->cq_count);
    1753              :         uintptr_t cq_complete_count = cq_count + wait_count;
    1754              :         bool wraps = cq_complete_count < cq_count;
    1755              : 
    1756              :         rtio_executor_submit(r);
    1757              : 
    1758              :         if (wraps) {
    1759              :                 while ((uintptr_t)atomic_get(&r->cq_count) >= cq_count) {
    1760              :                         Z_SPIN_DELAY(10);
    1761              :                         k_yield();
    1762              :                 }
    1763              :         }
    1764              : 
    1765              :         while ((uintptr_t)atomic_get(&r->cq_count) < cq_complete_count) {
    1766              :                 Z_SPIN_DELAY(10);
    1767              :                 k_yield();
    1768              :         }
    1769              : 
    1770              :         return res;
    1771              : }
    1772              : #endif /* CONFIG_RTIO_SUBMIT_SEM */
    1773              : 
    1774              : /**
    1775              :  * @brief Pool of RTIO contexts to use with dynamically created threads
    1776              :  */
    1777            1 : struct rtio_pool {
    1778              :         /** Size of the pool */
    1779            1 :         size_t pool_size;
    1780              : 
    1781              :         /** Array containing contexts of the pool */
    1782            1 :         struct rtio **contexts;
    1783              : 
    1784              :         /** Atomic bitmap to signal a member is used/unused */
    1785            1 :         atomic_t *used;
    1786              : };
    1787              : 
    1788              : /**
    1789              :  * @brief Obtain an RTIO context from a pool
    1790              :  *
    1791              :  * @param pool RTIO pool to acquire a context from
    1792              :  *
    1793              :  * @retval NULL no available contexts
    1794              :  * @retval r Valid context with permissions granted to the calling thread
    1795              :  */
    1796            1 : __syscall struct rtio *rtio_pool_acquire(struct rtio_pool *pool);
    1797              : 
    1798              : static inline struct rtio *z_impl_rtio_pool_acquire(struct rtio_pool *pool)
    1799              : {
    1800              :         struct rtio *r = NULL;
    1801              : 
    1802              :         for (size_t i = 0; i < pool->pool_size; i++) {
    1803              :                 if (atomic_test_and_set_bit(pool->used, i) == 0) {
    1804              :                         r = pool->contexts[i];
    1805              :                         break;
    1806              :                 }
    1807              :         }
    1808              : 
    1809              :         if (r != NULL) {
    1810              :                 rtio_access_grant(r, k_current_get());
    1811              :         }
    1812              : 
    1813              :         return r;
    1814              : }
    1815              : 
    1816              : /**
    1817              :  * @brief Return an RTIO context to a pool
    1818              :  *
    1819              :  * @param pool RTIO pool to return a context to
    1820              :  * @param r RTIO context to return to the pool
    1821              :  */
    1822            1 : __syscall void rtio_pool_release(struct rtio_pool *pool, struct rtio *r);
    1823              : 
    1824              : static inline void z_impl_rtio_pool_release(struct rtio_pool *pool, struct rtio *r)
    1825              : {
    1826              : 
    1827              :         if (k_is_user_context()) {
    1828              :                 rtio_access_revoke(r, k_current_get());
    1829              :         }
    1830              : 
    1831              :         for (size_t i = 0; i < pool->pool_size; i++) {
    1832              :                 if (pool->contexts[i] == r) {
    1833              :                         atomic_clear_bit(pool->used, i);
    1834              :                         break;
    1835              :                 }
    1836              :         }
    1837              : }
    1838              : 
    1839              : /* clang-format off */
    1840              : 
    1841              : /** @cond ignore */
    1842              : 
    1843              : #define Z_RTIO_POOL_NAME_N(n, name)                                             \
    1844              :         name##_##n
    1845              : 
    1846              : #define Z_RTIO_POOL_DEFINE_N(n, name, sq_sz, cq_sz)                             \
    1847              :         RTIO_DEFINE(Z_RTIO_POOL_NAME_N(n, name), sq_sz, cq_sz)
    1848              : 
    1849              : #define Z_RTIO_POOL_REF_N(n, name)                                              \
    1850              :         &Z_RTIO_POOL_NAME_N(n, name)
    1851              : 
    1852              : /** @endcond */
    1853              : 
    1854              : /**
    1855              :  * @brief Statically define and initialize a pool of RTIO contexts
    1856              :  *
    1857              :  * @param name Name of the RTIO pool
    1858              :  * @param pool_sz Number of RTIO contexts to allocate in the pool
    1859              :  * @param sq_sz Size of the submission queue entry pool per context
    1860              :  * @param cq_sz Size of the completion queue entry pool per context
    1861              :  */
    1862            1 : #define RTIO_POOL_DEFINE(name, pool_sz, sq_sz, cq_sz)                           \
    1863              :         LISTIFY(pool_sz, Z_RTIO_POOL_DEFINE_N, (;), name, sq_sz, cq_sz);        \
    1864              :         static struct rtio *name##_contexts[] = {                               \
    1865              :                 LISTIFY(pool_sz, Z_RTIO_POOL_REF_N, (,), name)                  \
    1866              :         };                                                                      \
    1867              :         ATOMIC_DEFINE(name##_used, pool_sz);                                    \
    1868              :         STRUCT_SECTION_ITERABLE(rtio_pool, name) = {                            \
    1869              :                 .pool_size = pool_sz,                                           \
    1870              :                 .contexts = name##_contexts,                                    \
    1871              :                 .used = name##_used,                                            \
    1872              :         }
    1873              : 
    1874              : /* clang-format on */
    1875              : 
    1876              : /**
    1877              :  * @}
    1878              :  */
    1879              : 
    1880              : #ifdef __cplusplus
    1881              : }
    1882              : #endif
    1883              : 
    1884              : #include <zephyr/syscalls/rtio.h>
    1885              : 
    1886              : #endif /* ZEPHYR_INCLUDE_RTIO_RTIO_H_ */
        

Generated by: LCOV version 2.0-1