/*
 * Copyright (c) 2016 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Crypto Cipher APIs
 *
 * This file contains the Crypto Abstraction layer APIs.
 *
 * [Experimental] Users should note that the APIs can change
 * as a part of ongoing development.
 */

#ifndef ZEPHYR_INCLUDE_CRYPTO_H_
#define ZEPHYR_INCLUDE_CRYPTO_H_

#include <zephyr/device.h>
#include <errno.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/crypto/hash.h>
#include "cipher.h"

/**
 * @brief Crypto APIs
 * @defgroup crypto Crypto
 * @since 1.7
 * @version 1.0.0
 * @ingroup os_services
 * @{
 */

/* ctx.flags values. Not all drivers support all flags.
 * A user app can query the supported hw / driver
 * capabilities via the provided API (crypto_query_hwcaps()), and choose a
 * supported config during the session setup.
 */
#define CAP_OPAQUE_KEY_HNDL	BIT(0)
#define CAP_RAW_KEY		BIT(1)

/* TBD to define */
#define CAP_KEY_LOADING_API	BIT(2)

/** Whether the output is placed in a separate buffer or not */
#define CAP_INPLACE_OPS		BIT(3)
#define CAP_SEPARATE_IO_BUFS	BIT(4)

/**
 * These denote whether completion of a cipher_xxx_op is conveyed by the
 * op function returning, or by an async notification.
 */
#define CAP_SYNC_OPS		BIT(5)
#define CAP_ASYNC_OPS		BIT(6)

/** Whether the hardware/driver supports the autononce feature */
#define CAP_AUTONONCE		BIT(7)

/** Don't prefix IV to cipher blocks */
#define CAP_NO_IV_PREFIX	BIT(8)

/* More flags to be added as necessary */

/** @brief Crypto driver API definition. */
__subsystem struct crypto_driver_api {
	int (*query_hw_caps)(const struct device *dev);

	/* Setup a crypto session */
	int (*cipher_begin_session)(const struct device *dev, struct cipher_ctx *ctx,
				    enum cipher_algo algo, enum cipher_mode mode,
				    enum cipher_op op_type);

	/* Tear down an established session */
	int (*cipher_free_session)(const struct device *dev, struct cipher_ctx *ctx);

	/* Register async crypto op completion callback with the driver */
	int (*cipher_async_callback_set)(const struct device *dev,
					 cipher_completion_cb cb);

	/* Setup a hash session */
	int (*hash_begin_session)(const struct device *dev, struct hash_ctx *ctx,
				  enum hash_algo algo);
	/* Tear down an established hash session */
	int (*hash_free_session)(const struct device *dev, struct hash_ctx *ctx);
	/* Register async hash op completion callback with the driver */
	int (*hash_async_callback_set)(const struct device *dev,
				       hash_completion_cb cb);
};

/* Following are the public APIs a user app may call.
 * The first two relate to crypto "session" setup / teardown. Further we
 * have cipher mode specific (ECB, CBC, CTR, CCM, GCM) calls to perform the
 * actual crypto operation in the context of a session. Also we have an
 * API to provide the callback for async operations.
 */

/**
 * @brief Query the crypto hardware capabilities
 *
 * This API is used by the app to query the capabilities supported by the
 * crypto device. Based on this the app can specify a subset of the supported
 * options to be honored for a session during cipher_begin_session().
 *
 * @param dev Pointer to the device structure for the driver instance.
 *
 * @return bitmask of supported options.
 */
static inline int crypto_query_hwcaps(const struct device *dev)
{
	struct crypto_driver_api *api;
	int tmp;

	api = (struct crypto_driver_api *) dev->api;

	tmp = api->query_hw_caps(dev);

	__ASSERT((tmp & (CAP_OPAQUE_KEY_HNDL | CAP_RAW_KEY)) != 0,
		 "Driver should support at least one key type: RAW/Opaque");

	__ASSERT((tmp & (CAP_INPLACE_OPS | CAP_SEPARATE_IO_BUFS)) != 0,
		 "Driver should support at least one IO buf type: Inplace/separate");

	__ASSERT((tmp & (CAP_SYNC_OPS | CAP_ASYNC_OPS)) != 0,
		 "Driver should support at least one op-type: sync/async");

	return tmp;
}
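
/*
 * Example: querying driver capabilities before setting up a session. This is
 * an illustrative sketch; the "crypto" devicetree node label is an assumption
 * and must match the board/driver actually in use.
 *
 *	const struct device *crypto_dev = DEVICE_DT_GET(DT_NODELABEL(crypto));
 *	int caps;
 *
 *	if (!device_is_ready(crypto_dev)) {
 *		return -ENODEV;
 *	}
 *
 *	caps = crypto_query_hwcaps(crypto_dev);
 *	if ((caps & CAP_SYNC_OPS) == 0) {
 *		// async-only driver: register a completion callback instead
 *	}
 */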

/**
 * @}
 */

/**
 * @brief Crypto Cipher APIs
 * @defgroup crypto_cipher Cipher
 * @ingroup crypto
 * @{
 */

/**
 * @brief Setup a crypto session
 *
 * Initializes one-time parameters, like the session key, algorithm and cipher
 * mode, which may remain constant for all operations in the session. The state
 * may be cached in hardware and/or driver data state variables.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param ctx Pointer to the context structure. Various one-time
 *	      parameters like key, keylength, etc. are supplied via
 *	      this structure. The structure documentation specifies
 *	      which fields are to be populated by the app before
 *	      making this call.
 * @param algo The crypto algorithm to be used in this session. e.g. AES
 * @param mode The cipher mode to be used in this session. e.g. CBC, CTR
 * @param optype Whether we should encrypt or decrypt in this session
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int cipher_begin_session(const struct device *dev,
				       struct cipher_ctx *ctx,
				       enum cipher_algo algo,
				       enum cipher_mode mode,
				       enum cipher_op optype)
{
	struct crypto_driver_api *api;
	uint32_t flags;

	api = (struct crypto_driver_api *) dev->api;
	ctx->device = dev;
	ctx->ops.cipher_mode = mode;

	flags = (ctx->flags & (CAP_OPAQUE_KEY_HNDL | CAP_RAW_KEY));
	__ASSERT(flags != 0U, "Keytype missing: RAW Key or OPAQUE handle");
	__ASSERT(flags != (CAP_OPAQUE_KEY_HNDL | CAP_RAW_KEY),
		 "conflicting options for keytype");

	flags = (ctx->flags & (CAP_INPLACE_OPS | CAP_SEPARATE_IO_BUFS));
	__ASSERT(flags != 0U, "IO buffer type missing");
	__ASSERT(flags != (CAP_INPLACE_OPS | CAP_SEPARATE_IO_BUFS),
		 "conflicting options for IO buffer type");

	flags = (ctx->flags & (CAP_SYNC_OPS | CAP_ASYNC_OPS));
	__ASSERT(flags != 0U, "sync/async type missing");
	__ASSERT(flags != (CAP_SYNC_OPS | CAP_ASYNC_OPS),
		 "conflicting options for sync/async");

	return api->cipher_begin_session(dev, ctx, algo, mode, optype);
}
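
/*
 * Example: beginning an AES-CBC encryption session (illustrative sketch).
 * The cipher_ctx member names used here (key.bit_stream, keylen, flags) are
 * assumed from cipher.h; crypto_dev is the device obtained in the example
 * above, and the flags chosen should be ones crypto_query_hwcaps() reported.
 *
 *	uint8_t key[16];	// filled with the application's AES-128 key
 *	struct cipher_ctx ctx = {
 *		.keylen = sizeof(key),
 *		.key.bit_stream = key,
 *		.flags = CAP_RAW_KEY | CAP_SYNC_OPS | CAP_SEPARATE_IO_BUFS,
 *	};
 *	int ret;
 *
 *	ret = cipher_begin_session(crypto_dev, &ctx, CRYPTO_CIPHER_ALGO_AES,
 *				   CRYPTO_CIPHER_MODE_CBC,
 *				   CRYPTO_CIPHER_OP_ENCRYPT);
 */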

/**
 * @brief Cleanup a crypto session
 *
 * Clears the hardware and/or driver state of a previous session.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param ctx Pointer to the crypto context structure of the session
 *	      to be freed.
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int cipher_free_session(const struct device *dev,
				      struct cipher_ctx *ctx)
{
	struct crypto_driver_api *api;

	api = (struct crypto_driver_api *) dev->api;

	return api->cipher_free_session(dev, ctx);
}

/**
 * @brief Registers an async crypto op completion callback with the driver
 *
 * The application can register an async crypto op completion callback handler
 * to be invoked by the driver, on completion of a prior request submitted via
 * one of the cipher_xxx_op() calls. Based on crypto device hardware semantics,
 * this is likely to be invoked from an ISR context.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param cb Pointer to application callback to be called by the driver.
 *
 * @return 0 on success, -ENOTSUP if the driver does not support async op,
 *	   negative errno code on other error.
 */
static inline int cipher_callback_set(const struct device *dev,
				      cipher_completion_cb cb)
{
	struct crypto_driver_api *api;

	api = (struct crypto_driver_api *) dev->api;

	if (api->cipher_async_callback_set) {
		return api->cipher_async_callback_set(dev, cb);
	}

	return -ENOTSUP;
}
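
/*
 * Example: registering an async completion handler (illustrative sketch; the
 * callback signature is assumed from cipher_completion_cb in cipher.h). Only
 * useful for sessions set up with CAP_ASYNC_OPS.
 *
 *	static void crypt_done(struct cipher_pkt *completed, int status)
 *	{
 *		// likely ISR context: keep this short and signal a thread
 *	}
 *
 *	if (cipher_callback_set(crypto_dev, crypt_done) == -ENOTSUP) {
 *		// driver supports synchronous operation only
 *	}
 */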

/**
 * @brief Perform a single-block crypto operation (ECB cipher mode). This
 * should not be used to operate on multiple blocks, for security reasons.
 *
 * @param ctx Pointer to the crypto context of this op.
 * @param pkt Structure holding the input/output buffer pointers.
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int cipher_block_op(struct cipher_ctx *ctx,
				  struct cipher_pkt *pkt)
{
	__ASSERT(ctx->ops.cipher_mode == CRYPTO_CIPHER_MODE_ECB, "ECB mode "
		 "session invoking a different mode handler");

	pkt->ctx = ctx;
	return ctx->ops.block_crypt_hndlr(ctx, pkt);
}

/**
 * @brief Perform Cipher Block Chaining (CBC) crypto operation.
 *
 * @param ctx Pointer to the crypto context of this op.
 * @param pkt Structure holding the input/output buffer pointers.
 * @param iv Initialization Vector (IV) for the operation. The same
 *	     IV value should not be reused across multiple
 *	     operations (within a session context) for security.
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int cipher_cbc_op(struct cipher_ctx *ctx,
				struct cipher_pkt *pkt, uint8_t *iv)
{
	__ASSERT(ctx->ops.cipher_mode == CRYPTO_CIPHER_MODE_CBC, "CBC mode "
		 "session invoking a different mode handler");

	pkt->ctx = ctx;
	return ctx->ops.cbc_crypt_hndlr(ctx, pkt, iv);
}
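
/*
 * Example: one CBC encryption on the session begun above (illustrative
 * sketch; cipher_pkt member names are assumed from cipher.h). Unless
 * CAP_NO_IV_PREFIX was selected, the driver may prepend the IV to the
 * output, so size out_buf with that in mind.
 *
 *	uint8_t iv[16];		// fresh, never-reused IV
 *	uint8_t plaintext[64];	// multiple of the AES block size
 *	uint8_t ciphertext[sizeof(plaintext) + sizeof(iv)];
 *
 *	struct cipher_pkt pkt = {
 *		.in_buf = plaintext,
 *		.in_len = sizeof(plaintext),
 *		.out_buf = ciphertext,
 *		.out_buf_max = sizeof(ciphertext),
 *	};
 *
 *	ret = cipher_cbc_op(&ctx, &pkt, iv);
 */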

/**
 * @brief Perform Counter (CTR) mode crypto operation.
 *
 * @param ctx Pointer to the crypto context of this op.
 * @param pkt Structure holding the input/output buffer pointers.
 * @param iv Initialization Vector (IV) for the operation. We use a
 *	     split counter formed by appending IV and ctr.
 *	     Consequently ivlen = keylen - ctrlen. 'ctrlen' is
 *	     specified during session setup through the
 *	     'ctx.mode_params.ctr_params.ctr_len' parameter. IV
 *	     should not be reused across multiple operations
 *	     (within a session context) for security. The non-IV
 *	     part of the split counter is transparent to the caller
 *	     and is fully managed by the crypto provider.
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int cipher_ctr_op(struct cipher_ctx *ctx,
				struct cipher_pkt *pkt, uint8_t *iv)
{
	__ASSERT(ctx->ops.cipher_mode == CRYPTO_CIPHER_MODE_CTR, "CTR mode "
		 "session invoking a different mode handler");

	pkt->ctx = ctx;
	return ctx->ops.ctr_crypt_hndlr(ctx, pkt, iv);
}
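
/*
 * Example: CTR mode with a 32-bit counter (illustrative sketch, reusing key,
 * pkt and ret from the examples above). The mode_params member name
 * (ctr_info) is assumed from the ctr_params definition in cipher.h. With AES
 * and ctrlen = 32, the caller supplies the remaining 12 bytes of the counter
 * block as the IV; the counter part is managed by the driver.
 *
 *	struct cipher_ctx ctr_ctx = {
 *		.keylen = sizeof(key),
 *		.key.bit_stream = key,
 *		.flags = CAP_RAW_KEY | CAP_SYNC_OPS | CAP_SEPARATE_IO_BUFS,
 *		.mode_params.ctr_info.ctr_len = 32,
 *	};
 *	uint8_t ctr_iv[12];	// nonce portion of the split counter
 *
 *	ret = cipher_begin_session(crypto_dev, &ctr_ctx, CRYPTO_CIPHER_ALGO_AES,
 *				   CRYPTO_CIPHER_MODE_CTR,
 *				   CRYPTO_CIPHER_OP_ENCRYPT);
 *	if (ret == 0) {
 *		ret = cipher_ctr_op(&ctr_ctx, &pkt, ctr_iv);
 *	}
 */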

/**
 * @brief Perform Counter with CBC-MAC (CCM) mode crypto operation
 *
 * @param ctx Pointer to the crypto context of this op.
 * @param pkt Structure holding the input/output, Associated
 *	      Data (AD) and auth tag buffer pointers.
 * @param nonce Nonce for the operation. Same nonce value should not
 *		be reused across multiple operations (within a
 *		session context) for security.
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int cipher_ccm_op(struct cipher_ctx *ctx,
				struct cipher_aead_pkt *pkt, uint8_t *nonce)
{
	__ASSERT(ctx->ops.cipher_mode == CRYPTO_CIPHER_MODE_CCM, "CCM mode "
		 "session invoking a different mode handler");

	pkt->pkt->ctx = ctx;
	return ctx->ops.ccm_crypt_hndlr(ctx, pkt, nonce);
}

/**
 * @brief Perform Galois/Counter Mode (GCM) crypto operation
 *
 * @param ctx Pointer to the crypto context of this op.
 * @param pkt Structure holding the input/output, Associated
 *	      Data (AD) and auth tag buffer pointers.
 * @param nonce Nonce for the operation. Same nonce value should not
 *		be reused across multiple operations (within a
 *		session context) for security.
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int cipher_gcm_op(struct cipher_ctx *ctx,
				struct cipher_aead_pkt *pkt, uint8_t *nonce)
{
	__ASSERT(ctx->ops.cipher_mode == CRYPTO_CIPHER_MODE_GCM, "GCM mode "
		 "session invoking a different mode handler");

	pkt->pkt->ctx = ctx;
	return ctx->ops.gcm_crypt_hndlr(ctx, pkt, nonce);
}
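
/*
 * Example: AES-GCM encryption (illustrative sketch, reusing key, plaintext
 * and ciphertext from the examples above; cipher_aead_pkt and gcm_info
 * member names are assumed from cipher.h). The same flow applies to
 * cipher_ccm_op() with mode_params.ccm_info instead. The nonce must never be
 * reused with the same key.
 *
 *	uint8_t nonce[12];
 *	uint8_t aad[16];	// additional authenticated data
 *	uint8_t tag[16];
 *	struct cipher_ctx gcm_ctx = {
 *		.keylen = sizeof(key),
 *		.key.bit_stream = key,
 *		.flags = CAP_RAW_KEY | CAP_SYNC_OPS | CAP_SEPARATE_IO_BUFS,
 *		.mode_params.gcm_info.tag_len = sizeof(tag),
 *	};
 *	struct cipher_pkt data = {
 *		.in_buf = plaintext,
 *		.in_len = sizeof(plaintext),
 *		.out_buf = ciphertext,
 *		.out_buf_max = sizeof(ciphertext),
 *	};
 *	struct cipher_aead_pkt aead = {
 *		.ad = aad,
 *		.ad_len = sizeof(aad),
 *		.pkt = &data,
 *		.tag = tag,
 *	};
 *
 *	ret = cipher_begin_session(crypto_dev, &gcm_ctx, CRYPTO_CIPHER_ALGO_AES,
 *				   CRYPTO_CIPHER_MODE_GCM,
 *				   CRYPTO_CIPHER_OP_ENCRYPT);
 *	if (ret == 0) {
 *		ret = cipher_gcm_op(&gcm_ctx, &aead, nonce);
 *	}
 */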

/**
 * @}
 */

/**
 * @brief Crypto Hash APIs
 * @defgroup crypto_hash Hash
 * @ingroup crypto
 * @{
 */

/**
 * @brief Setup a hash session
 *
 * Initializes one-time parameters, like the algorithm, which may
 * remain constant for all operations in the session. The state may be
 * cached in hardware and/or driver data state variables.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param ctx Pointer to the context structure. Various one-time
 *	      parameters like session capabilities and algorithm are
 *	      supplied via this structure. The structure documentation
 *	      specifies which fields are to be populated by the app
 *	      before making this call.
 * @param algo The hash algorithm to be used in this session. e.g. sha256
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int hash_begin_session(const struct device *dev,
				     struct hash_ctx *ctx,
				     enum hash_algo algo)
{
	uint32_t flags;
	struct crypto_driver_api *api;

	api = (struct crypto_driver_api *) dev->api;
	ctx->device = dev;

	flags = (ctx->flags & (CAP_INPLACE_OPS | CAP_SEPARATE_IO_BUFS));
	__ASSERT(flags != 0U, "IO buffer type missing");
	__ASSERT(flags != (CAP_INPLACE_OPS | CAP_SEPARATE_IO_BUFS),
		 "conflicting options for IO buffer type");

	flags = (ctx->flags & (CAP_SYNC_OPS | CAP_ASYNC_OPS));
	__ASSERT(flags != 0U, "sync/async type missing");
	__ASSERT(flags != (CAP_SYNC_OPS | CAP_ASYNC_OPS),
		 "conflicting options for sync/async");

	return api->hash_begin_session(dev, ctx, algo);
}

/**
 * @brief Cleanup a hash session
 *
 * Clears the hardware and/or driver state of a session. @see hash_begin_session
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param ctx Pointer to the crypto hash context structure of the session
 *	      to be freed.
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int hash_free_session(const struct device *dev,
				    struct hash_ctx *ctx)
{
	struct crypto_driver_api *api;

	api = (struct crypto_driver_api *) dev->api;

	return api->hash_free_session(dev, ctx);
}

/**
 * @brief Registers an async hash completion callback with the driver
 *
 * The application can register an async hash completion callback handler
 * to be invoked by the driver, on completion of a prior request submitted via
 * hash_compute(). Based on crypto device hardware semantics, this is likely to
 * be invoked from an ISR context.
 *
 * @param dev Pointer to the device structure for the driver instance.
 * @param cb Pointer to application callback to be called by the driver.
 *
 * @return 0 on success, -ENOTSUP if the driver does not support async op,
 *	   negative errno code on other error.
 */
static inline int hash_callback_set(const struct device *dev,
				    hash_completion_cb cb)
{
	struct crypto_driver_api *api;

	api = (struct crypto_driver_api *) dev->api;

	if (api->hash_async_callback_set) {
		return api->hash_async_callback_set(dev, cb);
	}

	return -ENOTSUP;
}

/**
 * @brief Perform a cryptographic hash function.
 *
 * @param ctx Pointer to the hash context of this op.
 * @param pkt Structure holding the input/output.
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int hash_compute(struct hash_ctx *ctx, struct hash_pkt *pkt)
{
	pkt->ctx = ctx;

	return ctx->hash_hndlr(ctx, pkt, true);
}
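
/*
 * Example: one-shot SHA-256 (illustrative sketch; hash_pkt member names and
 * the CRYPTO_HASH_ALGO_SHA256 enumerator are assumed from hash.h). msg and
 * msg_len stand for the application's input buffer; crypto_dev is the device
 * from the earlier example.
 *
 *	uint8_t digest[32];
 *	struct hash_ctx hctx = {
 *		.flags = CAP_SYNC_OPS | CAP_SEPARATE_IO_BUFS,
 *	};
 *	struct hash_pkt hpkt = {
 *		.in_buf = msg,
 *		.in_len = msg_len,
 *		.out_buf = digest,
 *	};
 *
 *	ret = hash_begin_session(crypto_dev, &hctx, CRYPTO_HASH_ALGO_SHA256);
 *	if (ret == 0) {
 *		ret = hash_compute(&hctx, &hpkt);
 *		hash_free_session(crypto_dev, &hctx);
 *	}
 */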

/**
 * @brief Perform a cryptographic multipart hash operation.
 *
 * This function can be called zero or more times, passing a slice of
 * the data. The hash is calculated using all the given pieces.
 * To calculate the hash call @c hash_compute().
 *
 * @param ctx Pointer to the hash context of this op.
 * @param pkt Structure holding the input.
 *
 * @return 0 on success, negative errno code on fail.
 */
static inline int hash_update(struct hash_ctx *ctx, struct hash_pkt *pkt)
{
	pkt->ctx = ctx;

	return ctx->hash_hndlr(ctx, pkt, false);
}
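
/*
 * Example: multipart hashing (illustrative sketch, continuing the session
 * from the example above with application-provided chunk1/chunk2 buffers).
 * Intermediate slices go through hash_update(); the final slice is passed to
 * hash_compute(), which also writes the digest to out_buf.
 *
 *	struct hash_pkt part = {
 *		.in_buf = chunk1,
 *		.in_len = chunk1_len,
 *	};
 *
 *	ret = hash_update(&hctx, &part);
 *
 *	part.in_buf = chunk2;
 *	part.in_len = chunk2_len;
 *	part.out_buf = digest;
 *	ret = hash_compute(&hctx, &part);
 */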

/**
 * @}
 */

#endif /* ZEPHYR_INCLUDE_CRYPTO_H_ */