VirtualBox

source: vbox/trunk/src/libs/openssl-3.1.0/crypto/evp/e_aes.c @ 100908

Last change on this file since 100908 was 99366, checked in by vboxsync, 22 months ago:

openssl-3.1.0: Applied and adjusted our OpenSSL changes to 3.0.7. bugref:10418

File size: 133.5 KB

/*
 * Copyright 2001-2022 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This file uses the low-level AES functions (which are deprecated for
 * non-internal use) in order to implement the EVP AES ciphers.
 */
#include "internal/deprecated.h"

#include <string.h>
#include <assert.h>
#include <openssl/opensslconf.h>
#include <openssl/crypto.h>
#include <openssl/evp.h>
#include <openssl/err.h>
#include <openssl/aes.h>
#include <openssl/rand.h>
#include <openssl/cmac.h>
#include "crypto/evp.h"
#include "internal/cryptlib.h"
#include "crypto/modes.h"
#include "crypto/siv.h"
#include "crypto/aes_platform.h"
#include "evp_local.h"

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;
    block128_f block;
    union {
        cbc128_f cbc;
        ctr128_f ctr;
    } stream;
} EVP_AES_KEY;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;                       /* AES key schedule to use */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    GCM128_CONTEXT gcm;
    unsigned char *iv;          /* Temporary IV store */
    int ivlen;                  /* IV length */
    int taglen;
    int iv_gen;                 /* It is OK to generate IVs */
    int iv_gen_rand;            /* No IV was specified, so generate a rand IV */
    int tls_aad_len;            /* TLS AAD length */
    uint64_t tls_enc_records;   /* Number of TLS records encrypted */
    ctr128_f ctr;
} EVP_AES_GCM_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks1, ks2;                 /* AES key schedules to use */
    XTS128_CONTEXT xts;
    void (*stream) (const unsigned char *in,
                    unsigned char *out, size_t length,
                    const AES_KEY *key1, const AES_KEY *key2,
                    const unsigned char iv[16]);
} EVP_AES_XTS_CTX;

#ifdef FIPS_MODULE
static const int allow_insecure_decrypt = 0;
#else
static const int allow_insecure_decrypt = 1;
#endif

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ks;                       /* AES key schedule to use */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    int tag_set;                /* Set if tag is valid */
    int len_set;                /* Set if message length set */
    int L, M;                   /* L and M parameters from RFC3610 */
    int tls_aad_len;            /* TLS AAD length */
    CCM128_CONTEXT ccm;
    ccm128_f str;
} EVP_AES_CCM_CTX;

#ifndef OPENSSL_NO_OCB
typedef struct {
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ksenc;                    /* AES key schedule to use for encryption */
    union {
        OSSL_UNION_ALIGN;
        AES_KEY ks;
    } ksdec;                    /* AES key schedule to use for decryption */
    int key_set;                /* Set if key initialised */
    int iv_set;                 /* Set if an iv is set */
    OCB128_CONTEXT ocb;
    unsigned char *iv;          /* Temporary IV store */
    unsigned char tag[16];
    unsigned char data_buf[16]; /* Store partial data blocks */
    unsigned char aad_buf[16];  /* Store partial AAD blocks */
    int data_buf_len;
    int aad_buf_len;
    int ivlen;                  /* IV length */
    int taglen;
} EVP_AES_OCB_CTX;
#endif

#define MAXBITCHUNK ((size_t)1<<(sizeof(size_t)*8-4))

/* increment counter (64-bit int) by 1 */
static void ctr64_inc(unsigned char *counter)
{
    int n = 8;
    unsigned char c;

    do {
        --n;
        c = counter[n];
        ++c;
        counter[n] = c;
        if (c)
            return;
    } while (n);
}
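
/*-
 * Illustrative note (not part of the original source): ctr64_inc() treats
 * the 8 bytes at 'counter' as one big-endian 64-bit integer and stops at
 * the first byte that does not wrap, e.g.
 *
 *     00 00 00 00 00 00 00 ff  ->  00 00 00 00 00 00 01 00
 *
 * Callers below pass iv + ivlen - 8, i.e. the invocation field of a
 * generated GCM IV.
 */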

#if defined(AESNI_CAPABLE)
# if defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
#  define AES_GCM_ASM2(gctx) (gctx->gcm.block==(block128_f)aesni_encrypt && \
                              gctx->gcm.ghash==gcm_ghash_avx)
#  undef AES_GCM_ASM2          /* minor size optimization */
# endif

static int aesni_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                          const unsigned char *iv, int enc)
{
    int ret, mode;
    EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    mode = EVP_CIPHER_CTX_get_mode(ctx);
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
        && !enc) {
        ret = aesni_set_decrypt_key(key, keylen, &dat->ks.ks);
        dat->block = (block128_f) aesni_decrypt;
        dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
            (cbc128_f) aesni_cbc_encrypt : NULL;
    } else {
        ret = aesni_set_encrypt_key(key, keylen, &dat->ks.ks);
        dat->block = (block128_f) aesni_encrypt;
        if (mode == EVP_CIPH_CBC_MODE)
            dat->stream.cbc = (cbc128_f) aesni_cbc_encrypt;
        else if (mode == EVP_CIPH_CTR_MODE)
            dat->stream.ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
        else
            dat->stream.cbc = NULL;
    }

    if (ret < 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_AES_KEY_SETUP_FAILED);
        return 0;
    }

    return 1;
}
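
/*-
 * Illustrative EVP-level usage reaching the key-init path above (a sketch,
 * not part of the original source):
 *
 *     EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
 *     EVP_EncryptInit_ex(c, EVP_aes_128_cbc(), NULL, key, iv);
 *
 * On an AES-NI capable CPU the EVP_aes_128_cbc() table dispatches here,
 * so aesni_init_key() expands the key schedule once per init.
 */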

static int aesni_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len)
{
    aesni_cbc_encrypt(in, out, len, &EVP_C_DATA(EVP_AES_KEY,ctx)->ks.ks,
                      ctx->iv, EVP_CIPHER_CTX_is_encrypting(ctx));

    return 1;
}

static int aesni_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len)
{
    size_t bl = EVP_CIPHER_CTX_get_block_size(ctx);

    if (len < bl)
        return 1;

    aesni_ecb_encrypt(in, out, len, &EVP_C_DATA(EVP_AES_KEY,ctx)->ks.ks,
                      EVP_CIPHER_CTX_is_encrypting(ctx));

    return 1;
}

# define aesni_ofb_cipher aes_ofb_cipher
static int aesni_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# define aesni_cfb_cipher aes_cfb_cipher
static int aesni_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# define aesni_cfb8_cipher aes_cfb8_cipher
static int aesni_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aesni_cfb1_cipher aes_cfb1_cipher
static int aesni_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aesni_ctr_cipher aes_ctr_cipher
static int aesni_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX, ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        aesni_set_encrypt_key(key, keylen, &gctx->ks.ks);
        CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f) aesni_encrypt);
        gctx->ctr = (ctr128_f) aesni_ctr32_encrypt_blocks;
        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;
        if (iv) {
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (gctx->key_set)
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);
        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}
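
/*-
 * Illustrative call sequence reaching the GCM init above (a sketch, not
 * part of the original source); key and iv may be supplied in one call or
 * split across several:
 *
 *     EVP_EncryptInit_ex(c, EVP_aes_256_gcm(), NULL, NULL, NULL);
 *     EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_IVLEN, 12, NULL);
 *     EVP_EncryptInit_ex(c, NULL, NULL, key, iv);
 *
 * The iv_set/key_set bookkeeping above exists precisely because either
 * argument may still be NULL on any given init call.
 */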

# define aesni_gcm_cipher aes_gcm_cipher
static int aesni_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key) {
        /* The key is two half length keys in reality */
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
        const int bytes = keylen / 2;
        const int bits = bytes * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        /*
         * Verify that the two keys are different.
         *
         * This addresses Rogaway's vulnerability.
         * See comment in aes_xts_init_key() below.
         */
        if ((!allow_insecure_decrypt || enc)
                && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DUPLICATED_KEYS);
            return 0;
        }

        /* key_len is two AES keys */
        if (enc) {
            aesni_set_encrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aesni_encrypt;
            xctx->stream = aesni_xts_encrypt;
        } else {
            aesni_set_decrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aesni_decrypt;
            xctx->stream = aesni_xts_decrypt;
        }

        aesni_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
        xctx->xts.block2 = (block128_f) aesni_encrypt;

        xctx->xts.key1 = &xctx->ks1;
    }

    if (iv) {
        xctx->xts.key2 = &xctx->ks2;
        memcpy(ctx->iv, iv, 16);
    }

    return 1;
}
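
/*-
 * Illustrative note (not part of the original source): an EVP XTS "key" is
 * the concatenation key1 || key2, so EVP_aes_128_xts() takes 32 bytes and
 * EVP_aes_256_xts() takes 64. key1 processes the data units in the chosen
 * direction; key2 only ever encrypts the 16-byte tweak (the IV), which is
 * why block2 is always set to the encrypt primitive above.
 */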

# define aesni_xts_cipher aes_xts_cipher
static int aesni_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

static int aesni_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        aesni_set_encrypt_key(key, keylen, &cctx->ks.ks);
        CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
                           &cctx->ks, (block128_f) aesni_encrypt);
        cctx->str = enc ? (ccm128_f) aesni_ccm64_encrypt_blocks :
            (ccm128_f) aesni_ccm64_decrypt_blocks;
        cctx->key_set = 1;
    }
    if (iv) {
        memcpy(ctx->iv, iv, 15 - cctx->L);
        cctx->iv_set = 1;
    }
    return 1;
}
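
/*-
 * Illustrative note (not part of the original source): in CCM (RFC 3610),
 * M is the tag length and L the width of the message-length field; the
 * nonce is always 15 - L bytes, hence the memcpy above. Setting a 13-byte
 * nonce via EVP_CTRL_AEAD_SET_IVLEN, for example, implies L = 2.
 */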

# define aesni_ccm_cipher aes_ccm_cipher
static int aesni_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);

# ifndef OPENSSL_NO_OCB
static int aesni_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
            aesni_set_encrypt_key(key, keylen, &octx->ksenc.ks);
            aesni_set_decrypt_key(key, keylen, &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) aesni_encrypt,
                                    (block128_f) aesni_decrypt,
                                    enc ? aesni_ocb_encrypt
                                        : aesni_ocb_decrypt))
                return 0;
        }
        while (0);

        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}

# define aesni_ocb_cipher aes_ocb_cipher
static int aesni_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                            const unsigned char *in, size_t len);
# endif                        /* OPENSSL_NO_OCB */

# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
static const EVP_CIPHER aesni_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aesni_init_key, \
        aesni_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize, \
        keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_init_key, \
        aes_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return AESNI_CAPABLE?&aesni_##keylen##_##mode:&aes_##keylen##_##mode; }

# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
static const EVP_CIPHER aesni_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aesni_##mode##_init_key, \
        aesni_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_##mode##_init_key, \
        aes_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return AESNI_CAPABLE?&aesni_##keylen##_##mode:&aes_##keylen##_##mode; }
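
/*-
 * Illustrative expansion (a sketch, not part of the original source; the
 * exact flag arguments are assumptions -- see the real invocations later
 * in this file): an invocation such as
 *
 *     BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, gcm, GCM, AEAD_FLAGS)
 *
 * defines the static aesni_128_gcm and aes_128_gcm EVP_CIPHER tables plus
 * an EVP_aes_128_gcm() accessor that selects the AES-NI variant at
 * runtime via AESNI_CAPABLE. Note the ?2:1 factor doubling the key length
 * for XTS and SIV, whose keys are two AES keys.
 */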

#elif defined(SPARC_AES_CAPABLE)

static int aes_t4_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                           const unsigned char *iv, int enc)
{
    int ret, mode, bits;
    EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);

    mode = EVP_CIPHER_CTX_get_mode(ctx);
    bits = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
    if (bits <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
        && !enc) {
        ret = 0;
        aes_t4_set_decrypt_key(key, bits, &dat->ks.ks);
        dat->block = (block128_f) aes_t4_decrypt;
        switch (bits) {
        case 128:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes128_t4_cbc_decrypt : NULL;
            break;
        case 192:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes192_t4_cbc_decrypt : NULL;
            break;
        case 256:
            dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
                (cbc128_f) aes256_t4_cbc_decrypt : NULL;
            break;
        default:
            ret = -1;
        }
    } else {
        ret = 0;
        aes_t4_set_encrypt_key(key, bits, &dat->ks.ks);
        dat->block = (block128_f) aes_t4_encrypt;
        switch (bits) {
        case 128:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes128_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes128_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        case 192:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes192_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes192_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        case 256:
            if (mode == EVP_CIPH_CBC_MODE)
                dat->stream.cbc = (cbc128_f) aes256_t4_cbc_encrypt;
            else if (mode == EVP_CIPH_CTR_MODE)
                dat->stream.ctr = (ctr128_f) aes256_t4_ctr32_encrypt;
            else
                dat->stream.cbc = NULL;
            break;
        default:
            ret = -1;
        }
    }

    if (ret < 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_AES_KEY_SETUP_FAILED);
        return 0;
    }

    return 1;
}

# define aes_t4_cbc_cipher aes_cbc_cipher
static int aes_t4_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_ecb_cipher aes_ecb_cipher
static int aes_t4_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_ofb_cipher aes_ofb_cipher
static int aes_t4_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_cfb_cipher aes_cfb_cipher
static int aes_t4_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# define aes_t4_cfb8_cipher aes_cfb8_cipher
static int aes_t4_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                              const unsigned char *in, size_t len);

# define aes_t4_cfb1_cipher aes_cfb1_cipher
static int aes_t4_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                              const unsigned char *in, size_t len);

# define aes_t4_ctr_cipher aes_ctr_cipher
static int aes_t4_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;
    if (key) {
        const int bits = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (bits <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        aes_t4_set_encrypt_key(key, bits, &gctx->ks.ks);
        CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
                           (block128_f) aes_t4_encrypt);
        switch (bits) {
        case 128:
            gctx->ctr = (ctr128_f) aes128_t4_ctr32_encrypt;
            break;
        case 192:
            gctx->ctr = (ctr128_f) aes192_t4_ctr32_encrypt;
            break;
        case 256:
            gctx->ctr = (ctr128_f) aes256_t4_ctr32_encrypt;
            break;
        default:
            return 0;
        }
        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;
        if (iv) {
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (gctx->key_set)
            CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);
        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

# define aes_t4_gcm_cipher aes_gcm_cipher
static int aes_t4_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);

    if (!iv && !key)
        return 1;

    if (key) {
        /* The key is two half length keys in reality */
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
        const int bytes = keylen / 2;
        const int bits = bytes * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        /*
         * Verify that the two keys are different.
         *
         * This addresses Rogaway's vulnerability.
         * See comment in aes_xts_init_key() below.
         */
        if ((!allow_insecure_decrypt || enc)
                && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DUPLICATED_KEYS);
            return 0;
        }

        xctx->stream = NULL;
        /* key_len is two AES keys */
        if (enc) {
            aes_t4_set_encrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aes_t4_encrypt;
            switch (bits) {
            case 128:
                xctx->stream = aes128_t4_xts_encrypt;
                break;
            case 256:
                xctx->stream = aes256_t4_xts_encrypt;
                break;
            default:
                return 0;
            }
        } else {
            aes_t4_set_decrypt_key(key, bits, &xctx->ks1.ks);
            xctx->xts.block1 = (block128_f) aes_t4_decrypt;
            switch (bits) {
            case 128:
                xctx->stream = aes128_t4_xts_decrypt;
                break;
            case 256:
                xctx->stream = aes256_t4_xts_decrypt;
                break;
            default:
                return 0;
            }
        }

        aes_t4_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
        xctx->xts.block2 = (block128_f) aes_t4_encrypt;

        xctx->xts.key1 = &xctx->ks1;
    }

    if (iv) {
        xctx->xts.key2 = &xctx->ks2;
        memcpy(ctx->iv, iv, 16);
    }

    return 1;
}

# define aes_t4_xts_cipher aes_xts_cipher
static int aes_t4_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

static int aes_t4_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int bits = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (bits <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        aes_t4_set_encrypt_key(key, bits, &cctx->ks.ks);
        CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
                           &cctx->ks, (block128_f) aes_t4_encrypt);
        cctx->str = NULL;
        cctx->key_set = 1;
    }
    if (iv) {
        memcpy(ctx->iv, iv, 15 - cctx->L);
        cctx->iv_set = 1;
    }
    return 1;
}

# define aes_t4_ccm_cipher aes_ccm_cipher
static int aes_t4_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);

# ifndef OPENSSL_NO_OCB
static int aes_t4_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                               const unsigned char *iv, int enc)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;

        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }
        do {
            /*
             * We set both the encrypt and decrypt key here because decrypt
             * needs both. We could possibly optimise to remove setting the
             * decrypt for an encryption operation.
             */
            aes_t4_set_encrypt_key(key, keylen, &octx->ksenc.ks);
            aes_t4_set_decrypt_key(key, keylen, &octx->ksdec.ks);
            if (!CRYPTO_ocb128_init(&octx->ocb,
                                    &octx->ksenc.ks, &octx->ksdec.ks,
                                    (block128_f) aes_t4_encrypt,
                                    (block128_f) aes_t4_decrypt,
                                    NULL))
                return 0;
        }
        while (0);

        /*
         * If we have an iv we can set it directly, otherwise use saved IV.
         */
        if (iv == NULL && octx->iv_set)
            iv = octx->iv;
        if (iv) {
            if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
                != 1)
                return 0;
            octx->iv_set = 1;
        }
        octx->key_set = 1;
    } else {
        /* If key set use IV, otherwise copy */
        if (octx->key_set)
            CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
        else
            memcpy(octx->iv, iv, octx->ivlen);
        octx->iv_set = 1;
    }
    return 1;
}

# define aes_t4_ocb_cipher aes_ocb_cipher
static int aes_t4_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                             const unsigned char *in, size_t len);
# endif                        /* OPENSSL_NO_OCB */

# ifndef OPENSSL_NO_SIV
#  define aes_t4_siv_init_key aes_siv_init_key
#  define aes_t4_siv_cipher aes_siv_cipher
# endif                        /* OPENSSL_NO_SIV */

# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
static const EVP_CIPHER aes_t4_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_t4_init_key, \
        aes_t4_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##nmode,blocksize, \
        keylen/8,ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_init_key, \
        aes_##mode##_cipher, \
        NULL, \
        sizeof(EVP_AES_KEY), \
        NULL,NULL,NULL,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }

# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
static const EVP_CIPHER aes_t4_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_t4_##mode##_init_key, \
        aes_t4_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
static const EVP_CIPHER aes_##keylen##_##mode = { \
        nid##_##keylen##_##mode,blocksize, \
        (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
        ivlen, \
        flags|EVP_CIPH_##MODE##_MODE, \
        EVP_ORIG_GLOBAL, \
        aes_##mode##_init_key, \
        aes_##mode##_cipher, \
        aes_##mode##_cleanup, \
        sizeof(EVP_AES_##MODE##_CTX), \
        NULL,NULL,aes_##mode##_ctrl,NULL }; \
const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
{ return SPARC_AES_CAPABLE?&aes_t4_##keylen##_##mode:&aes_##keylen##_##mode; }
856
#elif defined(S390X_aes_128_CAPABLE)
/* IBM S390X support */
typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KM-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-06)
         */
        struct {
            unsigned char k[32];
        } param;
        /* KM-AES parameter block - end */
    } km;
    unsigned int fc;
} S390X_AES_ECB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMO-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-08)
         */
        struct {
            unsigned char cv[16];
            unsigned char k[32];
        } param;
        /* KMO-AES parameter block - end */
    } kmo;
    unsigned int fc;

    int res;
} S390X_AES_OFB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMF-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-08)
         */
        struct {
            unsigned char cv[16];
            unsigned char k[32];
        } param;
        /* KMF-AES parameter block - end */
    } kmf;
    unsigned int fc;

    int res;
} S390X_AES_CFB_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * KMA-GCM-AES parameter block - begin
         * (see z/Architecture Principles of Operation >= SA22-7832-11)
         */
        struct {
            unsigned char reserved[12];
            union {
                unsigned int w;
                unsigned char b[4];
            } cv;
            union {
                unsigned long long g[2];
                unsigned char b[16];
            } t;
            unsigned char h[16];
            unsigned long long taadl;
            unsigned long long tpcl;
            union {
                unsigned long long g[2];
                unsigned int w[4];
            } j0;
            unsigned char k[32];
        } param;
        /* KMA-GCM-AES parameter block - end */
    } kma;
    unsigned int fc;
    int key_set;

    unsigned char *iv;
    int ivlen;
    int iv_set;
    int iv_gen;

    int taglen;

    unsigned char ares[16];
    unsigned char mres[16];
    unsigned char kres[16];
    int areslen;
    int mreslen;
    int kreslen;

    int tls_aad_len;
    uint64_t tls_enc_records;   /* Number of TLS records encrypted */
} S390X_AES_GCM_CTX;

typedef struct {
    union {
        OSSL_UNION_ALIGN;
        /*-
         * Padding is chosen so that ccm.kmac_param.k overlaps with key.k and
         * ccm.fc with key.k.rounds. Remember that on s390x, an AES_KEY's
         * rounds field is used to store the function code and that the key
         * schedule is not stored (if aes hardware support is detected).
         */
        struct {
            unsigned char pad[16];
            AES_KEY k;
        } key;

        struct {
            /*-
             * KMAC-AES parameter block - begin
             * (see z/Architecture Principles of Operation >= SA22-7832-08)
             */
            struct {
                union {
                    unsigned long long g[2];
                    unsigned char b[16];
                } icv;
                unsigned char k[32];
            } kmac_param;
            /* KMAC-AES parameter block - end */

            union {
                unsigned long long g[2];
                unsigned char b[16];
            } nonce;
            union {
                unsigned long long g[2];
                unsigned char b[16];
            } buf;

            unsigned long long blocks;
            int l;
            int m;
            int tls_aad_len;
            int iv_set;
            int tag_set;
            int len_set;
            int key_set;

            unsigned char pad[140];
            unsigned int fc;
        } ccm;
    } aes;
} S390X_AES_CCM_CTX;
1010
# define s390x_aes_init_key aes_init_key
static int s390x_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                              const unsigned char *iv, int enc);

# define S390X_AES_CBC_CTX EVP_AES_KEY

# define s390x_aes_cbc_init_key aes_init_key

# define s390x_aes_cbc_cipher aes_cbc_cipher
static int s390x_aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);

static int s390x_aes_ecb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    cctx->fc = S390X_AES_FC(keylen);
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->km.param.k, key, keylen);
    return 1;
}

static int s390x_aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_ECB_CTX *cctx = EVP_C_DATA(S390X_AES_ECB_CTX, ctx);

    s390x_km(in, len, out, cctx->fc, &cctx->km.param);
    return 1;
}

static int s390x_aes_ofb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *ivec, int enc)
{
    S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
    const unsigned char *iv = ctx->oiv;
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if (ivlen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH);
        return 0;
    }
    memcpy(cctx->kmo.param.cv, iv, ivlen);
    memcpy(cctx->kmo.param.k, key, keylen);
    cctx->fc = S390X_AES_FC(keylen);
    cctx->res = 0;
    return 1;
}

static int s390x_aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_OFB_CTX *cctx = EVP_C_DATA(S390X_AES_OFB_CTX, ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);
    int n = cctx->res;
    int rem;

    memcpy(cctx->kmo.param.cv, iv, ivlen);
    while (n && len) {
        *out = *in ^ cctx->kmo.param.cv[n];
        n = (n + 1) & 0xf;
        --len;
        ++in;
        ++out;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kmo(in, len, out, cctx->fc, &cctx->kmo.param);

        out += len;
        in += len;
    }

    if (rem) {
        s390x_km(cctx->kmo.param.cv, 16, cctx->kmo.param.cv, cctx->fc,
                 cctx->kmo.param.k);

        while (rem--) {
            out[n] = in[n] ^ cctx->kmo.param.cv[n];
            ++n;
        }
    }

    memcpy(iv, cctx->kmo.param.cv, ivlen);
    cctx->res = n;
    return 1;
}

static int s390x_aes_cfb_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *ivec, int enc)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const unsigned char *iv = ctx->oiv;
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if (ivlen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH);
        return 0;
    }
    cctx->fc = S390X_AES_FC(keylen);
    cctx->fc |= 16 << 24;       /* 16 bytes cipher feedback */
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    cctx->res = 0;
    memcpy(cctx->kmf.param.cv, iv, ivlen);
    memcpy(cctx->kmf.param.k, key, keylen);
    return 1;
}

static int s390x_aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);
    int n = cctx->res;
    int rem;
    unsigned char tmp;

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if (ivlen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH);
        return 0;
    }
    memcpy(cctx->kmf.param.cv, iv, ivlen);
    while (n && len) {
        tmp = *in;
        *out = cctx->kmf.param.cv[n] ^ tmp;
        cctx->kmf.param.cv[n] = enc ? *out : tmp;
        n = (n + 1) & 0xf;
        --len;
        ++in;
        ++out;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);

        out += len;
        in += len;
    }

    if (rem) {
        s390x_km(cctx->kmf.param.cv, 16, cctx->kmf.param.cv,
                 S390X_AES_FC(keylen), cctx->kmf.param.k);

        while (rem--) {
            tmp = in[n];
            out[n] = cctx->kmf.param.cv[n] ^ tmp;
            cctx->kmf.param.cv[n] = enc ? out[n] : tmp;
            ++n;
        }
    }

    memcpy(iv, cctx->kmf.param.cv, ivlen);
    cctx->res = n;
    return 1;
}

static int s390x_aes_cfb8_init_key(EVP_CIPHER_CTX *ctx,
                                   const unsigned char *key,
                                   const unsigned char *ivec, int enc)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const unsigned char *iv = ctx->oiv;
    const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);

    if (keylen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
        return 0;
    }
    if (ivlen <= 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_IV_LENGTH);
        return 0;
    }
    cctx->fc = S390X_AES_FC(keylen);
    cctx->fc |= 1 << 24;        /* 1 byte cipher feedback */
    if (!enc)
        cctx->fc |= S390X_DECRYPT;

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    memcpy(cctx->kmf.param.k, key, keylen);
    return 1;
}

static int s390x_aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                 const unsigned char *in, size_t len)
{
    S390X_AES_CFB_CTX *cctx = EVP_C_DATA(S390X_AES_CFB_CTX, ctx);
    const int ivlen = EVP_CIPHER_CTX_get_iv_length(ctx);
    unsigned char *iv = EVP_CIPHER_CTX_iv_noconst(ctx);

    memcpy(cctx->kmf.param.cv, iv, ivlen);
    s390x_kmf(in, len, out, cctx->fc, &cctx->kmf.param);
    memcpy(iv, cctx->kmf.param.cv, ivlen);
    return 1;
}

# define s390x_aes_cfb1_init_key aes_init_key

# define s390x_aes_cfb1_cipher aes_cfb1_cipher
static int s390x_aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                 const unsigned char *in, size_t len);

# define S390X_AES_CTR_CTX EVP_AES_KEY

# define s390x_aes_ctr_init_key aes_init_key

# define s390x_aes_ctr_cipher aes_ctr_cipher
static int s390x_aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);

/* iv + padding length for iv lengths != 12 */
# define S390X_gcm_ivpadlen(i) ((((i) + 15) >> 4 << 4) + 16)
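
/*-
 * Illustrative arithmetic (not part of the original source): the macro
 * rounds the iv length up to a multiple of 16 and adds one extra block for
 * the 128-bit encoded length field, e.g.
 *
 *     S390X_gcm_ivpadlen(10) = 16 + 16 = 32
 *     S390X_gcm_ivpadlen(17) = 32 + 16 = 48
 *
 * giving the buffer size KMA needs to GHASH a non-96-bit iv.
 */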

/*-
 * Process additional authenticated data. Returns 0 on success. Code is
 * big-endian.
 */
static int s390x_aes_gcm_aad(S390X_AES_GCM_CTX *ctx, const unsigned char *aad,
                             size_t len)
{
    unsigned long long alen;
    int n, rem;

    if (ctx->kma.param.tpcl)
        return -2;

    alen = ctx->kma.param.taadl + len;
    if (alen > (U64(1) << 61) || (sizeof(len) == 8 && alen < len))
        return -1;
    ctx->kma.param.taadl = alen;

    n = ctx->areslen;
    if (n) {
        while (n && len) {
            ctx->ares[n] = *aad;
            n = (n + 1) & 0xf;
            ++aad;
            --len;
        }
        /* ctx->ares contains a complete block if offset has wrapped around */
        if (!n) {
            s390x_kma(ctx->ares, 16, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
            ctx->fc |= S390X_KMA_HS;
        }
        ctx->areslen = n;
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kma(aad, len, NULL, 0, NULL, ctx->fc, &ctx->kma.param);
        aad += len;
        ctx->fc |= S390X_KMA_HS;
    }

    if (rem) {
        ctx->areslen = rem;

        do {
            --rem;
            ctx->ares[rem] = aad[rem];
        } while (rem);
    }
    return 0;
}
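
/*-
 * Illustrative note (not part of the original source): taadl accumulates
 * the AAD length in bytes and is converted to bits at Final(), so the
 * 2^61-byte cap above corresponds to GCM's 2^64-bit limit on authenticated
 * data; -2 flags AAD supplied after payload processing has already started
 * (tpcl != 0), which GCM does not permit.
 */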

/*-
 * En/de-crypt plain/cipher-text and authenticate ciphertext. Returns 0 for
 * success. Code is big-endian.
 */
static int s390x_aes_gcm(S390X_AES_GCM_CTX *ctx, const unsigned char *in,
                         unsigned char *out, size_t len)
{
    const unsigned char *inptr;
    unsigned long long mlen;
    union {
        unsigned int w[4];
        unsigned char b[16];
    } buf;
    size_t inlen;
    int n, rem, i;

    mlen = ctx->kma.param.tpcl + len;
    if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
        return -1;
    ctx->kma.param.tpcl = mlen;

    n = ctx->mreslen;
    if (n) {
        inptr = in;
        inlen = len;
        while (n && inlen) {
            ctx->mres[n] = *inptr;
            n = (n + 1) & 0xf;
            ++inptr;
            --inlen;
        }
        /* ctx->mres contains a complete block if offset has wrapped around */
        if (!n) {
            s390x_kma(ctx->ares, ctx->areslen, ctx->mres, 16, buf.b,
                      ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
            ctx->fc |= S390X_KMA_HS;
            ctx->areslen = 0;

            /* previous call already encrypted/decrypted its remainder,
             * see comment below */
            n = ctx->mreslen;
            while (n) {
                *out = buf.b[n];
                n = (n + 1) & 0xf;
                ++out;
                ++in;
                --len;
            }
            ctx->mreslen = 0;
        }
    }

    rem = len & 0xf;

    len &= ~(size_t)0xf;
    if (len) {
        s390x_kma(ctx->ares, ctx->areslen, in, len, out,
                  ctx->fc | S390X_KMA_LAAD, &ctx->kma.param);
        in += len;
        out += len;
        ctx->fc |= S390X_KMA_HS;
        ctx->areslen = 0;
    }

    /*-
     * If there is a remainder, it has to be saved such that it can be
     * processed by kma later. However, we also have to do the for-now
     * unauthenticated encryption/decryption part here and now...
     */
    if (rem) {
        if (!ctx->mreslen) {
            buf.w[0] = ctx->kma.param.j0.w[0];
            buf.w[1] = ctx->kma.param.j0.w[1];
            buf.w[2] = ctx->kma.param.j0.w[2];
            buf.w[3] = ctx->kma.param.cv.w + 1;
            s390x_km(buf.b, 16, ctx->kres, ctx->fc & 0x1f, &ctx->kma.param.k);
        }

        n = ctx->mreslen;
        for (i = 0; i < rem; i++) {
            ctx->mres[n + i] = in[i];
            out[i] = in[i] ^ ctx->kres[n + i];
        }

        ctx->mreslen += rem;
    }
    return 0;
}

/*-
 * Initialize context structure. Code is big-endian.
 */
static void s390x_aes_gcm_setiv(S390X_AES_GCM_CTX *ctx,
                                const unsigned char *iv)
{
    ctx->kma.param.t.g[0] = 0;
    ctx->kma.param.t.g[1] = 0;
    ctx->kma.param.tpcl = 0;
    ctx->kma.param.taadl = 0;
    ctx->mreslen = 0;
    ctx->areslen = 0;
    ctx->kreslen = 0;

    if (ctx->ivlen == 12) {
        memcpy(&ctx->kma.param.j0, iv, ctx->ivlen);
        ctx->kma.param.j0.w[3] = 1;
        ctx->kma.param.cv.w = 1;
    } else {
        /* ctx->iv has the right size and is already padded. */
        memcpy(ctx->iv, iv, ctx->ivlen);
        s390x_kma(ctx->iv, S390X_gcm_ivpadlen(ctx->ivlen), NULL, 0, NULL,
                  ctx->fc, &ctx->kma.param);
        ctx->fc |= S390X_KMA_HS;

        ctx->kma.param.j0.g[0] = ctx->kma.param.t.g[0];
        ctx->kma.param.j0.g[1] = ctx->kma.param.t.g[1];
        ctx->kma.param.cv.w = ctx->kma.param.j0.w[3];
        ctx->kma.param.t.g[0] = 0;
        ctx->kma.param.t.g[1] = 0;
    }
}
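
/*-
 * Illustrative note (not part of the original source): this mirrors the two
 * J0 cases of NIST SP 800-38D. For a 96-bit iv, J0 = IV || 0^31 || 1, hence
 * j0.w[3] = 1 and the counter starts at 1; otherwise J0 = GHASH over the
 * zero-padded iv followed by its 128-bit encoded bit length, computed here
 * by running KMA with zero-length AAD and plaintext and reading the tag
 * field back as J0.
 */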

/*-
 * Performs various operations on the context structure depending on control
 * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
 * Code is big-endian.
 */
static int s390x_aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);
    S390X_AES_GCM_CTX *gctx_out;
    EVP_CIPHER_CTX *out;
    unsigned char *buf;
    int ivlen, enc, len;

    switch (type) {
    case EVP_CTRL_INIT:
        ivlen = EVP_CIPHER_get_iv_length(c->cipher);
        gctx->key_set = 0;
        gctx->iv_set = 0;
        gctx->ivlen = ivlen;
        gctx->iv = c->iv;
        gctx->taglen = -1;
        gctx->iv_gen = 0;
        gctx->tls_aad_len = -1;
        return 1;

    case EVP_CTRL_GET_IVLEN:
        *(int *)ptr = gctx->ivlen;
        return 1;

    case EVP_CTRL_AEAD_SET_IVLEN:
        if (arg <= 0)
            return 0;

        if (arg != 12) {
            len = S390X_gcm_ivpadlen(arg);

            /* Allocate memory for iv if needed. */
            if (gctx->ivlen == 12 || len > S390X_gcm_ivpadlen(gctx->ivlen)) {
                if (gctx->iv != c->iv)
                    OPENSSL_free(gctx->iv);

                if ((gctx->iv = OPENSSL_malloc(len)) == NULL) {
                    ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
                    return 0;
                }
            }
            /* Add padding. */
            memset(gctx->iv + arg, 0, len - arg - 8);
            *((unsigned long long *)(gctx->iv + len - 8)) = arg << 3;
        }
        gctx->ivlen = arg;
        return 1;

    case EVP_CTRL_AEAD_SET_TAG:
        buf = EVP_CIPHER_CTX_buf_noconst(c);
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (arg <= 0 || arg > 16 || enc)
            return 0;

        memcpy(buf, ptr, arg);
        gctx->taglen = arg;
        return 1;

    case EVP_CTRL_AEAD_GET_TAG:
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (arg <= 0 || arg > 16 || !enc || gctx->taglen < 0)
            return 0;

        memcpy(ptr, gctx->kma.param.t.b, arg);
        return 1;

    case EVP_CTRL_GCM_SET_IV_FIXED:
        /* Special case: -1 length restores whole iv */
        if (arg == -1) {
            memcpy(gctx->iv, ptr, gctx->ivlen);
            gctx->iv_gen = 1;
            return 1;
        }
        /*
         * Fixed field must be at least 4 bytes and invocation field at least
         * 8.
         */
        if ((arg < 4) || (gctx->ivlen - arg) < 8)
            return 0;

        if (arg)
            memcpy(gctx->iv, ptr, arg);

        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (enc && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
            return 0;

        gctx->iv_gen = 1;
        return 1;

    case EVP_CTRL_GCM_IV_GEN:
        if (gctx->iv_gen == 0 || gctx->key_set == 0)
            return 0;

        s390x_aes_gcm_setiv(gctx, gctx->iv);

        if (arg <= 0 || arg > gctx->ivlen)
            arg = gctx->ivlen;

        memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
        /*
         * Invocation field will be at least 8 bytes in size and so no need
         * to check wrap around or increment more than last 8 bytes.
         */
        ctr64_inc(gctx->iv + gctx->ivlen - 8);
        gctx->iv_set = 1;
        return 1;

    case EVP_CTRL_GCM_SET_IV_INV:
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (gctx->iv_gen == 0 || gctx->key_set == 0 || enc)
            return 0;

        memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
        s390x_aes_gcm_setiv(gctx, gctx->iv);
        gctx->iv_set = 1;
        return 1;

    case EVP_CTRL_AEAD_TLS1_AAD:
        /* Save the aad for later use. */
        if (arg != EVP_AEAD_TLS1_AAD_LEN)
            return 0;

        buf = EVP_CIPHER_CTX_buf_noconst(c);
        memcpy(buf, ptr, arg);
        gctx->tls_aad_len = arg;
        gctx->tls_enc_records = 0;

        len = buf[arg - 2] << 8 | buf[arg - 1];
        /* Correct length for explicit iv. */
        if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
            return 0;
        len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;

        /* If decrypting correct for tag too. */
        enc = EVP_CIPHER_CTX_is_encrypting(c);
        if (!enc) {
            if (len < EVP_GCM_TLS_TAG_LEN)
                return 0;
            len -= EVP_GCM_TLS_TAG_LEN;
        }
        buf[arg - 2] = len >> 8;
        buf[arg - 1] = len & 0xff;
        /* Extra padding: tag appended to record. */
        return EVP_GCM_TLS_TAG_LEN;

    case EVP_CTRL_COPY:
        out = ptr;
        gctx_out = EVP_C_DATA(S390X_AES_GCM_CTX, out);

        if (gctx->iv == c->iv) {
            gctx_out->iv = out->iv;
        } else {
            len = S390X_gcm_ivpadlen(gctx->ivlen);

            if ((gctx_out->iv = OPENSSL_malloc(len)) == NULL) {
                ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
                return 0;
            }

            memcpy(gctx_out->iv, gctx->iv, len);
        }
        return 1;

    default:
        return -1;
    }
}

/*-
 * Set key and/or iv. Returns 1 on success. Otherwise 0 is returned.
 */
static int s390x_aes_gcm_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    int keylen;

    if (iv == NULL && key == NULL)
        return 1;

    if (key != NULL) {
        keylen = EVP_CIPHER_CTX_get_key_length(ctx);
        if (keylen <= 0) {
            ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
            return 0;
        }

        memcpy(&gctx->kma.param.k, key, keylen);

        gctx->fc = S390X_AES_FC(keylen);
        if (!enc)
            gctx->fc |= S390X_DECRYPT;

        if (iv == NULL && gctx->iv_set)
            iv = gctx->iv;

        if (iv != NULL) {
            s390x_aes_gcm_setiv(gctx, iv);
            gctx->iv_set = 1;
        }
        gctx->key_set = 1;
    } else {
        if (gctx->key_set)
            s390x_aes_gcm_setiv(gctx, iv);
        else
            memcpy(gctx->iv, iv, gctx->ivlen);

        gctx->iv_set = 1;
        gctx->iv_gen = 0;
    }
    return 1;
}

/*-
 * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
 * if successful. Otherwise -1 is returned. Code is big-endian.
 */
static int s390x_aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                    const unsigned char *in, size_t len)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    const unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
    const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
    int rv = -1;

    if (out != in || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
        return -1;

    /*
     * Check for too many keys as per FIPS 140-2 IG A.5 "Key/IV Pair Uniqueness
     * Requirements from SP 800-38D". The requirement is for one party to the
     * communication to fail after 2^64 - 1 keys. We do this on the encrypting
     * side only.
     */
    if (ctx->encrypt && ++gctx->tls_enc_records == 0) {
        ERR_raise(ERR_LIB_EVP, EVP_R_TOO_MANY_RECORDS);
        goto err;
    }

    if (EVP_CIPHER_CTX_ctrl(ctx, enc ? EVP_CTRL_GCM_IV_GEN
                                     : EVP_CTRL_GCM_SET_IV_INV,
                            EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
        goto err;

    in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
    out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
    len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;

    gctx->kma.param.taadl = gctx->tls_aad_len << 3;
    gctx->kma.param.tpcl = len << 3;
    s390x_kma(buf, gctx->tls_aad_len, in, len, out,
              gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);

    if (enc) {
        memcpy(out + len, gctx->kma.param.t.b, EVP_GCM_TLS_TAG_LEN);
        rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
    } else {
        if (CRYPTO_memcmp(gctx->kma.param.t.b, in + len,
                          EVP_GCM_TLS_TAG_LEN)) {
            OPENSSL_cleanse(out, len);
            goto err;
        }
        rv = len;
    }
err:
    gctx->iv_set = 0;
    gctx->tls_aad_len = -1;
    return rv;
}
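
/*-
 * Illustrative record layout (not part of the original source): a TLS 1.2
 * AES-GCM record processed above is
 *
 *     explicit_iv (8 bytes) || ciphertext (len) || tag (16 bytes)
 *
 * which is why in/out are advanced by EVP_GCM_TLS_EXPLICIT_IV_LEN and len
 * is reduced by the explicit iv plus tag before the single KMA call
 * en/de-crypts and authenticates the whole record in one pass.
 */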

/*-
 * Called from EVP layer to initialize context, process additional
 * authenticated data, en/de-crypt plain/cipher-text and authenticate
 * ciphertext or process a TLS packet, depending on context. Returns bytes
 * written on success. Otherwise -1 is returned. Code is big-endian.
 */
static int s390x_aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, ctx);
    unsigned char *buf, tmp[16];
    int enc;

    if (!gctx->key_set)
        return -1;

    if (gctx->tls_aad_len >= 0)
        return s390x_aes_gcm_tls_cipher(ctx, out, in, len);

    if (!gctx->iv_set)
        return -1;

    if (in != NULL) {
        if (out == NULL) {
            if (s390x_aes_gcm_aad(gctx, in, len))
                return -1;
        } else {
            if (s390x_aes_gcm(gctx, in, out, len))
                return -1;
        }
        return len;
    } else {
        gctx->kma.param.taadl <<= 3;
        gctx->kma.param.tpcl <<= 3;
        s390x_kma(gctx->ares, gctx->areslen, gctx->mres, gctx->mreslen, tmp,
                  gctx->fc | S390X_KMA_LAAD | S390X_KMA_LPC, &gctx->kma.param);
        /* recall that we already did en-/decrypt gctx->mres
         * and returned it to caller... */
        OPENSSL_cleanse(tmp, gctx->mreslen);
        gctx->iv_set = 0;

        enc = EVP_CIPHER_CTX_is_encrypting(ctx);
        if (enc) {
            gctx->taglen = 16;
        } else {
            if (gctx->taglen < 0)
                return -1;

            buf = EVP_CIPHER_CTX_buf_noconst(ctx);
            if (CRYPTO_memcmp(buf, gctx->kma.param.t.b, gctx->taglen))
                return -1;
        }
        return 0;
    }
}

static int s390x_aes_gcm_cleanup(EVP_CIPHER_CTX *c)
{
    S390X_AES_GCM_CTX *gctx = EVP_C_DATA(S390X_AES_GCM_CTX, c);

    if (gctx == NULL)
        return 0;

    if (gctx->iv != c->iv)
        OPENSSL_free(gctx->iv);

    OPENSSL_cleanse(gctx, sizeof(*gctx));
    return 1;
}

# define S390X_AES_XTS_CTX EVP_AES_XTS_CTX

# define s390x_aes_xts_init_key aes_xts_init_key
static int s390x_aes_xts_init_key(EVP_CIPHER_CTX *ctx,
                                  const unsigned char *key,
                                  const unsigned char *iv, int enc);
# define s390x_aes_xts_cipher aes_xts_cipher
static int s390x_aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                const unsigned char *in, size_t len);
# define s390x_aes_xts_ctrl aes_xts_ctrl
static int s390x_aes_xts_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
# define s390x_aes_xts_cleanup aes_xts_cleanup

/*-
 * Set nonce and length fields. Code is big-endian.
 */
static inline void s390x_aes_ccm_setiv(S390X_AES_CCM_CTX *ctx,
                                       const unsigned char *nonce,
                                       size_t mlen)
{
    ctx->aes.ccm.nonce.b[0] &= ~S390X_CCM_AAD_FLAG;
    ctx->aes.ccm.nonce.g[1] = mlen;
    memcpy(ctx->aes.ccm.nonce.b + 1, nonce, 15 - ctx->aes.ccm.l);
}
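
/*-
 * Illustrative B0 layout (not part of the original source; per RFC 3610):
 * nonce.b[0] is the flags octet, carrying (l - 1) in bits 0-2 and
 * ((m - 2) / 2) in bits 3-5, with the AAD bit (0x40) set separately once
 * aad arrives; bytes 1 .. 15-l hold the nonce and the trailing l bytes the
 * message length. Writing g[1] = mlen first and then copying the nonce
 * over bytes 1 .. 15-l leaves exactly the low l bytes as the big-endian
 * length field.
 */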

/*-
 * Process additional authenticated data. Code is big-endian.
 */
static void s390x_aes_ccm_aad(S390X_AES_CCM_CTX *ctx, const unsigned char *aad,
                              size_t alen)
{
    unsigned char *ptr;
    int i, rem;

    if (!alen)
        return;

    ctx->aes.ccm.nonce.b[0] |= S390X_CCM_AAD_FLAG;

    /* Suppress 'type-punned pointer dereference' warning. */
    ptr = ctx->aes.ccm.buf.b;

    if (alen < ((1 << 16) - (1 << 8))) {
        *(uint16_t *)ptr = alen;
        i = 2;
    } else if (sizeof(alen) == 8
               && alen >= (size_t)1 << (32 % (sizeof(alen) * 8))) {
        *(uint16_t *)ptr = 0xffff;
        *(uint64_t *)(ptr + 2) = alen;
        i = 10;
    } else {
        *(uint16_t *)ptr = 0xfffe;
        *(uint32_t *)(ptr + 2) = alen;
        i = 6;
    }

    while (i < 16 && alen) {
        ctx->aes.ccm.buf.b[i] = *aad;
        ++aad;
        --alen;
        ++i;
    }
    while (i < 16) {
        ctx->aes.ccm.buf.b[i] = 0;
        ++i;
    }

    ctx->aes.ccm.kmac_param.icv.g[0] = 0;
    ctx->aes.ccm.kmac_param.icv.g[1] = 0;
    s390x_kmac(ctx->aes.ccm.nonce.b, 32, ctx->aes.ccm.fc,
               &ctx->aes.ccm.kmac_param);
    ctx->aes.ccm.blocks += 2;

    rem = alen & 0xf;
    alen &= ~(size_t)0xf;
    if (alen) {
        s390x_kmac(aad, alen, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
        ctx->aes.ccm.blocks += alen >> 4;
        aad += alen;
    }
    if (rem) {
        for (i = 0; i < rem; i++)
            ctx->aes.ccm.kmac_param.icv.b[i] ^= aad[i];

        s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
                 ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
                 ctx->aes.ccm.kmac_param.k);
        ctx->aes.ccm.blocks++;
    }
}

/*-
 * En/de-crypt plain/cipher-text. Compute tag from plaintext. Returns 0 for
 * success.
 */
static int s390x_aes_ccm(S390X_AES_CCM_CTX *ctx, const unsigned char *in,
                         unsigned char *out, size_t len, int enc)
{
    size_t n, rem;
    unsigned int i, l, num;
    unsigned char flags;

    flags = ctx->aes.ccm.nonce.b[0];
    if (!(flags & S390X_CCM_AAD_FLAG)) {
        s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.kmac_param.icv.b,
                 ctx->aes.ccm.fc, ctx->aes.ccm.kmac_param.k);
        ctx->aes.ccm.blocks++;
    }
    l = flags & 0x7;
    ctx->aes.ccm.nonce.b[0] = l;

    /*-
     * Reconstruct length from encoded length field
     * and initialize it with counter value.
     */
    n = 0;
    for (i = 15 - l; i < 15; i++) {
        n |= ctx->aes.ccm.nonce.b[i];
        ctx->aes.ccm.nonce.b[i] = 0;
        n <<= 8;
    }
    n |= ctx->aes.ccm.nonce.b[15];
    ctx->aes.ccm.nonce.b[15] = 1;

    if (n != len)
        return -1;              /* length mismatch */

    if (enc) {
        /* Two operations per block plus one for tag encryption */
        ctx->aes.ccm.blocks += (((len + 15) >> 4) << 1) + 1;
        if (ctx->aes.ccm.blocks > (1ULL << 61))
            return -2;          /* too much data */
    }

    num = 0;
    rem = len & 0xf;
    len &= ~(size_t)0xf;

    if (enc) {
        /* mac-then-encrypt */
        if (len)
            s390x_kmac(in, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
        if (rem) {
            for (i = 0; i < rem; i++)
                ctx->aes.ccm.kmac_param.icv.b[i] ^= in[len + i];

            s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
                     ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
                     ctx->aes.ccm.kmac_param.k);
        }

        CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
                                    ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
                                    &num, (ctr128_f)AES_ctr32_encrypt);
    } else {
        /* decrypt-then-mac */
        CRYPTO_ctr128_encrypt_ctr32(in, out, len + rem, &ctx->aes.key.k,
                                    ctx->aes.ccm.nonce.b, ctx->aes.ccm.buf.b,
                                    &num, (ctr128_f)AES_ctr32_encrypt);

        if (len)
            s390x_kmac(out, len, ctx->aes.ccm.fc, &ctx->aes.ccm.kmac_param);
        if (rem) {
            for (i = 0; i < rem; i++)
                ctx->aes.ccm.kmac_param.icv.b[i] ^= out[len + i];

            s390x_km(ctx->aes.ccm.kmac_param.icv.b, 16,
                     ctx->aes.ccm.kmac_param.icv.b, ctx->aes.ccm.fc,
                     ctx->aes.ccm.kmac_param.k);
        }
    }
    /* encrypt tag */
    for (i = 15 - l; i < 16; i++)
        ctx->aes.ccm.nonce.b[i] = 0;

    s390x_km(ctx->aes.ccm.nonce.b, 16, ctx->aes.ccm.buf.b, ctx->aes.ccm.fc,
             ctx->aes.ccm.kmac_param.k);
    ctx->aes.ccm.kmac_param.icv.g[0] ^= ctx->aes.ccm.buf.g[0];
    ctx->aes.ccm.kmac_param.icv.g[1] ^= ctx->aes.ccm.buf.g[1];

    ctx->aes.ccm.nonce.b[0] = flags;    /* restore flags field */
    return 0;
}

/*-
 * En/de-crypt and authenticate TLS packet. Returns the number of bytes written
 * if successful. Otherwise -1 is returned.
 */
static int s390x_aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                    const unsigned char *in, size_t len)
{
    S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
    unsigned char *ivec = ctx->iv;
    unsigned char *buf = EVP_CIPHER_CTX_buf_noconst(ctx);
    const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);

    if (out != in
            || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->aes.ccm.m))
        return -1;

    if (enc) {
        /* Set explicit iv (sequence number). */
        memcpy(out, buf, EVP_CCM_TLS_EXPLICIT_IV_LEN);
    }

    len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
    /*-
     * Get explicit iv (sequence number). We already have fixed iv
     * (server/client_write_iv) here.
     */
    memcpy(ivec + EVP_CCM_TLS_FIXED_IV_LEN, in, EVP_CCM_TLS_EXPLICIT_IV_LEN);
    s390x_aes_ccm_setiv(cctx, ivec, len);

    /* Process aad (sequence number|type|version|length) */
    s390x_aes_ccm_aad(cctx, buf, cctx->aes.ccm.tls_aad_len);

    in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
    out += EVP_CCM_TLS_EXPLICIT_IV_LEN;

    if (enc) {
        if (s390x_aes_ccm(cctx, in, out, len, enc))
            return -1;

        memcpy(out + len, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
        return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->aes.ccm.m;
    } else {
        if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
            if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, in + len,
                               cctx->aes.ccm.m))
                return len;
        }

        OPENSSL_cleanse(out, len);
        return -1;
    }
}
2020
2021/*-
2022 * Set key and flag field and/or iv. Returns 1 if successful. Otherwise 0 is
2023 * returned.
2024 */
2025static int s390x_aes_ccm_init_key(EVP_CIPHER_CTX *ctx,
2026 const unsigned char *key,
2027 const unsigned char *iv, int enc)
2028{
2029 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
2030 int keylen;
2031
2032 if (iv == NULL && key == NULL)
2033 return 1;
2034
2035 if (key != NULL) {
2036 keylen = EVP_CIPHER_CTX_get_key_length(ctx);
2037 if (keylen <= 0) {
2038 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
2039 return 0;
2040 }
2041
2042 cctx->aes.ccm.fc = S390X_AES_FC(keylen);
2043 memcpy(cctx->aes.ccm.kmac_param.k, key, keylen);
2044
2045 /* Store encoded m and l. */
2046 cctx->aes.ccm.nonce.b[0] = ((cctx->aes.ccm.l - 1) & 0x7)
2047 | (((cctx->aes.ccm.m - 2) >> 1) & 0x7) << 3;
2048 memset(cctx->aes.ccm.nonce.b + 1, 0,
2049 sizeof(cctx->aes.ccm.nonce.b) - 1); /* don't overrun the nonce */
2050 cctx->aes.ccm.blocks = 0;
2051
2052 cctx->aes.ccm.key_set = 1;
2053 }
2054
2055 if (iv != NULL) {
2056 memcpy(ctx->iv, iv, 15 - cctx->aes.ccm.l);
2057
2058 cctx->aes.ccm.iv_set = 1;
2059 }
2060
2061 return 1;
2062}
2063
2064/*-
2065 * Called from EVP layer to initialize context, process additional
2066 * authenticated data, en/de-crypt plain/cipher-text and authenticate
2067 * plaintext or process a TLS packet, depending on context. Returns bytes
2068 * written on success. Otherwise -1 is returned.
2069 */
2070static int s390x_aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2071 const unsigned char *in, size_t len)
2072{
2073 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, ctx);
2074 const int enc = EVP_CIPHER_CTX_is_encrypting(ctx);
2075 int rv;
2076 unsigned char *buf;
2077
2078 if (!cctx->aes.ccm.key_set)
2079 return -1;
2080
2081 if (cctx->aes.ccm.tls_aad_len >= 0)
2082 return s390x_aes_ccm_tls_cipher(ctx, out, in, len);
2083
2084 /*-
2085 * Final(): Does not return any data. Recall that CCM is mac-then-encrypt,
2086 * so integrity must already be checked at Update(), i.e., before
2087 * potentially corrupted data is output.
2088 */
2089 if (in == NULL && out != NULL)
2090 return 0;
2091
2092 if (!cctx->aes.ccm.iv_set)
2093 return -1;
2094
2095 if (out == NULL) {
2096 /* Update(): Pass message length. */
2097 if (in == NULL) {
2098 s390x_aes_ccm_setiv(cctx, ctx->iv, len);
2099
2100 cctx->aes.ccm.len_set = 1;
2101 return len;
2102 }
2103
2104 /* Update(): Process aad. */
2105 if (!cctx->aes.ccm.len_set && len)
2106 return -1;
2107
2108 s390x_aes_ccm_aad(cctx, in, len);
2109 return len;
2110 }
2111
2112 /* The tag must be set before actually decrypting data */
2113 if (!enc && !cctx->aes.ccm.tag_set)
2114 return -1;
2115
2116 /* Update(): Process message. */
2117
2118 if (!cctx->aes.ccm.len_set) {
2119 /*-
2120 * In case message length was not previously set explicitly via
2121 * Update(), set it now.
2122 */
2123 s390x_aes_ccm_setiv(cctx, ctx->iv, len);
2124
2125 cctx->aes.ccm.len_set = 1;
2126 }
2127
2128 if (enc) {
2129 if (s390x_aes_ccm(cctx, in, out, len, enc))
2130 return -1;
2131
2132 cctx->aes.ccm.tag_set = 1;
2133 return len;
2134 } else {
2135 rv = -1;
2136
2137 if (!s390x_aes_ccm(cctx, in, out, len, enc)) {
2138 buf = EVP_CIPHER_CTX_buf_noconst(ctx);
2139 if (!CRYPTO_memcmp(cctx->aes.ccm.kmac_param.icv.b, buf,
2140 cctx->aes.ccm.m))
2141 rv = len;
2142 }
2143
2144 if (rv == -1)
2145 OPENSSL_cleanse(out, len);
2146
2147 cctx->aes.ccm.iv_set = 0;
2148 cctx->aes.ccm.tag_set = 0;
2149 cctx->aes.ccm.len_set = 0;
2150 return rv;
2151 }
2152}
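
/*-
 * Illustrative EVP-level call sequence exercising the Update()/Final()
 * branches above (a sketch, not part of the original source; the function
 * name is hypothetical and error handling is omitted):
 */
#if 0
static int ccm_encrypt_example(const unsigned char *key,    /* 16 bytes */
                               const unsigned char *nonce,  /* 11 bytes */
                               const unsigned char *aad, int aadlen,
                               const unsigned char *msg, int msglen,
                               unsigned char *out, unsigned char tag[12])
{
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
    int outl;

    EVP_EncryptInit_ex(c, EVP_aes_128_ccm(), NULL, NULL, NULL);
    EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_IVLEN, 11, NULL); /* L = 4 */
    EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_SET_TAG, 12, NULL);   /* M = 12 */
    EVP_EncryptInit_ex(c, NULL, NULL, key, nonce);
    /* Update() with in == out == NULL passes the total message length. */
    EVP_EncryptUpdate(c, NULL, &outl, NULL, msglen);
    /* Update() with out == NULL processes the AAD. */
    EVP_EncryptUpdate(c, NULL, &outl, aad, aadlen);
    /* Update() with both buffers encrypts and authenticates the message. */
    EVP_EncryptUpdate(c, out, &outl, msg, msglen);
    EVP_EncryptFinal_ex(c, out + outl, &outl);
    EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_GET_TAG, 12, tag);
    EVP_CIPHER_CTX_free(c);
    return 1;
}
#endif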
2153
2154/*-
2155 * Performs various operations on the context structure depending on control
2156 * type. Returns 1 for success, 0 for failure and -1 for unknown control type.
2157 * Code is big-endian.
2158 */
2159static int s390x_aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
2160{
2161 S390X_AES_CCM_CTX *cctx = EVP_C_DATA(S390X_AES_CCM_CTX, c);
2162 unsigned char *buf;
2163 int enc, len;
2164
2165 switch (type) {
2166 case EVP_CTRL_INIT:
2167 cctx->aes.ccm.key_set = 0;
2168 cctx->aes.ccm.iv_set = 0;
2169 cctx->aes.ccm.l = 8;
2170 cctx->aes.ccm.m = 12;
2171 cctx->aes.ccm.tag_set = 0;
2172 cctx->aes.ccm.len_set = 0;
2173 cctx->aes.ccm.tls_aad_len = -1;
2174 return 1;
2175
2176 case EVP_CTRL_GET_IVLEN:
2177 *(int *)ptr = 15 - cctx->aes.ccm.l;
2178 return 1;
2179
2180 case EVP_CTRL_AEAD_TLS1_AAD:
2181 if (arg != EVP_AEAD_TLS1_AAD_LEN)
2182 return 0;
2183
2184 /* Save the aad for later use. */
2185 buf = EVP_CIPHER_CTX_buf_noconst(c);
2186 memcpy(buf, ptr, arg);
2187 cctx->aes.ccm.tls_aad_len = arg;
2188
2189 len = buf[arg - 2] << 8 | buf[arg - 1];
2190 if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
2191 return 0;
2192
2193 /* Correct length for explicit iv. */
2194 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
2195
2196 enc = EVP_CIPHER_CTX_is_encrypting(c);
2197 if (!enc) {
2198 if (len < cctx->aes.ccm.m)
2199 return 0;
2200
2201 /* Correct length for tag. */
2202 len -= cctx->aes.ccm.m;
2203 }
2204
2205 buf[arg - 2] = len >> 8;
2206 buf[arg - 1] = len & 0xff;
2207
2208 /* Extra padding: tag appended to record. */
2209 return cctx->aes.ccm.m;
2210
2211 case EVP_CTRL_CCM_SET_IV_FIXED:
2212 if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
2213 return 0;
2214
2215 /* Copy to first part of the iv. */
2216 memcpy(c->iv, ptr, arg);
2217 return 1;
2218
2219 case EVP_CTRL_AEAD_SET_IVLEN:
2220 arg = 15 - arg;
2221 /* fall-through */
2222
2223 case EVP_CTRL_CCM_SET_L:
2224 if (arg < 2 || arg > 8)
2225 return 0;
2226
2227 cctx->aes.ccm.l = arg;
2228 return 1;
2229
2230 case EVP_CTRL_AEAD_SET_TAG:
2231 if ((arg & 1) || arg < 4 || arg > 16)
2232 return 0;
2233
2234 enc = EVP_CIPHER_CTX_is_encrypting(c);
2235 if (enc && ptr)
2236 return 0;
2237
2238 if (ptr) {
2239 cctx->aes.ccm.tag_set = 1;
2240 buf = EVP_CIPHER_CTX_buf_noconst(c);
2241 memcpy(buf, ptr, arg);
2242 }
2243
2244 cctx->aes.ccm.m = arg;
2245 return 1;
2246
2247 case EVP_CTRL_AEAD_GET_TAG:
2248 enc = EVP_CIPHER_CTX_is_encrypting(c);
2249 if (!enc || !cctx->aes.ccm.tag_set)
2250 return 0;
2251
2252 if (arg < cctx->aes.ccm.m)
2253 return 0;
2254
2255 memcpy(ptr, cctx->aes.ccm.kmac_param.icv.b, cctx->aes.ccm.m);
2256 cctx->aes.ccm.tag_set = 0;
2257 cctx->aes.ccm.iv_set = 0;
2258 cctx->aes.ccm.len_set = 0;
2259 return 1;
2260
2261 case EVP_CTRL_COPY:
2262 return 1;
2263
2264 default:
2265 return -1;
2266 }
2267}
2268
2269# define s390x_aes_ccm_cleanup aes_ccm_cleanup
2270
2271# ifndef OPENSSL_NO_OCB
2272# define S390X_AES_OCB_CTX EVP_AES_OCB_CTX
2273
2274# define s390x_aes_ocb_init_key aes_ocb_init_key
2275static int s390x_aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2276 const unsigned char *iv, int enc);
2277# define s390x_aes_ocb_cipher aes_ocb_cipher
2278static int s390x_aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2279 const unsigned char *in, size_t len);
2280# define s390x_aes_ocb_cleanup aes_ocb_cleanup
2281static int s390x_aes_ocb_cleanup(EVP_CIPHER_CTX *);
2282# define s390x_aes_ocb_ctrl aes_ocb_ctrl
2283static int s390x_aes_ocb_ctrl(EVP_CIPHER_CTX *, int type, int arg, void *ptr);
2284# endif
2285
2286# ifndef OPENSSL_NO_SIV
2287# define S390X_AES_SIV_CTX EVP_AES_SIV_CTX
2288
2289# define s390x_aes_siv_init_key aes_siv_init_key
2290# define s390x_aes_siv_cipher aes_siv_cipher
2291# define s390x_aes_siv_cleanup aes_siv_cleanup
2292# define s390x_aes_siv_ctrl aes_siv_ctrl
2293# endif
2294
2295# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode, \
2296 MODE,flags) \
2297static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
2298 nid##_##keylen##_##nmode,blocksize, \
2299 keylen / 8, \
2300 ivlen, \
2301 flags | EVP_CIPH_##MODE##_MODE, \
2302 EVP_ORIG_GLOBAL, \
2303 s390x_aes_##mode##_init_key, \
2304 s390x_aes_##mode##_cipher, \
2305 NULL, \
2306 sizeof(S390X_AES_##MODE##_CTX), \
2307 NULL, \
2308 NULL, \
2309 NULL, \
2310 NULL \
2311}; \
2312static const EVP_CIPHER aes_##keylen##_##mode = { \
2313 nid##_##keylen##_##nmode, \
2314 blocksize, \
2315 keylen / 8, \
2316 ivlen, \
2317 flags | EVP_CIPH_##MODE##_MODE, \
2318 EVP_ORIG_GLOBAL, \
2319 aes_init_key, \
2320 aes_##mode##_cipher, \
2321 NULL, \
2322 sizeof(EVP_AES_KEY), \
2323 NULL, \
2324 NULL, \
2325 NULL, \
2326 NULL \
2327}; \
2328const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2329{ \
2330 return S390X_aes_##keylen##_##mode##_CAPABLE ? \
2331 &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
2332}
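
/*-
 * Each expansion above defines both an s390x and a generic EVP_CIPHER
 * table; the EVP_aes_*() accessor selects between them at runtime via the
 * corresponding S390X_aes_*_CAPABLE facility test.
 */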
2333
2334# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags)\
2335static const EVP_CIPHER s390x_aes_##keylen##_##mode = { \
2336 nid##_##keylen##_##mode, \
2337 blocksize, \
2338 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE ? 2 : 1) * keylen / 8, \
2339 ivlen, \
2340 flags | EVP_CIPH_##MODE##_MODE, \
2341 EVP_ORIG_GLOBAL, \
2342 s390x_aes_##mode##_init_key, \
2343 s390x_aes_##mode##_cipher, \
2344 s390x_aes_##mode##_cleanup, \
2345 sizeof(S390X_AES_##MODE##_CTX), \
2346 NULL, \
2347 NULL, \
2348 s390x_aes_##mode##_ctrl, \
2349 NULL \
2350}; \
2351static const EVP_CIPHER aes_##keylen##_##mode = { \
2352 nid##_##keylen##_##mode,blocksize, \
2353 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE ? 2 : 1) * keylen / 8, \
2354 ivlen, \
2355 flags | EVP_CIPH_##MODE##_MODE, \
2356 EVP_ORIG_GLOBAL, \
2357 aes_##mode##_init_key, \
2358 aes_##mode##_cipher, \
2359 aes_##mode##_cleanup, \
2360 sizeof(EVP_AES_##MODE##_CTX), \
2361 NULL, \
2362 NULL, \
2363 aes_##mode##_ctrl, \
2364 NULL \
2365}; \
2366const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2367{ \
2368 return S390X_aes_##keylen##_##mode##_CAPABLE ? \
2369 &s390x_aes_##keylen##_##mode : &aes_##keylen##_##mode; \
2370}
2371
2372#else
2373
2374# define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
2375static const EVP_CIPHER aes_##keylen##_##mode = { \
2376 nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
2377 flags|EVP_CIPH_##MODE##_MODE, \
2378 EVP_ORIG_GLOBAL, \
2379 aes_init_key, \
2380 aes_##mode##_cipher, \
2381 NULL, \
2382 sizeof(EVP_AES_KEY), \
2383 NULL,NULL,NULL,NULL }; \
2384const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2385{ return &aes_##keylen##_##mode; }
2386
2387# define BLOCK_CIPHER_custom(nid,keylen,blocksize,ivlen,mode,MODE,flags) \
2388static const EVP_CIPHER aes_##keylen##_##mode = { \
2389 nid##_##keylen##_##mode,blocksize, \
2390 (EVP_CIPH_##MODE##_MODE==EVP_CIPH_XTS_MODE||EVP_CIPH_##MODE##_MODE==EVP_CIPH_SIV_MODE?2:1)*keylen/8, \
2391 ivlen, \
2392 flags|EVP_CIPH_##MODE##_MODE, \
2393 EVP_ORIG_GLOBAL, \
2394 aes_##mode##_init_key, \
2395 aes_##mode##_cipher, \
2396 aes_##mode##_cleanup, \
2397 sizeof(EVP_AES_##MODE##_CTX), \
2398 NULL,NULL,aes_##mode##_ctrl,NULL }; \
2399const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
2400{ return &aes_##keylen##_##mode; }
2401
2402#endif
2403
2404#define BLOCK_CIPHER_generic_pack(nid,keylen,flags) \
2405 BLOCK_CIPHER_generic(nid,keylen,16,16,cbc,cbc,CBC,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2406 BLOCK_CIPHER_generic(nid,keylen,16,0,ecb,ecb,ECB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2407 BLOCK_CIPHER_generic(nid,keylen,1,16,ofb128,ofb,OFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2408 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb128,cfb,CFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1) \
2409 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb1,cfb1,CFB,flags) \
2410 BLOCK_CIPHER_generic(nid,keylen,1,16,cfb8,cfb8,CFB,flags) \
2411 BLOCK_CIPHER_generic(nid,keylen,1,16,ctr,ctr,CTR,flags)
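
/*-
 * For example, BLOCK_CIPHER_generic_pack(NID_aes, 128, 0) below expands to
 * the tables behind EVP_aes_128_cbc(), EVP_aes_128_ecb(), EVP_aes_128_ofb(),
 * EVP_aes_128_cfb(), EVP_aes_128_cfb1(), EVP_aes_128_cfb8() and
 * EVP_aes_128_ctr().
 */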
2412
2413static int aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2414 const unsigned char *iv, int enc)
2415{
2416 int ret, mode;
2417 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2418 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
2419
2420 if (keylen <= 0) {
2421 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
2422 return 0;
2423 }
2424
2425 mode = EVP_CIPHER_CTX_get_mode(ctx);
2426 if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
2427 && !enc) {
2428#ifdef HWAES_CAPABLE
2429 if (HWAES_CAPABLE) {
2430 ret = HWAES_set_decrypt_key(key, keylen, &dat->ks.ks);
2431 dat->block = (block128_f) HWAES_decrypt;
2432 dat->stream.cbc = NULL;
2433# ifdef HWAES_cbc_encrypt
2434 if (mode == EVP_CIPH_CBC_MODE)
2435 dat->stream.cbc = (cbc128_f) HWAES_cbc_encrypt;
2436# endif
2437 } else
2438#endif
2439#ifdef BSAES_CAPABLE
2440 if (BSAES_CAPABLE && mode == EVP_CIPH_CBC_MODE) {
2441 ret = AES_set_decrypt_key(key, keylen, &dat->ks.ks);
2442 dat->block = (block128_f) AES_decrypt;
2443 dat->stream.cbc = (cbc128_f) ossl_bsaes_cbc_encrypt;
2444 } else
2445#endif
2446#ifdef VPAES_CAPABLE
2447 if (VPAES_CAPABLE) {
2448 ret = vpaes_set_decrypt_key(key, keylen, &dat->ks.ks);
2449 dat->block = (block128_f) vpaes_decrypt;
2450 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2451 (cbc128_f) vpaes_cbc_encrypt : NULL;
2452 } else
2453#endif
2454 {
2455 ret = AES_set_decrypt_key(key, keylen, &dat->ks.ks);
2456 dat->block = (block128_f) AES_decrypt;
2457 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2458 (cbc128_f) AES_cbc_encrypt : NULL;
2459 }
2460 } else
2461#ifdef HWAES_CAPABLE
2462 if (HWAES_CAPABLE) {
2463 ret = HWAES_set_encrypt_key(key, keylen, &dat->ks.ks);
2464 dat->block = (block128_f) HWAES_encrypt;
2465 dat->stream.cbc = NULL;
2466# ifdef HWAES_cbc_encrypt
2467 if (mode == EVP_CIPH_CBC_MODE)
2468 dat->stream.cbc = (cbc128_f) HWAES_cbc_encrypt;
2469 else
2470# endif
2471# ifdef HWAES_ctr32_encrypt_blocks
2472 if (mode == EVP_CIPH_CTR_MODE)
2473 dat->stream.ctr = (ctr128_f) HWAES_ctr32_encrypt_blocks;
2474 else
2475# endif
2476 (void)0; /* terminate potentially open 'else' */
2477 } else
2478#endif
2479#ifdef BSAES_CAPABLE
2480 if (BSAES_CAPABLE && mode == EVP_CIPH_CTR_MODE) {
2481 ret = AES_set_encrypt_key(key, keylen, &dat->ks.ks);
2482 dat->block = (block128_f) AES_encrypt;
2483 dat->stream.ctr = (ctr128_f) ossl_bsaes_ctr32_encrypt_blocks;
2484 } else
2485#endif
2486#ifdef VPAES_CAPABLE
2487 if (VPAES_CAPABLE) {
2488 ret = vpaes_set_encrypt_key(key, keylen, &dat->ks.ks);
2489 dat->block = (block128_f) vpaes_encrypt;
2490 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2491 (cbc128_f) vpaes_cbc_encrypt : NULL;
2492 } else
2493#endif
2494 {
2495 ret = AES_set_encrypt_key(key, keylen, &dat->ks.ks);
2496 dat->block = (block128_f) AES_encrypt;
2497 dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ?
2498 (cbc128_f) AES_cbc_encrypt : NULL;
2499#ifdef AES_CTR_ASM
2500 if (mode == EVP_CIPH_CTR_MODE)
2501 dat->stream.ctr = (ctr128_f) AES_ctr32_encrypt;
2502#endif
2503 }
2504
2505 if (ret < 0) {
2506 ERR_raise(ERR_LIB_EVP, EVP_R_AES_KEY_SETUP_FAILED);
2507 return 0;
2508 }
2509
2510 return 1;
2511}
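
/*-
 * Implementation selection above (illustrative summary): hardware AES
 * (HWAES) is preferred when available, then bit-sliced AES (BSAES, used
 * only for CBC decryption and CTR), then vector-permutation AES (VPAES),
 * with the portable AES_* functions as the final fallback.
 */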
2512
2513static int aes_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2514 const unsigned char *in, size_t len)
2515{
2516 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2517
2518 if (dat->stream.cbc)
2519 (*dat->stream.cbc) (in, out, len, &dat->ks, ctx->iv,
2520 EVP_CIPHER_CTX_is_encrypting(ctx));
2521 else if (EVP_CIPHER_CTX_is_encrypting(ctx))
2522 CRYPTO_cbc128_encrypt(in, out, len, &dat->ks, ctx->iv,
2523 dat->block);
2524 else
2525 CRYPTO_cbc128_decrypt(in, out, len, &dat->ks,
2526 ctx->iv, dat->block);
2527
2528 return 1;
2529}
2530
2531static int aes_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2532 const unsigned char *in, size_t len)
2533{
2534 size_t bl = EVP_CIPHER_CTX_get_block_size(ctx);
2535 size_t i;
2536 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2537
2538 if (len < bl)
2539 return 1;
2540
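    /*-
     * The loop header reduces len by one block, so the inclusive bound
     * i <= len visits every complete input block.
     */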
2541 for (i = 0, len -= bl; i <= len; i += bl)
2542 (*dat->block) (in + i, out + i, &dat->ks);
2543
2544 return 1;
2545}
2546
2547static int aes_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2548 const unsigned char *in, size_t len)
2549{
2550 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2551
2552 int num = EVP_CIPHER_CTX_get_num(ctx);
2553 CRYPTO_ofb128_encrypt(in, out, len, &dat->ks,
2554 ctx->iv, &num, dat->block);
2555 EVP_CIPHER_CTX_set_num(ctx, num);
2556 return 1;
2557}
2558
2559static int aes_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2560 const unsigned char *in, size_t len)
2561{
2562 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2563
2564 int num = EVP_CIPHER_CTX_get_num(ctx);
2565 CRYPTO_cfb128_encrypt(in, out, len, &dat->ks,
2566 ctx->iv, &num,
2567 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2568 EVP_CIPHER_CTX_set_num(ctx, num);
2569 return 1;
2570}
2571
2572static int aes_cfb8_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2573 const unsigned char *in, size_t len)
2574{
2575 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2576
2577 int num = EVP_CIPHER_CTX_get_num(ctx);
2578 CRYPTO_cfb128_8_encrypt(in, out, len, &dat->ks,
2579 ctx->iv, &num,
2580 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2581 EVP_CIPHER_CTX_set_num(ctx, num);
2582 return 1;
2583}
2584
2585static int aes_cfb1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2586 const unsigned char *in, size_t len)
2587{
2588 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2589
2590 if (EVP_CIPHER_CTX_test_flags(ctx, EVP_CIPH_FLAG_LENGTH_BITS)) {
2591 int num = EVP_CIPHER_CTX_get_num(ctx);
2592 CRYPTO_cfb128_1_encrypt(in, out, len, &dat->ks,
2593 ctx->iv, &num,
2594 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2595 EVP_CIPHER_CTX_set_num(ctx, num);
2596 return 1;
2597 }
2598
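    /*-
     * Process MAXBITCHUNK bytes at a time so that the bit count passed to
     * CRYPTO_cfb128_1_encrypt (len * 8) cannot overflow a size_t.
     */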
2599 while (len >= MAXBITCHUNK) {
2600 int num = EVP_CIPHER_CTX_get_num(ctx);
2601 CRYPTO_cfb128_1_encrypt(in, out, MAXBITCHUNK * 8, &dat->ks,
2602 ctx->iv, &num,
2603 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2604 EVP_CIPHER_CTX_set_num(ctx, num);
2605 len -= MAXBITCHUNK;
2606 out += MAXBITCHUNK;
2607 in += MAXBITCHUNK;
2608 }
2609 if (len) {
2610 int num = EVP_CIPHER_CTX_get_num(ctx);
2611 CRYPTO_cfb128_1_encrypt(in, out, len * 8, &dat->ks,
2612 ctx->iv, &num,
2613 EVP_CIPHER_CTX_is_encrypting(ctx), dat->block);
2614 EVP_CIPHER_CTX_set_num(ctx, num);
2615 }
2616
2617 return 1;
2618}
2619
2620static int aes_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2621 const unsigned char *in, size_t len)
2622{
2623 int n = EVP_CIPHER_CTX_get_num(ctx);
2624 unsigned int num;
2625 EVP_AES_KEY *dat = EVP_C_DATA(EVP_AES_KEY,ctx);
2626
2627 if (n < 0)
2628 return 0;
2629 num = (unsigned int)n;
2630
2631 if (dat->stream.ctr)
2632 CRYPTO_ctr128_encrypt_ctr32(in, out, len, &dat->ks,
2633 ctx->iv,
2634 EVP_CIPHER_CTX_buf_noconst(ctx),
2635 &num, dat->stream.ctr);
2636 else
2637 CRYPTO_ctr128_encrypt(in, out, len, &dat->ks,
2638 ctx->iv,
2639 EVP_CIPHER_CTX_buf_noconst(ctx), &num,
2640 dat->block);
2641 EVP_CIPHER_CTX_set_num(ctx, num);
2642 return 1;
2643}
2644
2645BLOCK_CIPHER_generic_pack(NID_aes, 128, 0)
2646 BLOCK_CIPHER_generic_pack(NID_aes, 192, 0)
2647 BLOCK_CIPHER_generic_pack(NID_aes, 256, 0)
2648
2649static int aes_gcm_cleanup(EVP_CIPHER_CTX *c)
2650{
2651 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
2652 if (gctx == NULL)
2653 return 0;
2654 OPENSSL_cleanse(&gctx->gcm, sizeof(gctx->gcm));
2655 if (gctx->iv != c->iv)
2656 OPENSSL_free(gctx->iv);
2657 return 1;
2658}
2659
2660static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
2661{
2662 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,c);
2663 switch (type) {
2664 case EVP_CTRL_INIT:
2665 gctx->key_set = 0;
2666 gctx->iv_set = 0;
2667 gctx->ivlen = EVP_CIPHER_get_iv_length(c->cipher);
2668 gctx->iv = c->iv;
2669 gctx->taglen = -1;
2670 gctx->iv_gen = 0;
2671 gctx->tls_aad_len = -1;
2672 return 1;
2673
2674 case EVP_CTRL_GET_IVLEN:
2675 *(int *)ptr = gctx->ivlen;
2676 return 1;
2677
2678 case EVP_CTRL_AEAD_SET_IVLEN:
2679 if (arg <= 0)
2680 return 0;
2681 /* Allocate memory for IV if needed */
2682 if ((arg > EVP_MAX_IV_LENGTH) && (arg > gctx->ivlen)) {
2683 if (gctx->iv != c->iv)
2684 OPENSSL_free(gctx->iv);
2685 if ((gctx->iv = OPENSSL_malloc(arg)) == NULL) {
2686 ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
2687 return 0;
2688 }
2689 }
2690 gctx->ivlen = arg;
2691 return 1;
2692
2693 case EVP_CTRL_AEAD_SET_TAG:
2694 if (arg <= 0 || arg > 16 || c->encrypt)
2695 return 0;
2696 memcpy(c->buf, ptr, arg);
2697 gctx->taglen = arg;
2698 return 1;
2699
2700 case EVP_CTRL_AEAD_GET_TAG:
2701 if (arg <= 0 || arg > 16 || !c->encrypt
2702 || gctx->taglen < 0)
2703 return 0;
2704 memcpy(ptr, c->buf, arg);
2705 return 1;
2706
2707 case EVP_CTRL_GCM_SET_IV_FIXED:
2708 /* Special case: -1 length restores whole IV */
2709 if (arg == -1) {
2710 memcpy(gctx->iv, ptr, gctx->ivlen);
2711 gctx->iv_gen = 1;
2712 return 1;
2713 }
2714 /*
2715 * Fixed field must be at least 4 bytes and invocation field at least
2716 * 8.
2717 */
2718 if ((arg < 4) || (gctx->ivlen - arg) < 8)
2719 return 0;
2720 if (arg)
2721 memcpy(gctx->iv, ptr, arg);
2722 if (c->encrypt && RAND_bytes(gctx->iv + arg, gctx->ivlen - arg) <= 0)
2723 return 0;
2724 gctx->iv_gen = 1;
2725 return 1;
2726
2727 case EVP_CTRL_GCM_IV_GEN:
2728 if (gctx->iv_gen == 0 || gctx->key_set == 0)
2729 return 0;
2730 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2731 if (arg <= 0 || arg > gctx->ivlen)
2732 arg = gctx->ivlen;
2733 memcpy(ptr, gctx->iv + gctx->ivlen - arg, arg);
2734 /*
2735 * The invocation field will be at least 8 bytes, so there is no need to
2736 * check for wraparound or to increment more than the last 8 bytes.
2737 */
2738 ctr64_inc(gctx->iv + gctx->ivlen - 8);
2739 gctx->iv_set = 1;
2740 return 1;
2741
2742 case EVP_CTRL_GCM_SET_IV_INV:
2743 if (gctx->iv_gen == 0 || gctx->key_set == 0 || c->encrypt)
2744 return 0;
2745 memcpy(gctx->iv + gctx->ivlen - arg, ptr, arg);
2746 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
2747 gctx->iv_set = 1;
2748 return 1;
2749
2750 case EVP_CTRL_AEAD_TLS1_AAD:
2751 /* Save the AAD for later use */
2752 if (arg != EVP_AEAD_TLS1_AAD_LEN)
2753 return 0;
2754 memcpy(c->buf, ptr, arg);
2755 gctx->tls_aad_len = arg;
2756 gctx->tls_enc_records = 0;
2757 {
2758 unsigned int len = c->buf[arg - 2] << 8 | c->buf[arg - 1];
2759 /* Correct length for explicit IV */
2760 if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
2761 return 0;
2762 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
2763 /* If decrypting correct for tag too */
2764 if (!c->encrypt) {
2765 if (len < EVP_GCM_TLS_TAG_LEN)
2766 return 0;
2767 len -= EVP_GCM_TLS_TAG_LEN;
2768 }
2769 c->buf[arg - 2] = len >> 8;
2770 c->buf[arg - 1] = len & 0xff;
2771 }
2772 /* Extra padding: tag appended to record */
2773 return EVP_GCM_TLS_TAG_LEN;
2774
2775 case EVP_CTRL_COPY:
2776 {
2777 EVP_CIPHER_CTX *out = ptr;
2778 EVP_AES_GCM_CTX *gctx_out = EVP_C_DATA(EVP_AES_GCM_CTX,out);
2779 if (gctx->gcm.key) {
2780 if (gctx->gcm.key != &gctx->ks)
2781 return 0;
2782 gctx_out->gcm.key = &gctx_out->ks;
2783 }
2784 if (gctx->iv == c->iv)
2785 gctx_out->iv = out->iv;
2786 else {
2787 if ((gctx_out->iv = OPENSSL_malloc(gctx->ivlen)) == NULL) {
2788 ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
2789 return 0;
2790 }
2791 memcpy(gctx_out->iv, gctx->iv, gctx->ivlen);
2792 }
2793 return 1;
2794 }
2795
2796 default:
2797 return -1;
2798
2799 }
2800}
2801
2802static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
2803 const unsigned char *iv, int enc)
2804{
2805 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2806
2807 if (iv == NULL && key == NULL)
2808 return 1;
2809
2810 if (key != NULL) {
2811 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
2812
2813 if (keylen <= 0) {
2814 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
2815 return 0;
2816 }
2817 do {
2818#ifdef HWAES_CAPABLE
2819 if (HWAES_CAPABLE) {
2820 HWAES_set_encrypt_key(key, keylen, &gctx->ks.ks);
2821 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2822 (block128_f) HWAES_encrypt);
2823# ifdef HWAES_ctr32_encrypt_blocks
2824 gctx->ctr = (ctr128_f) HWAES_ctr32_encrypt_blocks;
2825# else
2826 gctx->ctr = NULL;
2827# endif
2828 break;
2829 } else
2830#endif
2831#ifdef BSAES_CAPABLE
2832 if (BSAES_CAPABLE) {
2833 AES_set_encrypt_key(key, keylen, &gctx->ks.ks);
2834 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2835 (block128_f) AES_encrypt);
2836 gctx->ctr = (ctr128_f) ossl_bsaes_ctr32_encrypt_blocks;
2837 break;
2838 } else
2839#endif
2840#ifdef VPAES_CAPABLE
2841 if (VPAES_CAPABLE) {
2842 vpaes_set_encrypt_key(key, keylen, &gctx->ks.ks);
2843 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2844 (block128_f) vpaes_encrypt);
2845 gctx->ctr = NULL;
2846 break;
2847 } else
2848#endif
2849 (void)0; /* terminate potentially open 'else' */
2850
2851 AES_set_encrypt_key(key, keylen, &gctx->ks.ks);
2852 CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks,
2853 (block128_f) AES_encrypt);
2854#ifdef AES_CTR_ASM
2855 gctx->ctr = (ctr128_f) AES_ctr32_encrypt;
2856#else
2857 gctx->ctr = NULL;
2858#endif
2859 } while (0);
2860
2861 /*
2862 * If we have an IV we can set it directly, otherwise use the saved IV.
2863 */
2864 if (iv == NULL && gctx->iv_set)
2865 iv = gctx->iv;
2866 if (iv) {
2867 CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
2868 gctx->iv_set = 1;
2869 }
2870 gctx->key_set = 1;
2871 } else {
2872 /* If key set use IV, otherwise copy */
2873 if (gctx->key_set)
2874 CRYPTO_gcm128_setiv(&gctx->gcm, iv, gctx->ivlen);
2875 else
2876 memcpy(gctx->iv, iv, gctx->ivlen);
2877 gctx->iv_set = 1;
2878 gctx->iv_gen = 0;
2879 }
2880 return 1;
2881}
2882
2883/*
2884 * Handle TLS GCM packet format. This consists of the last portion of the IV
2885 * followed by the payload and finally the tag. On encrypt generate IV,
2886 * encrypt payload and write the tag. On verify retrieve IV, decrypt payload
2887 * and verify tag.
2888 */
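/*-
 * Record layout (illustrative):
 *
 *   | explicit IV (8) | ciphertext (len) | tag (16) |
 *
 * in == out, so the payload is processed in place once both pointers have
 * been advanced past the explicit IV.
 */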
2889
2890static int aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
2891 const unsigned char *in, size_t len)
2892{
2893 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
2894 int rv = -1;
2895 /* Encrypt/decrypt must be performed in place */
2896 if (out != in
2897 || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
2898 return -1;
2899
2900 /*
2901 * Check for too many records as per FIPS 140-2 IG A.5 "Key/IV Pair Uniqueness
2902 * Requirements from SP 800-38D". The requirement is for one party to the
2903 * communication to fail after 2^64 - 1 records. We do this on the encrypting
2904 * side only.
2905 */
2906 if (ctx->encrypt && ++gctx->tls_enc_records == 0) {
2907 ERR_raise(ERR_LIB_EVP, EVP_R_TOO_MANY_RECORDS);
2908 goto err;
2909 }
2910
2911 /*
2912 * Set IV from start of buffer or generate IV and write to start of
2913 * buffer.
2914 */
2915 if (EVP_CIPHER_CTX_ctrl(ctx, ctx->encrypt ? EVP_CTRL_GCM_IV_GEN
2916 : EVP_CTRL_GCM_SET_IV_INV,
2917 EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
2918 goto err;
2919 /* Use saved AAD */
2920 if (CRYPTO_gcm128_aad(&gctx->gcm, ctx->buf, gctx->tls_aad_len))
2921 goto err;
2922 /* Fix buffer and length to point to payload */
2923 in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
2924 out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
2925 len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
2926 if (ctx->encrypt) {
2927 /* Encrypt payload */
2928 if (gctx->ctr) {
2929 size_t bulk = 0;
2930#if defined(AES_GCM_ASM)
2931 if (len >= 32 && AES_GCM_ASM(gctx)) {
2932 if (CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
2933 return -1;
2934
2935 bulk = AES_gcm_encrypt(in, out, len,
2936 gctx->gcm.key,
2937 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2938 gctx->gcm.len.u[1] += bulk;
2939 }
2940#endif
2941 if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm,
2942 in + bulk,
2943 out + bulk,
2944 len - bulk, gctx->ctr))
2945 goto err;
2946 } else {
2947 size_t bulk = 0;
2948#if defined(AES_GCM_ASM2)
2949 if (len >= 32 && AES_GCM_ASM2(gctx)) {
2950 if (CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
2951 return -1;
2952
2953 bulk = AES_gcm_encrypt(in, out, len,
2954 gctx->gcm.key,
2955 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2956 gctx->gcm.len.u[1] += bulk;
2957 }
2958#endif
2959 if (CRYPTO_gcm128_encrypt(&gctx->gcm,
2960 in + bulk, out + bulk, len - bulk))
2961 goto err;
2962 }
2963 out += len;
2964 /* Finally write tag */
2965 CRYPTO_gcm128_tag(&gctx->gcm, out, EVP_GCM_TLS_TAG_LEN);
2966 rv = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
2967 } else {
2968 /* Decrypt */
2969 if (gctx->ctr) {
2970 size_t bulk = 0;
2971#if defined(AES_GCM_ASM)
2972 if (len >= 16 && AES_GCM_ASM(gctx)) {
2973 if (CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
2974 return -1;
2975
2976 bulk = AES_gcm_decrypt(in, out, len,
2977 gctx->gcm.key,
2978 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2979 gctx->gcm.len.u[1] += bulk;
2980 }
2981#endif
2982 if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
2983 in + bulk,
2984 out + bulk,
2985 len - bulk, gctx->ctr))
2986 goto err;
2987 } else {
2988 size_t bulk = 0;
2989#if defined(AES_GCM_ASM2)
2990 if (len >= 16 && AES_GCM_ASM2(gctx)) {
2991 if (CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
2992 return -1;
2993
2994 bulk = AES_gcm_decrypt(in, out, len,
2995 gctx->gcm.key,
2996 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
2997 gctx->gcm.len.u[1] += bulk;
2998 }
2999#endif
3000 if (CRYPTO_gcm128_decrypt(&gctx->gcm,
3001 in + bulk, out + bulk, len - bulk))
3002 goto err;
3003 }
3004 /* Retrieve tag */
3005 CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, EVP_GCM_TLS_TAG_LEN);
3006 /* If tag mismatch wipe buffer */
3007 if (CRYPTO_memcmp(ctx->buf, in + len, EVP_GCM_TLS_TAG_LEN)) {
3008 OPENSSL_cleanse(out, len);
3009 goto err;
3010 }
3011 rv = len;
3012 }
3013
3014 err:
3015 gctx->iv_set = 0;
3016 gctx->tls_aad_len = -1;
3017 return rv;
3018}
3019
3020#ifdef FIPS_MODULE
3021/*
3022 * See SP800-38D (GCM) Section 8 "Uniqueness requirement on IVS and keys"
3023 *
3024 * See also 8.2.2 RBG-based construction.
3025 * Random construction consists of a free field (which can be NULL) and a
3026 * random field which will use a DRBG that can return at least 96 bits of
3027 * entropy strength. (The DRBG must be seeded by the FIPS module).
3028 */
3029static int aes_gcm_iv_generate(EVP_AES_GCM_CTX *gctx, int offset)
3030{
3031 int sz = gctx->ivlen - offset;
3032
3033 /* Must be at least 96 bits */
3034 if (sz <= 0 || gctx->ivlen < 12)
3035 return 0;
3036
3037 /* Use DRBG to generate random iv */
3038 if (RAND_bytes(gctx->iv + offset, sz) <= 0)
3039 return 0;
3040 return 1;
3041}
3042#endif /* FIPS_MODULE */
3043
3044static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3045 const unsigned char *in, size_t len)
3046{
3047 EVP_AES_GCM_CTX *gctx = EVP_C_DATA(EVP_AES_GCM_CTX,ctx);
3048
3049 /* If not set up, return error */
3050 if (!gctx->key_set)
3051 return -1;
3052
3053 if (gctx->tls_aad_len >= 0)
3054 return aes_gcm_tls_cipher(ctx, out, in, len);
3055
3056#ifdef FIPS_MODULE
3057 /*
3058 * FIPS requires generation of AES-GCM IV's inside the FIPS module.
3059 * The IV can still be set externally (the security policy will state that
3060 * this is not FIPS compliant). There are some applications
3061 * where setting the IV externally is the only option available.
3062 */
3063 if (!gctx->iv_set) {
3064 if (!ctx->encrypt || !aes_gcm_iv_generate(gctx, 0))
3065 return -1;
3066 CRYPTO_gcm128_setiv(&gctx->gcm, gctx->iv, gctx->ivlen);
3067 gctx->iv_set = 1;
3068 gctx->iv_gen_rand = 1;
3069 }
3070#else
3071 if (!gctx->iv_set)
3072 return -1;
3073#endif /* FIPS_MODULE */
3074
3075 if (in) {
3076 if (out == NULL) {
3077 if (CRYPTO_gcm128_aad(&gctx->gcm, in, len))
3078 return -1;
3079 } else if (ctx->encrypt) {
3080 if (gctx->ctr) {
3081 size_t bulk = 0;
3082#if defined(AES_GCM_ASM)
3083 if (len >= 32 && AES_GCM_ASM(gctx)) {
3084 size_t res = (16 - gctx->gcm.mres) % 16;
3085
3086 if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
3087 return -1;
3088
3089 bulk = AES_gcm_encrypt(in + res,
3090 out + res, len - res,
3091 gctx->gcm.key, gctx->gcm.Yi.c,
3092 gctx->gcm.Xi.u);
3093 gctx->gcm.len.u[1] += bulk;
3094 bulk += res;
3095 }
3096#endif
3097 if (CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm,
3098 in + bulk,
3099 out + bulk,
3100 len - bulk, gctx->ctr))
3101 return -1;
3102 } else {
3103 size_t bulk = 0;
3104#if defined(AES_GCM_ASM2)
3105 if (len >= 32 && AES_GCM_ASM2(gctx)) {
3106 size_t res = (16 - gctx->gcm.mres) % 16;
3107
3108 if (CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
3109 return -1;
3110
3111 bulk = AES_gcm_encrypt(in + res,
3112 out + res, len - res,
3113 gctx->gcm.key, gctx->gcm.Yi.c,
3114 gctx->gcm.Xi.u);
3115 gctx->gcm.len.u[1] += bulk;
3116 bulk += res;
3117 }
3118#endif
3119 if (CRYPTO_gcm128_encrypt(&gctx->gcm,
3120 in + bulk, out + bulk, len - bulk))
3121 return -1;
3122 }
3123 } else {
3124 if (gctx->ctr) {
3125 size_t bulk = 0;
3126#if defined(AES_GCM_ASM)
3127 if (len >= 16 && AES_GCM_ASM(gctx)) {
3128 size_t res = (16 - gctx->gcm.mres) % 16;
3129
3130 if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
3131 return -1;
3132
3133 bulk = AES_gcm_decrypt(in + res,
3134 out + res, len - res,
3135 gctx->gcm.key,
3136 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3137 gctx->gcm.len.u[1] += bulk;
3138 bulk += res;
3139 }
3140#endif
3141 if (CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm,
3142 in + bulk,
3143 out + bulk,
3144 len - bulk, gctx->ctr))
3145 return -1;
3146 } else {
3147 size_t bulk = 0;
3148#if defined(AES_GCM_ASM2)
3149 if (len >= 16 && AES_GCM_ASM2(gctx)) {
3150 size_t res = (16 - gctx->gcm.mres) % 16;
3151
3152 if (CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
3153 return -1;
3154
3155 bulk = AES_gcm_decrypt(in + res,
3156 out + res, len - res,
3157 gctx->gcm.key,
3158 gctx->gcm.Yi.c, gctx->gcm.Xi.u);
3159 gctx->gcm.len.u[1] += bulk;
3160 bulk += res;
3161 }
3162#endif
3163 if (CRYPTO_gcm128_decrypt(&gctx->gcm,
3164 in + bulk, out + bulk, len - bulk))
3165 return -1;
3166 }
3167 }
3168 return len;
3169 } else {
3170 if (!ctx->encrypt) {
3171 if (gctx->taglen < 0)
3172 return -1;
3173 if (CRYPTO_gcm128_finish(&gctx->gcm, ctx->buf, gctx->taglen) != 0)
3174 return -1;
3175 gctx->iv_set = 0;
3176 return 0;
3177 }
3178 CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, 16);
3179 gctx->taglen = 16;
3180 /* Don't reuse the IV */
3181 gctx->iv_set = 0;
3182 return 0;
3183 }
3184
3185}
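
/*-
 * Illustrative EVP-level GCM call sequence exercising the branches above
 * (a sketch, not part of the original source; the function name is
 * hypothetical and error handling is omitted):
 */
#if 0
static int gcm_encrypt_example(const unsigned char *key,  /* 32 bytes */
                               const unsigned char *iv,   /* 12 bytes */
                               const unsigned char *aad, int aadlen,
                               const unsigned char *msg, int msglen,
                               unsigned char *out, unsigned char tag[16])
{
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
    int outl;

    EVP_EncryptInit_ex(c, EVP_aes_256_gcm(), NULL, key, iv);
    /* out == NULL routes the input to CRYPTO_gcm128_aad(). */
    EVP_EncryptUpdate(c, NULL, &outl, aad, aadlen);
    EVP_EncryptUpdate(c, out, &outl, msg, msglen);
    /* Final() stores the tag in ctx->buf and invalidates the IV. */
    EVP_EncryptFinal_ex(c, out + outl, &outl);
    EVP_CIPHER_CTX_ctrl(c, EVP_CTRL_AEAD_GET_TAG, 16, tag);
    EVP_CIPHER_CTX_free(c);
    return 1;
}
#endif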
3186
3187#define CUSTOM_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 \
3188 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
3189 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
3190 | EVP_CIPH_CUSTOM_COPY | EVP_CIPH_CUSTOM_IV_LENGTH)
3191
3192BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, gcm, GCM,
3193 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3194BLOCK_CIPHER_custom(NID_aes, 192, 1, 12, gcm, GCM,
3195 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3196BLOCK_CIPHER_custom(NID_aes, 256, 1, 12, gcm, GCM,
3197 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3198
3199static int aes_xts_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3200{
3201 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX, c);
3202
3203 if (type == EVP_CTRL_COPY) {
3204 EVP_CIPHER_CTX *out = ptr;
3205 EVP_AES_XTS_CTX *xctx_out = EVP_C_DATA(EVP_AES_XTS_CTX,out);
3206
3207 if (xctx->xts.key1) {
3208 if (xctx->xts.key1 != &xctx->ks1)
3209 return 0;
3210 xctx_out->xts.key1 = &xctx_out->ks1;
3211 }
3212 if (xctx->xts.key2) {
3213 if (xctx->xts.key2 != &xctx->ks2)
3214 return 0;
3215 xctx_out->xts.key2 = &xctx_out->ks2;
3216 }
3217 return 1;
3218 } else if (type != EVP_CTRL_INIT)
3219 return -1;
3220 /* key1 and key2 are used as an indicator that both key and IV are set */
3221 xctx->xts.key1 = NULL;
3222 xctx->xts.key2 = NULL;
3223 return 1;
3224}
3225
3226static int aes_xts_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3227 const unsigned char *iv, int enc)
3228{
3229 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
3230
3231 if (iv == NULL && key == NULL)
3232 return 1;
3233
3234 if (key != NULL) {
3235 do {
3236 /* The key is really two half-length keys concatenated */
3237 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx);
3238 const int bytes = keylen / 2;
3239 const int bits = bytes * 8;
3240
3241 if (keylen <= 0) {
3242 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
3243 return 0;
3244 }
3245 /*
3246 * Verify that the two keys are different.
3247 *
3248 * This addresses the vulnerability described in Rogaway's
3249 * September 2004 paper:
3250 *
3251 * "Efficient Instantiations of Tweakable Blockciphers and
3252 * Refinements to Modes OCB and PMAC".
3253 * (http://web.cs.ucdavis.edu/~rogaway/papers/offsets.pdf)
3254 *
3255 * FIPS 140-2 IG A.9 XTS-AES Key Generation Requirements states
3256 * that:
3257 * "The check for Key_1 != Key_2 shall be done at any place
3258 * BEFORE using the keys in the XTS-AES algorithm to process
3259 * data with them."
3260 */
3261 if ((!allow_insecure_decrypt || enc)
3262 && CRYPTO_memcmp(key, key + bytes, bytes) == 0) {
3263 ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DUPLICATED_KEYS);
3264 return 0;
3265 }
3266
3267#ifdef AES_XTS_ASM
3268 xctx->stream = enc ? AES_xts_encrypt : AES_xts_decrypt;
3269#else
3270 xctx->stream = NULL;
3271#endif
3272 /* key_len is two AES keys */
3273#ifdef HWAES_CAPABLE
3274 if (HWAES_CAPABLE) {
3275 if (enc) {
3276 HWAES_set_encrypt_key(key, bits, &xctx->ks1.ks);
3277 xctx->xts.block1 = (block128_f) HWAES_encrypt;
3278# ifdef HWAES_xts_encrypt
3279 xctx->stream = HWAES_xts_encrypt;
3280# endif
3281 } else {
3282 HWAES_set_decrypt_key(key, bits, &xctx->ks1.ks);
3283 xctx->xts.block1 = (block128_f) HWAES_decrypt;
3284# ifdef HWAES_xts_decrypt
3285 xctx->stream = HWAES_xts_decrypt;
3286# endif
3287 }
3288
3289 HWAES_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3290 xctx->xts.block2 = (block128_f) HWAES_encrypt;
3291
3292 xctx->xts.key1 = &xctx->ks1;
3293 break;
3294 } else
3295#endif
3296#ifdef BSAES_CAPABLE
3297 if (BSAES_CAPABLE)
3298 xctx->stream = enc ? ossl_bsaes_xts_encrypt : ossl_bsaes_xts_decrypt;
3299 else
3300#endif
3301#ifdef VPAES_CAPABLE
3302 if (VPAES_CAPABLE) {
3303 if (enc) {
3304 vpaes_set_encrypt_key(key, bits, &xctx->ks1.ks);
3305 xctx->xts.block1 = (block128_f) vpaes_encrypt;
3306 } else {
3307 vpaes_set_decrypt_key(key, bits, &xctx->ks1.ks);
3308 xctx->xts.block1 = (block128_f) vpaes_decrypt;
3309 }
3310
3311 vpaes_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3312 xctx->xts.block2 = (block128_f) vpaes_encrypt;
3313
3314 xctx->xts.key1 = &xctx->ks1;
3315 break;
3316 } else
3317#endif
3318 (void)0; /* terminate potentially open 'else' */
3319
3320 if (enc) {
3321 AES_set_encrypt_key(key, bits, &xctx->ks1.ks);
3322 xctx->xts.block1 = (block128_f) AES_encrypt;
3323 } else {
3324 AES_set_decrypt_key(key, bits, &xctx->ks1.ks);
3325 xctx->xts.block1 = (block128_f) AES_decrypt;
3326 }
3327
3328 AES_set_encrypt_key(key + bytes, bits, &xctx->ks2.ks);
3329 xctx->xts.block2 = (block128_f) AES_encrypt;
3330
3331 xctx->xts.key1 = &xctx->ks1;
3332 } while (0);
3333 }
3334
3335 if (iv) {
3336 xctx->xts.key2 = &xctx->ks2;
3337 memcpy(ctx->iv, iv, 16);
3338 }
3339
3340 return 1;
3341}
3342
3343static int aes_xts_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3344 const unsigned char *in, size_t len)
3345{
3346 EVP_AES_XTS_CTX *xctx = EVP_C_DATA(EVP_AES_XTS_CTX,ctx);
3347
3348 if (xctx->xts.key1 == NULL
3349 || xctx->xts.key2 == NULL
3350 || out == NULL
3351 || in == NULL
3352 || len < AES_BLOCK_SIZE)
3353 return 0;
3354
3355 /*
3356 * Impose a limit of 2^20 blocks per data unit as specified by
3357 * IEEE Std 1619-2018. The earlier and obsolete IEEE Std 1619-2007
3358 * indicated that this was a SHOULD NOT rather than a MUST NOT.
3359 * NIST SP 800-38E mandates the same limit.
3360 */
3361 if (len > XTS_MAX_BLOCKS_PER_DATA_UNIT * AES_BLOCK_SIZE) {
3362 ERR_raise(ERR_LIB_EVP, EVP_R_XTS_DATA_UNIT_IS_TOO_LARGE);
3363 return 0;
3364 }
3365
3366 if (xctx->stream)
3367 (*xctx->stream) (in, out, len,
3368 xctx->xts.key1, xctx->xts.key2,
3369 ctx->iv);
3370 else if (CRYPTO_xts128_encrypt(&xctx->xts, ctx->iv, in, out, len,
3371 EVP_CIPHER_CTX_is_encrypting(ctx)))
3372 return 0;
3373 return 1;
3374}
3375
3376#define aes_xts_cleanup NULL
3377
3378#define XTS_FLAGS (EVP_CIPH_FLAG_DEFAULT_ASN1 | EVP_CIPH_CUSTOM_IV \
3379 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
3380 | EVP_CIPH_CUSTOM_COPY)
3381
3382BLOCK_CIPHER_custom(NID_aes, 128, 1, 16, xts, XTS, XTS_FLAGS)
3383BLOCK_CIPHER_custom(NID_aes, 256, 1, 16, xts, XTS, XTS_FLAGS)
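
/*-
 * Illustrative XTS usage (a sketch, not part of the original source): the
 * EVP key is the concatenation of the two half-length keys checked above,
 * and the 16-byte IV is the data-unit tweak. The function name is
 * hypothetical and error handling is omitted.
 */
#if 0
static int xts_example(const unsigned char key[64],   /* key1 || key2 */
                       const unsigned char tweak[16],
                       const unsigned char *in, int len, unsigned char *out)
{
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
    int outl;

    EVP_EncryptInit_ex(c, EVP_aes_256_xts(), NULL, key, tweak);
    EVP_EncryptUpdate(c, out, &outl, in, len);  /* len >= AES_BLOCK_SIZE */
    EVP_EncryptFinal_ex(c, out + outl, &outl);
    EVP_CIPHER_CTX_free(c);
    return 1;
}
#endif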
3384
3385static int aes_ccm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3386{
3387 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,c);
3388 switch (type) {
3389 case EVP_CTRL_INIT:
3390 cctx->key_set = 0;
3391 cctx->iv_set = 0;
3392 cctx->L = 8;
3393 cctx->M = 12;
3394 cctx->tag_set = 0;
3395 cctx->len_set = 0;
3396 cctx->tls_aad_len = -1;
3397 return 1;
3398
3399 case EVP_CTRL_GET_IVLEN:
3400 *(int *)ptr = 15 - cctx->L;
3401 return 1;
3402
3403 case EVP_CTRL_AEAD_TLS1_AAD:
3404 /* Save the AAD for later use */
3405 if (arg != EVP_AEAD_TLS1_AAD_LEN)
3406 return 0;
3407 memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
3408 cctx->tls_aad_len = arg;
3409 {
3410 uint16_t len =
3411 EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] << 8
3412 | EVP_CIPHER_CTX_buf_noconst(c)[arg - 1];
3413 /* Correct length for explicit IV */
3414 if (len < EVP_CCM_TLS_EXPLICIT_IV_LEN)
3415 return 0;
3416 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
3417 /* If decrypting correct for tag too */
3418 if (!EVP_CIPHER_CTX_is_encrypting(c)) {
3419 if (len < cctx->M)
3420 return 0;
3421 len -= cctx->M;
3422 }
3423 EVP_CIPHER_CTX_buf_noconst(c)[arg - 2] = len >> 8;
3424 EVP_CIPHER_CTX_buf_noconst(c)[arg - 1] = len & 0xff;
3425 }
3426 /* Extra padding: tag appended to record */
3427 return cctx->M;
3428
3429 case EVP_CTRL_CCM_SET_IV_FIXED:
3430 /* Sanity check length */
3431 if (arg != EVP_CCM_TLS_FIXED_IV_LEN)
3432 return 0;
3433 /* Just copy to first part of IV */
3434 memcpy(c->iv, ptr, arg);
3435 return 1;
3436
3437 case EVP_CTRL_AEAD_SET_IVLEN:
3438 arg = 15 - arg;
3439 /* fall through */
3440 case EVP_CTRL_CCM_SET_L:
3441 if (arg < 2 || arg > 8)
3442 return 0;
3443 cctx->L = arg;
3444 return 1;
3445
3446 case EVP_CTRL_AEAD_SET_TAG:
3447 if ((arg & 1) || arg < 4 || arg > 16)
3448 return 0;
3449 if (EVP_CIPHER_CTX_is_encrypting(c) && ptr)
3450 return 0;
3451 if (ptr) {
3452 cctx->tag_set = 1;
3453 memcpy(EVP_CIPHER_CTX_buf_noconst(c), ptr, arg);
3454 }
3455 cctx->M = arg;
3456 return 1;
3457
3458 case EVP_CTRL_AEAD_GET_TAG:
3459 if (!EVP_CIPHER_CTX_is_encrypting(c) || !cctx->tag_set)
3460 return 0;
3461 if (!CRYPTO_ccm128_tag(&cctx->ccm, ptr, (size_t)arg))
3462 return 0;
3463 cctx->tag_set = 0;
3464 cctx->iv_set = 0;
3465 cctx->len_set = 0;
3466 return 1;
3467
3468 case EVP_CTRL_COPY:
3469 {
3470 EVP_CIPHER_CTX *out = ptr;
3471 EVP_AES_CCM_CTX *cctx_out = EVP_C_DATA(EVP_AES_CCM_CTX,out);
3472 if (cctx->ccm.key) {
3473 if (cctx->ccm.key != &cctx->ks)
3474 return 0;
3475 cctx_out->ccm.key = &cctx_out->ks;
3476 }
3477 return 1;
3478 }
3479
3480 default:
3481 return -1;
3482
3483 }
3484}
3485
3486static int aes_ccm_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3487 const unsigned char *iv, int enc)
3488{
3489 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3490
3491 if (iv == NULL && key == NULL)
3492 return 1;
3493
3494 if (key != NULL) {
3495 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
3496
3497 if (keylen <= 0) {
3498 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
3499 return 0;
3500 }
3501 do {
3502#ifdef HWAES_CAPABLE
3503 if (HWAES_CAPABLE) {
3504 HWAES_set_encrypt_key(key, keylen, &cctx->ks.ks);
3505
3506 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3507 &cctx->ks, (block128_f) HWAES_encrypt);
3508 cctx->str = NULL;
3509 cctx->key_set = 1;
3510 break;
3511 } else
3512#endif
3513#ifdef VPAES_CAPABLE
3514 if (VPAES_CAPABLE) {
3515 vpaes_set_encrypt_key(key, keylen, &cctx->ks.ks);
3516 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3517 &cctx->ks, (block128_f) vpaes_encrypt);
3518 cctx->str = NULL;
3519 cctx->key_set = 1;
3520 break;
3521 }
3522#endif
3523 AES_set_encrypt_key(key, keylen, &cctx->ks.ks);
3524 CRYPTO_ccm128_init(&cctx->ccm, cctx->M, cctx->L,
3525 &cctx->ks, (block128_f) AES_encrypt);
3526 cctx->str = NULL;
3527 cctx->key_set = 1;
3528 } while (0);
3529 }
3530 if (iv != NULL) {
3531 memcpy(ctx->iv, iv, 15 - cctx->L);
3532 cctx->iv_set = 1;
3533 }
3534 return 1;
3535}
3536
3537static int aes_ccm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3538 const unsigned char *in, size_t len)
3539{
3540 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3541 CCM128_CONTEXT *ccm = &cctx->ccm;
3542 /* Encrypt/decrypt must be performed in place */
3543 if (out != in || len < (EVP_CCM_TLS_EXPLICIT_IV_LEN + (size_t)cctx->M))
3544 return -1;
3545 /* If encrypting set explicit IV from sequence number (start of AAD) */
3546 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3547 memcpy(out, EVP_CIPHER_CTX_buf_noconst(ctx),
3548 EVP_CCM_TLS_EXPLICIT_IV_LEN);
3549 /* Get rest of IV from explicit IV */
3550 memcpy(ctx->iv + EVP_CCM_TLS_FIXED_IV_LEN, in,
3551 EVP_CCM_TLS_EXPLICIT_IV_LEN);
3552 /* Correct length value */
3553 len -= EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->M;
3554 if (CRYPTO_ccm128_setiv(ccm, ctx->iv, 15 - cctx->L,
3555 len))
3556 return -1;
3557 /* Use saved AAD */
3558 CRYPTO_ccm128_aad(ccm, EVP_CIPHER_CTX_buf_noconst(ctx),
3559 cctx->tls_aad_len);
3560 /* Fix buffer to point to payload */
3561 in += EVP_CCM_TLS_EXPLICIT_IV_LEN;
3562 out += EVP_CCM_TLS_EXPLICIT_IV_LEN;
3563 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3564 if (cctx->str ? CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len,
3565 cctx->str) :
3566 CRYPTO_ccm128_encrypt(ccm, in, out, len))
3567 return -1;
3568 if (!CRYPTO_ccm128_tag(ccm, out + len, cctx->M))
3569 return -1;
3570 return len + EVP_CCM_TLS_EXPLICIT_IV_LEN + cctx->M;
3571 } else {
3572 if (cctx->str ? !CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len,
3573 cctx->str) :
3574 !CRYPTO_ccm128_decrypt(ccm, in, out, len)) {
3575 unsigned char tag[16];
3576 if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) {
3577 if (!CRYPTO_memcmp(tag, in + len, cctx->M))
3578 return len;
3579 }
3580 }
3581 OPENSSL_cleanse(out, len);
3582 return -1;
3583 }
3584}
3585
3586static int aes_ccm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3587 const unsigned char *in, size_t len)
3588{
3589 EVP_AES_CCM_CTX *cctx = EVP_C_DATA(EVP_AES_CCM_CTX,ctx);
3590 CCM128_CONTEXT *ccm = &cctx->ccm;
3591 /* If not set up, return error */
3592 if (!cctx->key_set)
3593 return -1;
3594
3595 if (cctx->tls_aad_len >= 0)
3596 return aes_ccm_tls_cipher(ctx, out, in, len);
3597
3598 /* EVP_*Final() doesn't return any data */
3599 if (in == NULL && out != NULL)
3600 return 0;
3601
3602 if (!cctx->iv_set)
3603 return -1;
3604
3605 if (!out) {
3606 if (!in) {
3607 if (CRYPTO_ccm128_setiv(ccm, ctx->iv,
3608 15 - cctx->L, len))
3609 return -1;
3610 cctx->len_set = 1;
3611 return len;
3612 }
3613 /* AAD can only be processed once the message length has been set */
3614 if (!cctx->len_set && len)
3615 return -1;
3616 CRYPTO_ccm128_aad(ccm, in, len);
3617 return len;
3618 }
3619
3620 /* The tag must be set before actually decrypting data */
3621 if (!EVP_CIPHER_CTX_is_encrypting(ctx) && !cctx->tag_set)
3622 return -1;
3623
3624 /* If the length has not been set yet, set it now */
3625 if (!cctx->len_set) {
3626 if (CRYPTO_ccm128_setiv(ccm, ctx->iv, 15 - cctx->L, len))
3627 return -1;
3628 cctx->len_set = 1;
3629 }
3630 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3631 if (cctx->str ? CRYPTO_ccm128_encrypt_ccm64(ccm, in, out, len,
3632 cctx->str) :
3633 CRYPTO_ccm128_encrypt(ccm, in, out, len))
3634 return -1;
3635 cctx->tag_set = 1;
3636 return len;
3637 } else {
3638 int rv = -1;
3639 if (cctx->str ? !CRYPTO_ccm128_decrypt_ccm64(ccm, in, out, len,
3640 cctx->str) :
3641 !CRYPTO_ccm128_decrypt(ccm, in, out, len)) {
3642 unsigned char tag[16];
3643 if (CRYPTO_ccm128_tag(ccm, tag, cctx->M)) {
3644 if (!CRYPTO_memcmp(tag, EVP_CIPHER_CTX_buf_noconst(ctx),
3645 cctx->M))
3646 rv = len;
3647 }
3648 }
3649 if (rv == -1)
3650 OPENSSL_cleanse(out, len);
3651 cctx->iv_set = 0;
3652 cctx->tag_set = 0;
3653 cctx->len_set = 0;
3654 return rv;
3655 }
3656}
3657
3658#define aes_ccm_cleanup NULL
3659
3660BLOCK_CIPHER_custom(NID_aes, 128, 1, 12, ccm, CCM,
3661 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3662BLOCK_CIPHER_custom(NID_aes, 192, 1, 12, ccm, CCM,
3663 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3664BLOCK_CIPHER_custom(NID_aes, 256, 1, 12, ccm, CCM,
3665 EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
3666
3667typedef struct {
3668 union {
3669 OSSL_UNION_ALIGN;
3670 AES_KEY ks;
3671 } ks;
3672 /* Points at the IV; NULL indicates that no IV has been set */
3673 unsigned char *iv;
3674} EVP_AES_WRAP_CTX;
3675
3676static int aes_wrap_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3677 const unsigned char *iv, int enc)
3678{
3679 int len;
3680 EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx);
3681
3682 if (iv == NULL && key == NULL)
3683 return 1;
3684 if (key != NULL) {
3685 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
3686
3687 if (keylen <= 0) {
3688 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
3689 return 0;
3690 }
3691 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3692 AES_set_encrypt_key(key, keylen, &wctx->ks.ks);
3693 else
3694 AES_set_decrypt_key(key, keylen, &wctx->ks.ks);
3695 if (iv == NULL)
3696 wctx->iv = NULL;
3697 }
3698 if (iv != NULL) {
3699 if ((len = EVP_CIPHER_CTX_get_iv_length(ctx)) < 0)
3700 return 0;
3701 memcpy(ctx->iv, iv, len);
3702 wctx->iv = ctx->iv;
3703 }
3704 return 1;
3705}
3706
3707static int aes_wrap_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
3708 const unsigned char *in, size_t inlen)
3709{
3710 EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx);
3711 size_t rv;
3712 /* AES wrap with padding has IV length of 4, without padding 8 */
3713 int pad = EVP_CIPHER_CTX_get_iv_length(ctx) == 4;
3714 /* No final operation so always return zero length */
3715 if (!in)
3716 return 0;
3717 /* Input length must always be non-zero */
3718 if (!inlen)
3719 return -1;
3720 /* If decrypting need at least 16 bytes and multiple of 8 */
3721 if (!EVP_CIPHER_CTX_is_encrypting(ctx) && (inlen < 16 || inlen & 0x7))
3722 return -1;
3723 /* If not padding input must be multiple of 8 */
3724 if (!pad && inlen & 0x7)
3725 return -1;
3726 if (ossl_is_partially_overlapping(out, in, inlen)) {
3727 ERR_raise(ERR_LIB_EVP, EVP_R_PARTIALLY_OVERLAPPING);
3728 return 0;
3729 }
3730 if (!out) {
3731 if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
3732 /* If padding round up to multiple of 8 */
3733 if (pad)
3734 inlen = (inlen + 7) / 8 * 8;
3735 /* 8 byte prefix */
3736 return inlen + 8;
3737 } else {
3738 /*
3739 * If not padding, the output will be exactly 8 bytes smaller than the
3740 * input. If padding, it will be at least 8 bytes smaller, but we don't
3741 * know exactly how much.
3742 */
3743 return inlen - 8;
3744 }
3745 }
3746 if (pad) {
3747 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3748 rv = CRYPTO_128_wrap_pad(&wctx->ks.ks, wctx->iv,
3749 out, in, inlen,
3750 (block128_f) AES_encrypt);
3751 else
3752 rv = CRYPTO_128_unwrap_pad(&wctx->ks.ks, wctx->iv,
3753 out, in, inlen,
3754 (block128_f) AES_decrypt);
3755 } else {
3756 if (EVP_CIPHER_CTX_is_encrypting(ctx))
3757 rv = CRYPTO_128_wrap(&wctx->ks.ks, wctx->iv,
3758 out, in, inlen, (block128_f) AES_encrypt);
3759 else
3760 rv = CRYPTO_128_unwrap(&wctx->ks.ks, wctx->iv,
3761 out, in, inlen, (block128_f) AES_decrypt);
3762 }
3763 return rv ? (int)rv : -1;
3764}
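
/*-
 * Illustrative key-wrap usage (a sketch, not part of the original source):
 * wrapping a 16-byte key with AES-128-WRAP produces inlen + 8 bytes, per
 * the length logic above. The function name is hypothetical and error
 * handling is omitted.
 */
#if 0
static int wrap_example(const unsigned char kek[16],
                        const unsigned char key_in[16],
                        unsigned char out[24])
{
    EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
    int outl;

    EVP_CIPHER_CTX_set_flags(c, EVP_CIPHER_CTX_FLAG_WRAP_ALLOW);
    EVP_EncryptInit_ex(c, EVP_aes_128_wrap(), NULL, kek, NULL);
    EVP_EncryptUpdate(c, out, &outl, key_in, 16);  /* outl == 24 */
    EVP_CIPHER_CTX_free(c);
    return 1;
}
#endif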
3765
3766#define WRAP_FLAGS (EVP_CIPH_WRAP_MODE \
3767 | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
3768 | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_FLAG_DEFAULT_ASN1)
3769
3770static const EVP_CIPHER aes_128_wrap = {
3771 NID_id_aes128_wrap,
3772 8, 16, 8, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3773 aes_wrap_init_key, aes_wrap_cipher,
3774 NULL,
3775 sizeof(EVP_AES_WRAP_CTX),
3776 NULL, NULL, NULL, NULL
3777};
3778
3779const EVP_CIPHER *EVP_aes_128_wrap(void)
3780{
3781 return &aes_128_wrap;
3782}
3783
3784static const EVP_CIPHER aes_192_wrap = {
3785 NID_id_aes192_wrap,
3786 8, 24, 8, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3787 aes_wrap_init_key, aes_wrap_cipher,
3788 NULL,
3789 sizeof(EVP_AES_WRAP_CTX),
3790 NULL, NULL, NULL, NULL
3791};
3792
3793const EVP_CIPHER *EVP_aes_192_wrap(void)
3794{
3795 return &aes_192_wrap;
3796}
3797
3798static const EVP_CIPHER aes_256_wrap = {
3799 NID_id_aes256_wrap,
3800 8, 32, 8, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3801 aes_wrap_init_key, aes_wrap_cipher,
3802 NULL,
3803 sizeof(EVP_AES_WRAP_CTX),
3804 NULL, NULL, NULL, NULL
3805};
3806
3807const EVP_CIPHER *EVP_aes_256_wrap(void)
3808{
3809 return &aes_256_wrap;
3810}
3811
3812static const EVP_CIPHER aes_128_wrap_pad = {
3813 NID_id_aes128_wrap_pad,
3814 8, 16, 4, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3815 aes_wrap_init_key, aes_wrap_cipher,
3816 NULL,
3817 sizeof(EVP_AES_WRAP_CTX),
3818 NULL, NULL, NULL, NULL
3819};
3820
3821const EVP_CIPHER *EVP_aes_128_wrap_pad(void)
3822{
3823 return &aes_128_wrap_pad;
3824}
3825
3826static const EVP_CIPHER aes_192_wrap_pad = {
3827 NID_id_aes192_wrap_pad,
3828 8, 24, 4, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3829 aes_wrap_init_key, aes_wrap_cipher,
3830 NULL,
3831 sizeof(EVP_AES_WRAP_CTX),
3832 NULL, NULL, NULL, NULL
3833};
3834
3835const EVP_CIPHER *EVP_aes_192_wrap_pad(void)
3836{
3837 return &aes_192_wrap_pad;
3838}
3839
3840static const EVP_CIPHER aes_256_wrap_pad = {
3841 NID_id_aes256_wrap_pad,
3842 8, 32, 4, WRAP_FLAGS, EVP_ORIG_GLOBAL,
3843 aes_wrap_init_key, aes_wrap_cipher,
3844 NULL,
3845 sizeof(EVP_AES_WRAP_CTX),
3846 NULL, NULL, NULL, NULL
3847};
3848
3849const EVP_CIPHER *EVP_aes_256_wrap_pad(void)
3850{
3851 return &aes_256_wrap_pad;
3852}
3853
3854#ifndef OPENSSL_NO_OCB
3855static int aes_ocb_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr)
3856{
3857 EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,c);
3858 EVP_CIPHER_CTX *newc;
3859 EVP_AES_OCB_CTX *new_octx;
3860
3861 switch (type) {
3862 case EVP_CTRL_INIT:
3863 octx->key_set = 0;
3864 octx->iv_set = 0;
3865 octx->ivlen = EVP_CIPHER_get_iv_length(c->cipher);
3866 octx->iv = c->iv;
3867 octx->taglen = 16;
3868 octx->data_buf_len = 0;
3869 octx->aad_buf_len = 0;
3870 return 1;
3871
3872 case EVP_CTRL_GET_IVLEN:
3873 *(int *)ptr = octx->ivlen;
3874 return 1;
3875
3876 case EVP_CTRL_AEAD_SET_IVLEN:
3877 /* IV len must be 1 to 15 */
3878 if (arg <= 0 || arg > 15)
3879 return 0;
3880
3881 octx->ivlen = arg;
3882 return 1;
3883
3884 case EVP_CTRL_AEAD_SET_TAG:
3885 if (ptr == NULL) {
3886 /* Tag len must be 0 to 16 */
3887 if (arg < 0 || arg > 16)
3888 return 0;
3889
3890 octx->taglen = arg;
3891 return 1;
3892 }
3893 if (arg != octx->taglen || EVP_CIPHER_CTX_is_encrypting(c))
3894 return 0;
3895 memcpy(octx->tag, ptr, arg);
3896 return 1;
3897
3898 case EVP_CTRL_AEAD_GET_TAG:
3899 if (arg != octx->taglen || !EVP_CIPHER_CTX_is_encrypting(c))
3900 return 0;
3901
3902 memcpy(ptr, octx->tag, arg);
3903 return 1;
3904
3905 case EVP_CTRL_COPY:
3906 newc = (EVP_CIPHER_CTX *)ptr;
3907 new_octx = EVP_C_DATA(EVP_AES_OCB_CTX,newc);
3908 return CRYPTO_ocb128_copy_ctx(&new_octx->ocb, &octx->ocb,
3909 &new_octx->ksenc.ks,
3910 &new_octx->ksdec.ks);
3911
3912 default:
3913 return -1;
3914
3915 }
3916}
3917
3918static int aes_ocb_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
3919 const unsigned char *iv, int enc)
3920{
3921 EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);
3922
3923 if (iv == NULL && key == NULL)
3924 return 1;
3925
3926 if (key != NULL) {
3927 const int keylen = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
3928
3929 if (keylen <= 0) {
3930 ERR_raise(ERR_LIB_EVP, EVP_R_INVALID_KEY_LENGTH);
3931 return 0;
3932 }
3933 do {
3934 /*
3935 * We set both the encrypt and decrypt key here because decryption
3936 * needs both. We could possibly optimise by not setting the decrypt
3937 * key for an encryption-only operation.
3938 */
3939# ifdef HWAES_CAPABLE
3940 if (HWAES_CAPABLE) {
3941 HWAES_set_encrypt_key(key, keylen, &octx->ksenc.ks);
3942 HWAES_set_decrypt_key(key, keylen, &octx->ksdec.ks);
3943 if (!CRYPTO_ocb128_init(&octx->ocb,
3944 &octx->ksenc.ks, &octx->ksdec.ks,
3945 (block128_f) HWAES_encrypt,
3946 (block128_f) HWAES_decrypt,
3947 enc ? HWAES_ocb_encrypt
3948 : HWAES_ocb_decrypt))
3949 return 0;
3950 break;
3951 }
3952# endif
3953# ifdef VPAES_CAPABLE
3954 if (VPAES_CAPABLE) {
3955 vpaes_set_encrypt_key(key, keylen, &octx->ksenc.ks);
3956 vpaes_set_decrypt_key(key, keylen, &octx->ksdec.ks);
3957 if (!CRYPTO_ocb128_init(&octx->ocb,
3958 &octx->ksenc.ks, &octx->ksdec.ks,
3959 (block128_f) vpaes_encrypt,
3960 (block128_f) vpaes_decrypt,
3961 NULL))
3962 return 0;
3963 break;
3964 }
3965# endif
3966 AES_set_encrypt_key(key, keylen, &octx->ksenc.ks);
3967 AES_set_decrypt_key(key, keylen, &octx->ksdec.ks);
3968 if (!CRYPTO_ocb128_init(&octx->ocb,
3969 &octx->ksenc.ks, &octx->ksdec.ks,
3970 (block128_f) AES_encrypt,
3971 (block128_f) AES_decrypt,
3972 NULL))
3973 return 0;
3974 } while (0);
3976
3977 /*
3978 * If we have an iv we can set it directly, otherwise use saved IV.
3979 */
3980 if (iv == NULL && octx->iv_set)
3981 iv = octx->iv;
3982 if (iv) {
3983 if (CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen)
3984 != 1)
3985 return 0;
3986 octx->iv_set = 1;
3987 }
3988 octx->key_set = 1;
3989 } else {
3990 /* If key set use IV, otherwise copy */
3991 if (octx->key_set)
3992 CRYPTO_ocb128_setiv(&octx->ocb, iv, octx->ivlen, octx->taglen);
3993 else
3994 memcpy(octx->iv, iv, octx->ivlen);
3995 octx->iv_set = 1;
3996 }
3997 return 1;
3998}
3999
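/*
 * A minimal sketch of the split initialisation that aes_ocb_init_key()
 * supports: a key-only call schedules ksenc/ksdec and defers the IV, and a
 * later IV-only call programs it via CRYPTO_ocb128_setiv(). Assumes a
 * context on which an OCB cipher (default 12-byte IV) has already been
 * selected; the key and nonce values are placeholders for illustration.
 */
static int aes_ocb_example_set_key_then_iv(EVP_CIPHER_CTX *ctx)
{
    static const unsigned char key[16] = { 0 };          /* demo key only */
    static const unsigned char nonce[12] = { 1, 2, 3 };  /* demo nonce only */

    /* Key-only call: key_set becomes 1, the IV remains pending */
    if (!EVP_EncryptInit_ex(ctx, NULL, NULL, key, NULL))
        return 0;
    /* IV-only call: key_set is already 1, so the IV is applied immediately */
    if (!EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, nonce))
        return 0;
    return 1;
}
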
static int aes_ocb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                          const unsigned char *in, size_t len)
{
    unsigned char *buf;
    int *buf_len;
    int written_len = 0;
    size_t trailing_len;
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,ctx);

    /* If IV or Key not set then return error */
    if (!octx->iv_set)
        return -1;

    if (!octx->key_set)
        return -1;

    if (in != NULL) {
        /*
         * Need to ensure we are only passing full blocks to low-level OCB
         * routines. We do it here rather than in EVP_EncryptUpdate/
         * EVP_DecryptUpdate because we need to pass full blocks of AAD too
         * and those routines don't support that
         */

        /* Are we dealing with AAD or normal data here? */
        if (out == NULL) {
            buf = octx->aad_buf;
            buf_len = &(octx->aad_buf_len);
        } else {
            buf = octx->data_buf;
            buf_len = &(octx->data_buf_len);

            if (ossl_is_partially_overlapping(out + *buf_len, in, len)) {
                ERR_raise(ERR_LIB_EVP, EVP_R_PARTIALLY_OVERLAPPING);
                return 0;
            }
        }

        /*
         * If we've got a partially filled buffer from a previous call then
         * use that data first
         */
        if (*buf_len > 0) {
            unsigned int remaining;

            remaining = AES_BLOCK_SIZE - (*buf_len);
            if (remaining > len) {
                memcpy(buf + (*buf_len), in, len);
                *(buf_len) += len;
                return 0;
            }
            memcpy(buf + (*buf_len), in, remaining);

            /*
             * If we get here we've filled the buffer, so process it
             */
            len -= remaining;
            in += remaining;
            if (out == NULL) {
                if (!CRYPTO_ocb128_aad(&octx->ocb, buf, AES_BLOCK_SIZE))
                    return -1;
            } else if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt(&octx->ocb, buf, out,
                                           AES_BLOCK_SIZE))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt(&octx->ocb, buf, out,
                                           AES_BLOCK_SIZE))
                    return -1;
            }
            written_len = AES_BLOCK_SIZE;
            *buf_len = 0;
            if (out != NULL)
                out += AES_BLOCK_SIZE;
        }

        /* Do we have a partial block to handle at the end? */
        trailing_len = len % AES_BLOCK_SIZE;

        /*
         * If we've got some full blocks to handle, then process these first
         */
        if (len != trailing_len) {
            if (out == NULL) {
                if (!CRYPTO_ocb128_aad(&octx->ocb, in, len - trailing_len))
                    return -1;
            } else if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt
                    (&octx->ocb, in, out, len - trailing_len))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt
                    (&octx->ocb, in, out, len - trailing_len))
                    return -1;
            }
            written_len += len - trailing_len;
            in += len - trailing_len;
        }

        /* Handle any trailing partial block */
        if (trailing_len > 0) {
            memcpy(buf, in, trailing_len);
            *buf_len = trailing_len;
        }

        return written_len;
    } else {
        /*
         * First of all empty the buffer of any partial block that we might
         * have been provided - both for data and AAD
         */
        if (octx->data_buf_len > 0) {
            if (EVP_CIPHER_CTX_is_encrypting(ctx)) {
                if (!CRYPTO_ocb128_encrypt(&octx->ocb, octx->data_buf, out,
                                           octx->data_buf_len))
                    return -1;
            } else {
                if (!CRYPTO_ocb128_decrypt(&octx->ocb, octx->data_buf, out,
                                           octx->data_buf_len))
                    return -1;
            }
            written_len = octx->data_buf_len;
            octx->data_buf_len = 0;
        }
        if (octx->aad_buf_len > 0) {
            if (!CRYPTO_ocb128_aad
                (&octx->ocb, octx->aad_buf, octx->aad_buf_len))
                return -1;
            octx->aad_buf_len = 0;
        }
        /* If decrypting then verify */
        if (!EVP_CIPHER_CTX_is_encrypting(ctx)) {
            if (octx->taglen < 0)
                return -1;
            if (CRYPTO_ocb128_finish(&octx->ocb,
                                     octx->tag, octx->taglen) != 0)
                return -1;
            octx->iv_set = 0;
            return written_len;
        }
        /* If encrypting then just get the tag */
        if (CRYPTO_ocb128_tag(&octx->ocb, octx->tag, 16) != 1)
            return -1;
        /* Don't reuse the IV */
        octx->iv_set = 0;
        return written_len;
    }
}

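/*
 * A condensed streaming sketch for aes_ocb_cipher(): because the function
 * buffers partial blocks in aad_buf/data_buf, callers can feed AAD and
 * plaintext in arbitrary-sized chunks. Assumes a context already set up
 * with key, nonce and the default 16-byte tag; error handling is reduced
 * to simple returns.
 */
static int aes_ocb_example_encrypt_stream(EVP_CIPHER_CTX *ctx,
                                          const unsigned char *aad, int aadlen,
                                          const unsigned char *pt, int ptlen,
                                          unsigned char *ct,
                                          unsigned char tag[16])
{
    int outl, total, first = ptlen < 5 ? ptlen : 5;

    /* AAD pass: out == NULL routes the input into aad_buf above */
    if (!EVP_EncryptUpdate(ctx, NULL, &outl, aad, aadlen))
        return -1;
    /* Deliberately odd-sized first chunk to force partial-block buffering */
    if (!EVP_EncryptUpdate(ctx, ct, &outl, pt, first))
        return -1;
    total = outl;
    if (!EVP_EncryptUpdate(ctx, ct + total, &outl, pt + first, ptlen - first))
        return -1;
    total += outl;
    /* The final call (in == NULL) flushes both buffers and computes the tag */
    if (!EVP_EncryptFinal_ex(ctx, ct + total, &outl))
        return -1;
    total += outl;
    if (!EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, 16, tag))
        return -1;
    return total;
}
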
static int aes_ocb_cleanup(EVP_CIPHER_CTX *c)
{
    EVP_AES_OCB_CTX *octx = EVP_C_DATA(EVP_AES_OCB_CTX,c);
    CRYPTO_ocb128_cleanup(&octx->ocb);
    return 1;
}

BLOCK_CIPHER_custom(NID_aes, 128, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 192, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
BLOCK_CIPHER_custom(NID_aes, 256, 16, 12, ocb, OCB,
                    EVP_CIPH_FLAG_AEAD_CIPHER | CUSTOM_FLAGS)
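
/*
 * The three BLOCK_CIPHER_custom() invocations above expand into the
 * EVP_aes_128_ocb(), EVP_aes_192_ocb() and EVP_aes_256_ocb() entry points
 * (16-byte block, 12-byte default IV). On the decrypt side the expected tag
 * must be supplied through EVP_CTRL_AEAD_SET_TAG before the final call,
 * which mirrors the verification branch of aes_ocb_cipher(). A minimal
 * one-shot sketch (not upstream code), with simplified error handling:
 */
static int aes_ocb_example_decrypt_verify(const unsigned char key[32],
                                          const unsigned char nonce[12],
                                          const unsigned char *ct, int ctlen,
                                          const unsigned char tag[16],
                                          unsigned char *pt)
{
    EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
    int outl, total = -1;

    if (ctx == NULL)
        return -1;
    if (EVP_DecryptInit_ex(ctx, EVP_aes_256_ocb(), NULL, key, nonce)
        && EVP_DecryptUpdate(ctx, pt, &outl, ct, ctlen)) {
        total = outl;
        /* SET_TAG on decrypt: arg must equal the context's taglen (16) */
        if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, 16, (void *)tag)
            && EVP_DecryptFinal_ex(ctx, pt + total, &outl))
            total += outl;  /* CRYPTO_ocb128_finish() verified the tag */
        else
            total = -1;     /* authentication failure */
    }
    EVP_CIPHER_CTX_free(ctx);
    return total;
}
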
#endif /* OPENSSL_NO_OCB */