VirtualBox

source: vbox/trunk/src/libs/openssl-3.0.3/engines/e_padlock.c @ 95224

Last change on this file since 95224 was 94082, checked in by vboxsync, 3 years ago

libs/openssl-3.0.1: started applying and adjusting our OpenSSL changes to 3.0.1. bugref:10128

File size: 22.8 KB
 
/*
 * Copyright 2004-2021 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/*
 * This file uses the low level AES and engine functions (which are deprecated
 * for non-internal use) in order to implement the padlock engine AES ciphers.
 */
#define OPENSSL_SUPPRESS_DEPRECATED

#include <stdio.h>
#include <string.h>

#include <openssl/opensslconf.h>
#include <openssl/crypto.h>
#include <openssl/engine.h>
#include <openssl/evp.h>
#include <openssl/aes.h>
#include <openssl/rand.h>
#include <openssl/err.h>
#include <openssl/modes.h>

#ifndef OPENSSL_NO_PADLOCKENG

/*
 * VIA PadLock AES is available *ONLY* on some x86 CPUs. Not only does it
 * not exist elsewhere, it can't even be compiled on other platforms!
 */

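/*
 * PADLOCK_ASM is presumably defined by the build system only when the
 * matching x86 assembler module (engines/asm/e_padlock-x86.pl or
 * e_padlock-x86_64.pl) is compiled in, so on all other architectures
 * COMPILE_PADLOCKENG stays undefined and this engine reduces to a stub
 * (see the bind_engine() fallback at the end of the file).
 */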
# undef COMPILE_PADLOCKENG
# if defined(PADLOCK_ASM)
#  define COMPILE_PADLOCKENG
#  ifdef OPENSSL_NO_DYNAMIC_ENGINE
static ENGINE *ENGINE_padlock(void);
#  endif
# endif

# ifdef OPENSSL_NO_DYNAMIC_ENGINE
void engine_load_padlock_int(void);
void engine_load_padlock_int(void)
{
/* On non-x86 CPUs it just returns. */
#  ifdef COMPILE_PADLOCKENG
    ENGINE *toadd = ENGINE_padlock();
    if (!toadd)
        return;
    ERR_set_mark();
    ENGINE_add(toadd);
    /*
     * If the "add" worked, it gets a structural reference. So either way, we
     * release our just-created reference.
     */
    ENGINE_free(toadd);
    /*
     * If the "add" didn't work, it was probably a conflict because it was
     * already added (e.g. someone calling ENGINE_load_blah then calling
     * ENGINE_load_builtin_engines() perhaps).
     */
    ERR_pop_to_mark();
#  endif
}

# endif

# ifdef COMPILE_PADLOCKENG

/* Functions for ENGINE detection and control */
static int padlock_available(void);
static int padlock_init(ENGINE *e);

/* RNG Stuff */
static RAND_METHOD padlock_rand;

/* Cipher Stuff */
static int padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
                           const int **nids, int nid);

/* Engine names */
static const char *padlock_id = "padlock";
static char padlock_name[100];

/* Available features */
static int padlock_use_ace = 0; /* Advanced Cryptography Engine */
static int padlock_use_rng = 0; /* Random Number Generator */

/* ===== Engine "management" functions ===== */

/* Prepare the ENGINE structure for registration */
static int padlock_bind_helper(ENGINE *e)
{
    /* Check available features */
    padlock_available();

    /*
     * RNG is currently disabled for reasons discussed in commentary just
     * before the padlock_rand_bytes function.
     */
    padlock_use_rng = 0;

    /* Generate a nice engine name with available features */
    BIO_snprintf(padlock_name, sizeof(padlock_name),
                 "VIA PadLock (%s, %s)",
                 padlock_use_rng ? "RNG" : "no-RNG",
                 padlock_use_ace ? "ACE" : "no-ACE");

    /* Register everything or return with an error */
    if (!ENGINE_set_id(e, padlock_id) ||
        !ENGINE_set_name(e, padlock_name) ||
        !ENGINE_set_init_function(e, padlock_init) ||
        (padlock_use_ace && !ENGINE_set_ciphers(e, padlock_ciphers)) ||
        (padlock_use_rng && !ENGINE_set_RAND(e, &padlock_rand))) {
        return 0;
    }

    /* Everything looks good */
    return 1;
}

#  ifdef OPENSSL_NO_DYNAMIC_ENGINE
/* Constructor */
static ENGINE *ENGINE_padlock(void)
{
    ENGINE *eng = ENGINE_new();

    if (eng == NULL) {
        return NULL;
    }

    if (!padlock_bind_helper(eng)) {
        ENGINE_free(eng);
        return NULL;
    }

    return eng;
}
#  endif

/* Check availability of the engine */
static int padlock_init(ENGINE *e)
{
    return (padlock_use_rng || padlock_use_ace);
}

/*
 * This stuff is needed if this ENGINE is being compiled into a
 * self-contained shared-library.
 */
#  ifndef OPENSSL_NO_DYNAMIC_ENGINE
static int padlock_bind_fn(ENGINE *e, const char *id)
{
    if (id && (strcmp(id, padlock_id) != 0)) {
        return 0;
    }

    if (!padlock_bind_helper(e)) {
        return 0;
    }

    return 1;
}

IMPLEMENT_DYNAMIC_CHECK_FN()
IMPLEMENT_DYNAMIC_BIND_FN(padlock_bind_fn)
#  endif /* !OPENSSL_NO_DYNAMIC_ENGINE */
/* ===== Here comes the "real" engine ===== */

/* Some AES-related constants */
#  define AES_BLOCK_SIZE        16
#  define AES_KEY_SIZE_128      16
#  define AES_KEY_SIZE_192      24
#  define AES_KEY_SIZE_256      32
/*
 * Here we store the status information relevant to the current context.
 */
/*
 * BIG FAT WARNING: Inline assembler in PADLOCK_XCRYPT_ASM() depends on
 * the order of items in this structure. Don't blindly modify, reorder,
 * etc!
 */
struct padlock_cipher_data {
    unsigned char iv[AES_BLOCK_SIZE]; /* Initialization vector */
    union {
        unsigned int pad[4];
        struct {
            int rounds:4;
            int dgst:1;         /* n/a in C3 */
            int align:1;        /* n/a in C3 */
            int ciphr:1;        /* n/a in C3 */
            unsigned int keygen:1;
            int interm:1;
            unsigned int encdec:1;
            int ksize:2;
        } b;
    } cword;                    /* Control word */
    AES_KEY ks;                 /* Encryption key */
};
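/*
 * Note: the PadLock xcrypt instructions reportedly require this context
 * (in particular the control word) to be 16-byte aligned, which is why
 * cipher contexts are always reached through ALIGNED_CIPHER_DATA() below
 * and why the EVP implementation context size is padded by 16 bytes.
 */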

/* Interface to assembler module */
unsigned int padlock_capability(void);
void padlock_key_bswap(AES_KEY *key);
void padlock_verify_context(struct padlock_cipher_data *ctx);
void padlock_reload_key(void);
void padlock_aes_block(void *out, const void *inp,
                       struct padlock_cipher_data *ctx);
int padlock_ecb_encrypt(void *out, const void *inp,
                        struct padlock_cipher_data *ctx, size_t len);
int padlock_cbc_encrypt(void *out, const void *inp,
                        struct padlock_cipher_data *ctx, size_t len);
int padlock_cfb_encrypt(void *out, const void *inp,
                        struct padlock_cipher_data *ctx, size_t len);
int padlock_ofb_encrypt(void *out, const void *inp,
                        struct padlock_cipher_data *ctx, size_t len);
int padlock_ctr32_encrypt(void *out, const void *inp,
                          struct padlock_cipher_data *ctx, size_t len);
int padlock_xstore(void *out, int edx);
void padlock_sha1_oneshot(void *ctx, const void *inp, size_t len);
void padlock_sha1(void *ctx, const void *inp, size_t len);
void padlock_sha256_oneshot(void *ctx, const void *inp, size_t len);
void padlock_sha256(void *ctx, const void *inp, size_t len);

/*
 * Load supported features of the CPU to see if the PadLock is available.
 */
static int padlock_available(void)
{
    unsigned int edx = padlock_capability();

    /* Fill up some flags */
    padlock_use_ace = ((edx & (0x3 << 6)) == (0x3 << 6));
    padlock_use_rng = ((edx & (0x3 << 2)) == (0x3 << 2));

    return padlock_use_ace + padlock_use_rng;
}
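/*
 * The bit pairs tested above presumably map to VIA's extended CPUID leaf
 * 0xC0000001: EDX bits 6/7 signal "ACE present/enabled" and bits 2/3
 * signal "RNG present/enabled". Either feature counts only when both its
 * "present" and "enabled" bits are set, hence the == (0x3 << n) tests.
 */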

/* ===== AES encryption/decryption ===== */

#  if defined(NID_aes_128_cfb128) && ! defined (NID_aes_128_cfb)
#   define NID_aes_128_cfb NID_aes_128_cfb128
#  endif

#  if defined(NID_aes_128_ofb128) && ! defined (NID_aes_128_ofb)
#   define NID_aes_128_ofb NID_aes_128_ofb128
#  endif

#  if defined(NID_aes_192_cfb128) && ! defined (NID_aes_192_cfb)
#   define NID_aes_192_cfb NID_aes_192_cfb128
#  endif

#  if defined(NID_aes_192_ofb128) && ! defined (NID_aes_192_ofb)
#   define NID_aes_192_ofb NID_aes_192_ofb128
#  endif

#  if defined(NID_aes_256_cfb128) && ! defined (NID_aes_256_cfb)
#   define NID_aes_256_cfb NID_aes_256_cfb128
#  endif

#  if defined(NID_aes_256_ofb128) && ! defined (NID_aes_256_ofb)
#   define NID_aes_256_ofb NID_aes_256_ofb128
#  endif

/* List of supported ciphers. */
static const int padlock_cipher_nids[] = {
    NID_aes_128_ecb,
    NID_aes_128_cbc,
    NID_aes_128_cfb,
    NID_aes_128_ofb,
    NID_aes_128_ctr,

    NID_aes_192_ecb,
    NID_aes_192_cbc,
    NID_aes_192_cfb,
    NID_aes_192_ofb,
    NID_aes_192_ctr,

    NID_aes_256_ecb,
    NID_aes_256_cbc,
    NID_aes_256_cfb,
    NID_aes_256_ofb,
    NID_aes_256_ctr
};

static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids) /
                                      sizeof(padlock_cipher_nids[0]));

/* Function prototypes ... */
static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                                const unsigned char *iv, int enc);

#  define NEAREST_ALIGNED(ptr) ( (unsigned char *)(ptr) +       \
        ( (0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F ) )
#  define ALIGNED_CIPHER_DATA(ctx) ((struct padlock_cipher_data *)\
        NEAREST_ALIGNED(EVP_CIPHER_CTX_get_cipher_data(ctx)))
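/*
 * For example, a cipher-data pointer ending in ...0x09 is advanced by
 * (0x10 - 0x09) & 0x0F = 7 bytes to the next 16-byte boundary, while an
 * already aligned pointer is advanced by (0x10 - 0x00) & 0x0F = 0 bytes;
 * the extra 16 bytes requested via impl_ctx_size below pay for this slack.
 */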

static int
padlock_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    return padlock_ecb_encrypt(out_arg, in_arg,
                               ALIGNED_CIPHER_DATA(ctx), nbytes);
}

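/*
 * EVP keeps the chaining IV inside the EVP_CIPHER_CTX, while the PadLock
 * code updates the copy in padlock_cipher_data; hence the copy-in before
 * and copy-out after each call below, so that consecutive EVP update
 * calls chain correctly.
 */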
static int
padlock_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    int ret;

    memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);
    if ((ret = padlock_cbc_encrypt(out_arg, in_arg, cdata, nbytes)))
        memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);
    return ret;
}

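/*
 * CFB: finish any partial keystream block left over from the previous
 * call byte by byte, hand whole blocks to the hardware, then handle the
 * trailing bytes in software. The decryption path in the tail below
 * temporarily flips cword.b.encdec because CFB uses the forward (encrypt)
 * AES transform in both directions when generating the keystream.
 */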
static int
padlock_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    size_t chunk;

    if ((chunk = EVP_CIPHER_CTX_get_num(ctx))) { /* borrow chunk variable */
        unsigned char *ivp = EVP_CIPHER_CTX_iv_noconst(ctx);

        if (chunk >= AES_BLOCK_SIZE)
            return 0;           /* bogus value */

        if (EVP_CIPHER_CTX_is_encrypting(ctx))
            while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
                ivp[chunk] = *(out_arg++) = *(in_arg++) ^ ivp[chunk];
                chunk++, nbytes--;
        } else
            while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
                unsigned char c = *(in_arg++);
                *(out_arg++) = c ^ ivp[chunk];
                ivp[chunk++] = c, nbytes--;
            }

        EVP_CIPHER_CTX_set_num(ctx, chunk % AES_BLOCK_SIZE);
    }

    if (nbytes == 0)
        return 1;

    memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);

    if ((chunk = nbytes & ~(AES_BLOCK_SIZE - 1))) {
        if (!padlock_cfb_encrypt(out_arg, in_arg, cdata, chunk))
            return 0;
        nbytes -= chunk;
    }

    if (nbytes) {
        unsigned char *ivp = cdata->iv;

        out_arg += chunk;
        in_arg += chunk;
        EVP_CIPHER_CTX_set_num(ctx, nbytes);
        if (cdata->cword.b.encdec) {
            cdata->cword.b.encdec = 0;
            padlock_reload_key();
            padlock_aes_block(ivp, ivp, cdata);
            cdata->cword.b.encdec = 1;
            padlock_reload_key();
            while (nbytes) {
                unsigned char c = *(in_arg++);
                *(out_arg++) = c ^ *ivp;
                *(ivp++) = c, nbytes--;
            }
        } else {
            padlock_reload_key();
            padlock_aes_block(ivp, ivp, cdata);
            padlock_reload_key();
            while (nbytes) {
                *ivp = *(out_arg++) = *(in_arg++) ^ *ivp;
                ivp++, nbytes--;
            }
        }
    }

    memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);

    return 1;
}

static int
padlock_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    size_t chunk;

    /*
     * ctx->num is maintained in byte-oriented modes, such as CFB and OFB...
     */
    if ((chunk = EVP_CIPHER_CTX_get_num(ctx))) { /* borrow chunk variable */
        unsigned char *ivp = EVP_CIPHER_CTX_iv_noconst(ctx);

        if (chunk >= AES_BLOCK_SIZE)
            return 0;           /* bogus value */

        while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
            *(out_arg++) = *(in_arg++) ^ ivp[chunk];
            chunk++, nbytes--;
        }

        EVP_CIPHER_CTX_set_num(ctx, chunk % AES_BLOCK_SIZE);
    }

    if (nbytes == 0)
        return 1;

    memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);

    if ((chunk = nbytes & ~(AES_BLOCK_SIZE - 1))) {
        if (!padlock_ofb_encrypt(out_arg, in_arg, cdata, chunk))
            return 0;
        nbytes -= chunk;
    }

    if (nbytes) {
        unsigned char *ivp = cdata->iv;

        out_arg += chunk;
        in_arg += chunk;
        EVP_CIPHER_CTX_set_num(ctx, nbytes);
        padlock_reload_key();   /* empirically found */
        padlock_aes_block(ivp, ivp, cdata);
        padlock_reload_key();   /* empirically found */
        while (nbytes) {
            *(out_arg++) = *(in_arg++) ^ *ivp;
            ivp++, nbytes--;
        }
    }

    memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);

    return 1;
}

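/*
 * CRYPTO_ctr128_encrypt_ctr32() expects a ctr128_f callback that encrypts
 * whole blocks using a caller-supplied counter block; this glue adapts
 * that signature to padlock_ctr32_encrypt(), which takes its counter from
 * ctx->iv and a byte count rather than a block count.
 */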
static void padlock_ctr32_encrypt_glue(const unsigned char *in,
                                       unsigned char *out, size_t blocks,
                                       struct padlock_cipher_data *ctx,
                                       const unsigned char *ivec)
{
    memcpy(ctx->iv, ivec, AES_BLOCK_SIZE);
    padlock_ctr32_encrypt(out, in, ctx, AES_BLOCK_SIZE * blocks);
}

static int
padlock_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
                   const unsigned char *in_arg, size_t nbytes)
{
    struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
    int n = EVP_CIPHER_CTX_get_num(ctx);
    unsigned int num;

    if (n < 0)
        return 0;
    num = (unsigned int)n;

    CRYPTO_ctr128_encrypt_ctr32(in_arg, out_arg, nbytes,
                                cdata, EVP_CIPHER_CTX_iv_noconst(ctx),
                                EVP_CIPHER_CTX_buf_noconst(ctx), &num,
                                (ctr128_f) padlock_ctr32_encrypt_glue);

    EVP_CIPHER_CTX_set_num(ctx, (size_t)num);
    return 1;
}

#  define EVP_CIPHER_block_size_ECB AES_BLOCK_SIZE
#  define EVP_CIPHER_block_size_CBC AES_BLOCK_SIZE
#  define EVP_CIPHER_block_size_OFB 1
#  define EVP_CIPHER_block_size_CFB 1
#  define EVP_CIPHER_block_size_CTR 1

/*
 * Declaring so many ciphers by hand would be a pain. Instead introduce a bit
 * of preprocessor magic :-)
 */
#  define DECLARE_AES_EVP(ksize,lmode,umode)                            \
static EVP_CIPHER *_hidden_aes_##ksize##_##lmode = NULL;                \
static const EVP_CIPHER *padlock_aes_##ksize##_##lmode(void)            \
{                                                                       \
    if (_hidden_aes_##ksize##_##lmode == NULL                           \
        && ((_hidden_aes_##ksize##_##lmode =                            \
             EVP_CIPHER_meth_new(NID_aes_##ksize##_##lmode,             \
                                 EVP_CIPHER_block_size_##umode,         \
                                 AES_KEY_SIZE_##ksize)) == NULL         \
            || !EVP_CIPHER_meth_set_iv_length(_hidden_aes_##ksize##_##lmode, \
                                              AES_BLOCK_SIZE)           \
            || !EVP_CIPHER_meth_set_flags(_hidden_aes_##ksize##_##lmode, \
                                          0 | EVP_CIPH_##umode##_MODE)  \
            || !EVP_CIPHER_meth_set_init(_hidden_aes_##ksize##_##lmode, \
                                         padlock_aes_init_key)          \
            || !EVP_CIPHER_meth_set_do_cipher(_hidden_aes_##ksize##_##lmode, \
                                              padlock_##lmode##_cipher) \
            || !EVP_CIPHER_meth_set_impl_ctx_size(_hidden_aes_##ksize##_##lmode, \
                                                  sizeof(struct padlock_cipher_data) + 16) \
            || !EVP_CIPHER_meth_set_set_asn1_params(_hidden_aes_##ksize##_##lmode, \
                                                    EVP_CIPHER_set_asn1_iv) \
            || !EVP_CIPHER_meth_set_get_asn1_params(_hidden_aes_##ksize##_##lmode, \
                                                    EVP_CIPHER_get_asn1_iv))) { \
        EVP_CIPHER_meth_free(_hidden_aes_##ksize##_##lmode);            \
        _hidden_aes_##ksize##_##lmode = NULL;                           \
    }                                                                   \
    return _hidden_aes_##ksize##_##lmode;                               \
}

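/*
 * For instance, DECLARE_AES_EVP(128, cbc, CBC) expands to a lazily
 * initialised accessor roughly equivalent to:
 *
 *   static EVP_CIPHER *_hidden_aes_128_cbc = NULL;
 *   static const EVP_CIPHER *padlock_aes_128_cbc(void)
 *   {
 *       ... on first use, build an EVP_CIPHER for NID_aes_128_cbc with
 *       block size 16, key size 16, IV length 16, EVP_CIPH_CBC_MODE, and
 *       padlock_aes_init_key/padlock_cbc_cipher as init/do_cipher ...
 *   }
 */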
DECLARE_AES_EVP(128, ecb, ECB)
DECLARE_AES_EVP(128, cbc, CBC)
DECLARE_AES_EVP(128, cfb, CFB)
DECLARE_AES_EVP(128, ofb, OFB)
DECLARE_AES_EVP(128, ctr, CTR)

DECLARE_AES_EVP(192, ecb, ECB)
DECLARE_AES_EVP(192, cbc, CBC)
DECLARE_AES_EVP(192, cfb, CFB)
DECLARE_AES_EVP(192, ofb, OFB)
DECLARE_AES_EVP(192, ctr, CTR)

DECLARE_AES_EVP(256, ecb, ECB)
DECLARE_AES_EVP(256, cbc, CBC)
DECLARE_AES_EVP(256, cfb, CFB)
DECLARE_AES_EVP(256, ofb, OFB)
DECLARE_AES_EVP(256, ctr, CTR)

static int
padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids,
                int nid)
{
    /* No specific cipher => return a list of supported nids ... */
    if (!cipher) {
        *nids = padlock_cipher_nids;
        return padlock_cipher_nids_num;
    }

    /* ... or the requested "cipher" otherwise */
    switch (nid) {
    case NID_aes_128_ecb:
        *cipher = padlock_aes_128_ecb();
        break;
    case NID_aes_128_cbc:
        *cipher = padlock_aes_128_cbc();
        break;
    case NID_aes_128_cfb:
        *cipher = padlock_aes_128_cfb();
        break;
    case NID_aes_128_ofb:
        *cipher = padlock_aes_128_ofb();
        break;
    case NID_aes_128_ctr:
        *cipher = padlock_aes_128_ctr();
        break;

    case NID_aes_192_ecb:
        *cipher = padlock_aes_192_ecb();
        break;
    case NID_aes_192_cbc:
        *cipher = padlock_aes_192_cbc();
        break;
    case NID_aes_192_cfb:
        *cipher = padlock_aes_192_cfb();
        break;
    case NID_aes_192_ofb:
        *cipher = padlock_aes_192_ofb();
        break;
    case NID_aes_192_ctr:
        *cipher = padlock_aes_192_ctr();
        break;

    case NID_aes_256_ecb:
        *cipher = padlock_aes_256_ecb();
        break;
    case NID_aes_256_cbc:
        *cipher = padlock_aes_256_cbc();
        break;
    case NID_aes_256_cfb:
        *cipher = padlock_aes_256_cfb();
        break;
    case NID_aes_256_ofb:
        *cipher = padlock_aes_256_ofb();
        break;
    case NID_aes_256_ctr:
        *cipher = padlock_aes_256_ctr();
        break;

    default:
        /* Sorry, we don't support this NID */
        *cipher = NULL;
        return 0;
    }

    return 1;
}

/* Prepare the encryption key for PadLock usage */
static int
padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                     const unsigned char *iv, int enc)
{
    struct padlock_cipher_data *cdata;
    int key_len = EVP_CIPHER_CTX_get_key_length(ctx) * 8;
    unsigned long mode = EVP_CIPHER_CTX_get_mode(ctx);

    if (key == NULL)
        return 0;               /* ERROR */

    cdata = ALIGNED_CIPHER_DATA(ctx);
    memset(cdata, 0, sizeof(*cdata));

    /* Prepare Control word. */
    if (mode == EVP_CIPH_OFB_MODE || mode == EVP_CIPH_CTR_MODE)
        cdata->cword.b.encdec = 0;
    else
        cdata->cword.b.encdec = (EVP_CIPHER_CTX_is_encrypting(ctx) == 0);
    cdata->cword.b.rounds = 10 + (key_len - 128) / 32;
    cdata->cword.b.ksize = (key_len - 128) / 64;
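    /*
     * I.e. for the three supported key lengths:
     *   128 bits -> rounds = 10, ksize = 0;
     *   192 bits -> rounds = 12, ksize = 1;
     *   256 bits -> rounds = 14, ksize = 2.
     */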

    switch (key_len) {
    case 128:
        /*
         * PadLock can generate an extended key for AES128 in hardware
         */
        memcpy(cdata->ks.rd_key, key, AES_KEY_SIZE_128);
        cdata->cword.b.keygen = 0;
        break;

    case 192:
    case 256:
        /*
         * Generate an extended AES key in software. Needed for AES192/AES256
         */
        /*
         * Well, the above applies to Stepping 8 CPUs and is listed as
         * hardware errata. They most likely will fix it at some point and
         * then a check for stepping would be due here.
         */
        if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
            && !enc)
            AES_set_decrypt_key(key, key_len, &cdata->ks);
        else
            AES_set_encrypt_key(key, key_len, &cdata->ks);
#  ifndef AES_ASM
        /*
         * OpenSSL C functions use a byte-swapped extended key.
         */
        padlock_key_bswap(&cdata->ks);
#  endif
        cdata->cword.b.keygen = 1;
        break;

    default:
        /* ERROR */
        return 0;
    }

    /*
     * This is done to cover cases where the user reuses the context for a
     * new key. The catch is that if we don't do this, the padlock_*_cipher
     * routines might proceed with the old key...
     */
    padlock_reload_key();

    return 1;
}

/* ===== Random Number Generator ===== */
/*
 * This code is not engaged. The reason is that it does not comply
 * with recommendations for VIA RNG usage for secure applications
 * (posted at http://www.via.com.tw/en/viac3/c3.jsp) nor does it
 * provide meaningful error control...
 */
/*
 * Wrapper that provides an interface between the API and the raw PadLock
 * RNG
 */
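/*
 * The EAX value returned by xstore (via padlock_xstore) is checked as
 * follows: bits 4:0 give the number of bytes actually stored, bit 6 must
 * be set for the RNG to be enabled, and (presumably per VIA's
 * documentation) bits 14:10 flag DC-bias, raw-bits and string-filter
 * failures. With the EDX argument 0 the loop expects 8 bytes per
 * iteration; with 3 it expects a single byte.
 */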
static int padlock_rand_bytes(unsigned char *output, int count)
{
    unsigned int eax, buf;

    while (count >= 8) {
        eax = padlock_xstore(output, 0);
        if (!(eax & (1 << 6)))
            return 0;           /* RNG disabled */
        /* this ---vv--- covers DC bias, Raw Bits and String Filter */
        if (eax & (0x1F << 10))
            return 0;
        if ((eax & 0x1F) == 0)
            continue;           /* no data, retry... */
        if ((eax & 0x1F) != 8)
            return 0;           /* fatal failure... */
        output += 8;
        count -= 8;
    }
    while (count > 0) {
        eax = padlock_xstore(&buf, 3);
        if (!(eax & (1 << 6)))
            return 0;           /* RNG disabled */
        /* this ---vv--- covers DC bias, Raw Bits and String Filter */
        if (eax & (0x1F << 10))
            return 0;
        if ((eax & 0x1F) == 0)
            continue;           /* no data, retry... */
        if ((eax & 0x1F) != 1)
            return 0;           /* fatal failure... */
        *output++ = (unsigned char)buf;
        count--;
    }
    OPENSSL_cleanse(&buf, sizeof(buf));

    return 1;
}

/* Dummy but necessary function */
static int padlock_rand_status(void)
{
    return 1;
}

/* Prepare structure for registration */
static RAND_METHOD padlock_rand = {
    NULL,                       /* seed */
    padlock_rand_bytes,         /* bytes */
    NULL,                       /* cleanup */
    NULL,                       /* add */
    padlock_rand_bytes,         /* pseudorand */
    padlock_rand_status,        /* rand status */
};

# endif                         /* COMPILE_PADLOCKENG */
#endif                          /* !OPENSSL_NO_PADLOCKENG */

#if defined(OPENSSL_NO_PADLOCKENG) || !defined(COMPILE_PADLOCKENG)
# ifndef OPENSSL_NO_DYNAMIC_ENGINE
OPENSSL_EXPORT
    int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns);
OPENSSL_EXPORT
    int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns)
{
    return 0;
}

IMPLEMENT_DYNAMIC_CHECK_FN()
# endif
#endif