path: root/arch/x86
author     Linus Torvalds <torvalds@linux-foundation.org>   2018-04-04 17:11:08 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-04-04 17:11:08 -0700
commit     9eb31227cbccd3a37da0f42604f1ab5fc556bc53 (patch)
tree       9aa467e620e002bf01cecdd98e3908e0cc3e7221 /arch/x86
parent     527cd20771888443b5d8707debe98f62c7a1f596 (diff)
parent     f444ec106407d600f17fa1a4bd14f84577401dec (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:

 "API:
   - add AEAD support to crypto engine
   - allow batch registration in simd

  Algorithms:
   - add CFB mode
   - add speck block cipher
   - add sm4 block cipher
   - new test case for crct10dif
   - improve scheduling latency on ARM
   - scatter/gather support to gcm in aesni
   - convert x86 crypto algorithms to skcipher

  Drivers:
   - hmac(sha224/sha256) support in inside-secure
   - aes gcm/ccm support in stm32
   - stm32mp1 support in stm32
   - ccree driver from staging tree
   - gcm support over QI in caam
   - add ks-sa hwrng driver"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (212 commits)
  crypto: ccree - remove unused enums
  crypto: ahash - Fix early termination in hash walk
  crypto: brcm - explicitly cast cipher to hash type
  crypto: talitos - don't leak pointers to authenc keys
  crypto: qat - don't leak pointers to authenc keys
  crypto: picoxcell - don't leak pointers to authenc keys
  crypto: ixp4xx - don't leak pointers to authenc keys
  crypto: chelsio - don't leak pointers to authenc keys
  crypto: caam/qi - don't leak pointers to authenc keys
  crypto: caam - don't leak pointers to authenc keys
  crypto: lrw - Free rctx->ext with kzfree
  crypto: talitos - fix IPsec cipher in length
  crypto: Deduplicate le32_to_cpu_array() and cpu_to_le32_array()
  crypto: doc - clarify hash callbacks state machine
  crypto: api - Keep failed instances alive
  crypto: api - Make crypto_alg_lookup static
  crypto: api - Remove unused crypto_type lookup function
  crypto: chelsio - Remove declaration of static function from header
  crypto: inside-secure - hmac(sha224) support
  crypto: inside-secure - hmac(sha256) support
  ...
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S           | 1404
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c          |  230
-rw-r--r--  arch/x86/crypto/blowfish_glue.c             |  230
-rw-r--r--  arch/x86/crypto/camellia_aesni_avx2_glue.c  |  491
-rw-r--r--  arch/x86/crypto/camellia_aesni_avx_glue.c   |  495
-rw-r--r--  arch/x86/crypto/camellia_glue.c             |  356
-rw-r--r--  arch/x86/crypto/cast5_avx_glue.c            |  352
-rw-r--r--  arch/x86/crypto/cast6_avx_glue.c            |  489
-rw-r--r--  arch/x86/crypto/des3_ede_glue.c             |  238
-rw-r--r--  arch/x86/crypto/glue_helper.c               |  391
-rw-r--r--  arch/x86/crypto/serpent_avx2_glue.c         |  478
-rw-r--r--  arch/x86/crypto/serpent_avx_glue.c          |  518
-rw-r--r--  arch/x86/crypto/serpent_sse2_glue.c         |  519
-rw-r--r--  arch/x86/crypto/sha1-mb/sha1_mb.c           |   28
-rw-r--r--  arch/x86/crypto/sha1-mb/sha1_mb_ctx.h       |    8
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_mb.c       |   27
-rw-r--r--  arch/x86/crypto/sha256-mb/sha256_mb_ctx.h   |    8
-rw-r--r--  arch/x86/crypto/sha512-mb/sha512_mb.c       |   30
-rw-r--r--  arch/x86/crypto/sha512-mb/sha512_mb_ctx.h   |    8
-rw-r--r--  arch/x86/crypto/twofish_avx_glue.c          |  493
-rw-r--r--  arch/x86/crypto/twofish_glue_3way.c         |  339
-rw-r--r--  arch/x86/include/asm/crypto/camellia.h      |   16
-rw-r--r--  arch/x86/include/asm/crypto/glue_helper.h   |   75
-rw-r--r--  arch/x86/include/asm/crypto/serpent-avx.h   |   17
-rw-r--r--  arch/x86/include/asm/crypto/twofish.h       |   19
25 files changed, 2283 insertions, 4976 deletions
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 12e8484a8ee7..e762ef417562 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -94,23 +94,30 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
#define STACK_OFFSET 8*3
-#define HashKey 16*0 // store HashKey <<1 mod poly here
-#define HashKey_2 16*1 // store HashKey^2 <<1 mod poly here
-#define HashKey_3 16*2 // store HashKey^3 <<1 mod poly here
-#define HashKey_4 16*3 // store HashKey^4 <<1 mod poly here
-#define HashKey_k 16*4 // store XOR of High 64 bits and Low 64
+
+#define AadHash 16*0
+#define AadLen 16*1
+#define InLen (16*1)+8
+#define PBlockEncKey 16*2
+#define OrigIV 16*3
+#define CurCount 16*4
+#define PBlockLen 16*5
+#define HashKey 16*6 // store HashKey <<1 mod poly here
+#define HashKey_2 16*7 // store HashKey^2 <<1 mod poly here
+#define HashKey_3 16*8 // store HashKey^3 <<1 mod poly here
+#define HashKey_4 16*9 // store HashKey^4 <<1 mod poly here
+#define HashKey_k 16*10 // store XOR of High 64 bits and Low 64
// bits of HashKey <<1 mod poly here
//(for Karatsuba purposes)
-#define HashKey_2_k 16*5 // store XOR of High 64 bits and Low 64
+#define HashKey_2_k 16*11 // store XOR of High 64 bits and Low 64
// bits of HashKey^2 <<1 mod poly here
// (for Karatsuba purposes)
-#define HashKey_3_k 16*6 // store XOR of High 64 bits and Low 64
+#define HashKey_3_k 16*12 // store XOR of High 64 bits and Low 64
// bits of HashKey^3 <<1 mod poly here
// (for Karatsuba purposes)
-#define HashKey_4_k 16*7 // store XOR of High 64 bits and Low 64
+#define HashKey_4_k 16*13 // store XOR of High 64 bits and Low 64
// bits of HashKey^4 <<1 mod poly here
// (for Karatsuba purposes)
-#define VARIABLE_OFFSET 16*8
#define arg1 rdi
#define arg2 rsi
@@ -118,10 +125,11 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
#define arg4 rcx
#define arg5 r8
#define arg6 r9
-#define arg7 STACK_OFFSET+8(%r14)
-#define arg8 STACK_OFFSET+16(%r14)
-#define arg9 STACK_OFFSET+24(%r14)
-#define arg10 STACK_OFFSET+32(%r14)
+#define arg7 STACK_OFFSET+8(%rsp)
+#define arg8 STACK_OFFSET+16(%rsp)
+#define arg9 STACK_OFFSET+24(%rsp)
+#define arg10 STACK_OFFSET+32(%rsp)
+#define arg11 STACK_OFFSET+40(%rsp)
#define keysize 2*15*16(%arg1)
#endif
@@ -171,6 +179,332 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
#define TKEYP T1
#endif
+.macro FUNC_SAVE
+ push %r12
+ push %r13
+ push %r14
+#
+# states of %xmm registers %xmm6:%xmm15 not saved
+# all %xmm registers are clobbered
+#
+.endm
+
+
+.macro FUNC_RESTORE
+ pop %r14
+ pop %r13
+ pop %r12
+.endm
+
+# Precompute hashkeys.
+# Input: Hash subkey.
+# Output: HashKeys stored in gcm_context_data. Only needs to be called
+# once per key.
+# clobbers r12, and tmp xmm registers.
+.macro PRECOMPUTE SUBKEY TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 TMP7
+ mov \SUBKEY, %r12
+ movdqu (%r12), \TMP3
+ movdqa SHUF_MASK(%rip), \TMP2
+ PSHUFB_XMM \TMP2, \TMP3
+
+ # precompute HashKey<<1 mod poly from the HashKey (required for GHASH)
+
+ movdqa \TMP3, \TMP2
+ psllq $1, \TMP3
+ psrlq $63, \TMP2
+ movdqa \TMP2, \TMP1
+ pslldq $8, \TMP2
+ psrldq $8, \TMP1
+ por \TMP2, \TMP3
+
+ # reduce HashKey<<1
+
+ pshufd $0x24, \TMP1, \TMP2
+ pcmpeqd TWOONE(%rip), \TMP2
+ pand POLY(%rip), \TMP2
+ pxor \TMP2, \TMP3
+ movdqa \TMP3, HashKey(%arg2)
+
+ movdqa \TMP3, \TMP5
+ pshufd $78, \TMP3, \TMP1
+ pxor \TMP3, \TMP1
+ movdqa \TMP1, HashKey_k(%arg2)
+
+ GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^2<<1 (mod poly)
+ movdqa \TMP5, HashKey_2(%arg2)
+# HashKey_2 = HashKey^2<<1 (mod poly)
+ pshufd $78, \TMP5, \TMP1
+ pxor \TMP5, \TMP1
+ movdqa \TMP1, HashKey_2_k(%arg2)
+
+ GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^3<<1 (mod poly)
+ movdqa \TMP5, HashKey_3(%arg2)
+ pshufd $78, \TMP5, \TMP1
+ pxor \TMP5, \TMP1
+ movdqa \TMP1, HashKey_3_k(%arg2)
+
+ GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^4<<1 (mod poly)
+ movdqa \TMP5, HashKey_4(%arg2)
+ pshufd $78, \TMP5, \TMP1
+ pxor \TMP5, \TMP1
+ movdqa \TMP1, HashKey_4_k(%arg2)
+.endm
+
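For orientation, the table PRECOMPUTE fills is the first four powers of the hash key (each kept in the <<1 mod poly representation) plus, per power, the XOR of its two 64-bit halves that the Karatsuba multiply uses. A minimal C-style sketch of that schedule, not part of the patch; ghash_mul(), be128 and the store_*() helpers are hypothetical stand-ins for GHASH_MUL and the movdqa stores into gcm_context_data:

	/* Sketch only: ghash_mul(), be128, store_hashkey() and store_hashkey_k()
	 * are illustrative, not real kernel APIs. */
	struct be128 { u64 hi, lo; };

	static void precompute_hashkeys_sketch(struct gcm_context_data *ctx,
					       struct be128 h) /* HashKey<<1 mod poly */
	{
		struct be128 pow = h;
		int i;

		for (i = 1; i <= 4; i++) {
			store_hashkey(ctx, i, pow);               /* HashKey_i   */
			store_hashkey_k(ctx, i, pow.hi ^ pow.lo); /* HashKey_i_k */
			if (i < 4)
				pow = ghash_mul(pow, h);  /* HashKey^(i+1)<<1 mod poly */
		}
	}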
+# GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding.
+# Clobbers rax, r10-r13 and xmm0-xmm6, %xmm13
+.macro GCM_INIT Iv SUBKEY AAD AADLEN
+ mov \AADLEN, %r11
+ mov %r11, AadLen(%arg2) # ctx_data.aad_length = aad_length
+ xor %r11, %r11
+ mov %r11, InLen(%arg2) # ctx_data.in_length = 0
+ mov %r11, PBlockLen(%arg2) # ctx_data.partial_block_length = 0
+ mov %r11, PBlockEncKey(%arg2) # ctx_data.partial_block_enc_key = 0
+ mov \Iv, %rax
+ movdqu (%rax), %xmm0
+ movdqu %xmm0, OrigIV(%arg2) # ctx_data.orig_IV = iv
+
+ movdqa SHUF_MASK(%rip), %xmm2
+ PSHUFB_XMM %xmm2, %xmm0
+ movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
+
+ PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7
+ movdqa HashKey(%arg2), %xmm13
+
+ CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
+ %xmm4, %xmm5, %xmm6
+.endm
+
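Read against the gcm_context_data structure added later in this patch, the state GCM_INIT leaves behind looks roughly like the sketch below (a rough sketch only, not a line-for-line translation; the hash-key precomputation and the AAD hash, filled by PRECOMPUTE and CALC_AAD_HASH, are omitted, and the byte reversal mirrors the SHUF_MASK shuffle):

	/* Rough sketch of the context state GCM_INIT establishes. */
	static void gcm_init_state_sketch(struct gcm_context_data *d,
					  const u8 *iv, u64 aad_len)
	{
		int i;

		d->aad_length = aad_len;
		d->in_length = 0;
		d->partial_block_len = 0;
		memset(d->partial_block_enc_key, 0, sizeof(d->partial_block_enc_key));
		memcpy(d->orig_IV, iv, sizeof(d->orig_IV));
		for (i = 0; i < GCM_BLOCK_LEN; i++)  /* SHUF_MASK: reverse the block */
			d->current_counter[i] = iv[GCM_BLOCK_LEN - 1 - i];
		/* PRECOMPUTE then fills d->hash_keys, CALC_AAD_HASH fills d->aad_hash */
	}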
+# GCM_ENC_DEC Encodes/Decodes given data. Assumes that the passed gcm_context
+# struct has been initialized by GCM_INIT.
+# Requires the input data be at least 1 byte long because of READ_PARTIAL_BLOCK
+# Clobbers rax, r10-r13, and xmm0-xmm15
+.macro GCM_ENC_DEC operation
+ movdqu AadHash(%arg2), %xmm8
+ movdqu HashKey(%arg2), %xmm13
+ add %arg5, InLen(%arg2)
+
+ xor %r11, %r11 # initialise the data pointer offset as zero
+ PARTIAL_BLOCK %arg3 %arg4 %arg5 %r11 %xmm8 \operation
+
+ sub %r11, %arg5 # sub partial block data used
+ mov %arg5, %r13 # save the number of bytes
+
+ and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
+ mov %r13, %r12
+ # Encrypt/Decrypt first few blocks
+
+ and $(3<<4), %r12
+ jz _initial_num_blocks_is_0_\@
+ cmp $(2<<4), %r12
+ jb _initial_num_blocks_is_1_\@
+ je _initial_num_blocks_is_2_\@
+_initial_num_blocks_is_3_\@:
+ INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, \operation
+ sub $48, %r13
+ jmp _initial_blocks_\@
+_initial_num_blocks_is_2_\@:
+ INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, \operation
+ sub $32, %r13
+ jmp _initial_blocks_\@
+_initial_num_blocks_is_1_\@:
+ INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, \operation
+ sub $16, %r13
+ jmp _initial_blocks_\@
+_initial_num_blocks_is_0_\@:
+ INITIAL_BLOCKS_ENC_DEC %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, \operation
+_initial_blocks_\@:
+
+ # Main loop - Encrypt/Decrypt remaining blocks
+
+ cmp $0, %r13
+ je _zero_cipher_left_\@
+ sub $64, %r13
+ je _four_cipher_left_\@
+_crypt_by_4_\@:
+ GHASH_4_ENCRYPT_4_PARALLEL_\operation %xmm9, %xmm10, %xmm11, %xmm12, \
+ %xmm13, %xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, \
+ %xmm7, %xmm8, enc
+ add $64, %r11
+ sub $64, %r13
+ jne _crypt_by_4_\@
+_four_cipher_left_\@:
+ GHASH_LAST_4 %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
+%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
+_zero_cipher_left_\@:
+ movdqu %xmm8, AadHash(%arg2)
+ movdqu %xmm0, CurCount(%arg2)
+
+ mov %arg5, %r13
+ and $15, %r13 # %r13 = arg5 (mod 16)
+ je _multiple_of_16_bytes_\@
+
+ mov %r13, PBlockLen(%arg2)
+
+ # Handle the last <16 Byte block separately
+ paddd ONE(%rip), %xmm0 # INCR CNT to get Yn
+ movdqu %xmm0, CurCount(%arg2)
+ movdqa SHUF_MASK(%rip), %xmm10
+ PSHUFB_XMM %xmm10, %xmm0
+
+ ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn)
+ movdqu %xmm0, PBlockEncKey(%arg2)
+
+ cmp $16, %arg5
+ jge _large_enough_update_\@
+
+ lea (%arg4,%r11,1), %r10
+ mov %r13, %r12
+ READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
+ jmp _data_read_\@
+
+_large_enough_update_\@:
+ sub $16, %r11
+ add %r13, %r11
+
+ # receive the last <16 Byte block
+ movdqu (%arg4, %r11, 1), %xmm1
+
+ sub %r13, %r11
+ add $16, %r11
+
+ lea SHIFT_MASK+16(%rip), %r12
+ # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
+ # (r13 is the number of bytes in plaintext mod 16)
+ sub %r13, %r12
+ # get the appropriate shuffle mask
+ movdqu (%r12), %xmm2
+ # shift right 16-r13 bytes
+ PSHUFB_XMM %xmm2, %xmm1
+
+_data_read_\@:
+ lea ALL_F+16(%rip), %r12
+ sub %r13, %r12
+
+.ifc \operation, dec
+ movdqa %xmm1, %xmm2
+.endif
+ pxor %xmm1, %xmm0 # XOR Encrypt(K, Yn)
+ movdqu (%r12), %xmm1
+ # get the appropriate mask to mask out top 16-r13 bytes of xmm0
+ pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
+.ifc \operation, dec
+ pand %xmm1, %xmm2
+ movdqa SHUF_MASK(%rip), %xmm10
+ PSHUFB_XMM %xmm10 ,%xmm2
+
+ pxor %xmm2, %xmm8
+.else
+ movdqa SHUF_MASK(%rip), %xmm10
+ PSHUFB_XMM %xmm10,%xmm0
+
+ pxor %xmm0, %xmm8
+.endif
+
+ movdqu %xmm8, AadHash(%arg2)
+.ifc \operation, enc
+ # GHASH computation for the last <16 byte block
+ movdqa SHUF_MASK(%rip), %xmm10
+ # shuffle xmm0 back to output as ciphertext
+ PSHUFB_XMM %xmm10, %xmm0
+.endif
+
+ # Output %r13 bytes
+ MOVQ_R64_XMM %xmm0, %rax
+ cmp $8, %r13
+ jle _less_than_8_bytes_left_\@
+ mov %rax, (%arg3 , %r11, 1)
+ add $8, %r11
+ psrldq $8, %xmm0
+ MOVQ_R64_XMM %xmm0, %rax
+ sub $8, %r13
+_less_than_8_bytes_left_\@:
+ mov %al, (%arg3, %r11, 1)
+ add $1, %r11
+ shr $8, %rax
+ sub $1, %r13
+ jne _less_than_8_bytes_left_\@
+_multiple_of_16_bytes_\@:
+.endm
+
+# GCM_COMPLETE Finishes update of tag of last partial block
+# Output: Authentication Tag (AUTH_TAG)
+# Clobbers rax, r10-r12, and xmm0, xmm1, xmm5-xmm15
+.macro GCM_COMPLETE AUTHTAG AUTHTAGLEN
+ movdqu AadHash(%arg2), %xmm8
+ movdqu HashKey(%arg2), %xmm13
+
+ mov PBlockLen(%arg2), %r12
+
+ cmp $0, %r12
+ je _partial_done\@
+
+ GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+
+_partial_done\@:
+ mov AadLen(%arg2), %r12 # %r12 = aadLen (number of bytes)
+ shl $3, %r12 # convert into number of bits
+ movd %r12d, %xmm15 # len(A) in %xmm15
+ mov InLen(%arg2), %r12
+ shl $3, %r12 # len(C) in bits (*8)
+ MOVQ_R64_XMM %r12, %xmm1
+
+ pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
+ pxor %xmm1, %xmm15 # %xmm15 = len(A)||len(C)
+ pxor %xmm15, %xmm8
+ GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+ # final GHASH computation
+ movdqa SHUF_MASK(%rip), %xmm10
+ PSHUFB_XMM %xmm10, %xmm8
+
+ movdqu OrigIV(%arg2), %xmm0 # %xmm0 = Y0
+ ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Y0)
+ pxor %xmm8, %xmm0
+_return_T_\@:
+ mov \AUTHTAG, %r10 # %r10 = authTag
+ mov \AUTHTAGLEN, %r11 # %r11 = auth_tag_len
+ cmp $16, %r11
+ je _T_16_\@
+ cmp $8, %r11
+ jl _T_4_\@
+_T_8_\@:
+ MOVQ_R64_XMM %xmm0, %rax
+ mov %rax, (%r10)
+ add $8, %r10
+ sub $8, %r11
+ psrldq $8, %xmm0
+ cmp $0, %r11
+ je _return_T_done_\@
+_T_4_\@:
+ movd %xmm0, %eax
+ mov %eax, (%r10)
+ add $4, %r10
+ sub $4, %r11
+ psrldq $4, %xmm0
+ cmp $0, %r11
+ je _return_T_done_\@
+_T_123_\@:
+ movd %xmm0, %eax
+ cmp $2, %r11
+ jl _T_1_\@
+ mov %ax, (%r10)
+ cmp $2, %r11
+ je _return_T_done_\@
+ add $2, %r10
+ sar $16, %eax
+_T_1_\@:
+ mov %al, (%r10)
+ jmp _return_T_done_\@
+_T_16_\@:
+ movdqu %xmm0, (%r10)
+_return_T_done_\@:
+.endm
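The last few steps of GCM_COMPLETE are easier to see written out: the movd/pslldq/pxor sequence packs len(C) into the low 64-bit lane and len(A) into the high lane, which is the byte-reflected form of the spec's big-endian len(A)||len(C) block, folds that into the hash with one more GHASH multiply, byte-swaps, and XORs with E(K, Y0). A small sketch (not part of the patch) of just the length-block packing, assuming the little-endian x86 this file targets:

	#include <stdint.h>
	#include <string.h>

	/* Length block as the xmm register image the byte-reflected GHASH
	 * domain expects: low 8 bytes = len(C) in bits, high 8 bytes = len(A). */
	static void gcm_len_block_sketch(uint8_t block[16],
					 uint64_t aad_bytes, uint64_t text_bytes)
	{
		uint64_t len_a_bits = aad_bytes * 8;   /* shl $3 on AadLen */
		uint64_t len_c_bits = text_bytes * 8;  /* shl $3 on InLen  */

		memcpy(block, &len_c_bits, 8);         /* low xmm lane          */
		memcpy(block + 8, &len_a_bits, 8);     /* high lane (pslldq $8) */
	}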
#ifdef __x86_64__
/* GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
@@ -264,232 +598,188 @@ _read_next_byte_lt8_\@:
_done_read_partial_block_\@:
.endm
-/*
-* if a = number of total plaintext bytes
-* b = floor(a/16)
-* num_initial_blocks = b mod 4
-* encrypt the initial num_initial_blocks blocks and apply ghash on
-* the ciphertext
-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
-* are clobbered
-* arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
-*/
-
-
-.macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
-XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
- MOVADQ SHUF_MASK(%rip), %xmm14
- mov arg7, %r10 # %r10 = AAD
- mov arg8, %r11 # %r11 = aadLen
- pxor %xmm\i, %xmm\i
- pxor \XMM2, \XMM2
+# CALC_AAD_HASH: Calculates the hash of the data which will not be encrypted.
+# clobbers r10-11, xmm14
+.macro CALC_AAD_HASH HASHKEY AAD AADLEN TMP1 TMP2 TMP3 TMP4 TMP5 \
+ TMP6 TMP7
+ MOVADQ SHUF_MASK(%rip), %xmm14
+ mov \AAD, %r10 # %r10 = AAD
+ mov \AADLEN, %r11 # %r11 = aadLen
+ pxor \TMP7, \TMP7
+ pxor \TMP6, \TMP6
cmp $16, %r11
- jl _get_AAD_rest\num_initial_blocks\operation
-_get_AAD_blocks\num_initial_blocks\operation:
- movdqu (%r10), %xmm\i
- PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
- pxor %xmm\i, \XMM2
- GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+ jl _get_AAD_rest\@
+_get_AAD_blocks\@:
+ movdqu (%r10), \TMP7
+ PSHUFB_XMM %xmm14, \TMP7 # byte-reflect the AAD data
+ pxor \TMP7, \TMP6
+ GHASH_MUL \TMP6, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
add $16, %r10
sub $16, %r11
cmp $16, %r11
- jge _get_AAD_blocks\num_initial_blocks\operation
+ jge _get_AAD_blocks\@
- movdqu \XMM2, %xmm\i
+ movdqu \TMP6, \TMP7
/* read the last <16B of AAD */
-_get_AAD_rest\num_initial_blocks\operation:
+_get_AAD_rest\@:
cmp $0, %r11
- je _get_AAD_done\num_initial_blocks\operation
-
- READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i
- PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
- pxor \XMM2, %xmm\i
- GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+ je _get_AAD_done\@
-_get_AAD_done\num_initial_blocks\operation:
- xor %r11, %r11 # initialise the data pointer offset as zero
- # start AES for num_initial_blocks blocks
-
- mov %arg5, %rax # %rax = *Y0
- movdqu (%rax), \XMM0 # XMM0 = Y0
- PSHUFB_XMM %xmm14, \XMM0
+ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7
+ PSHUFB_XMM %xmm14, \TMP7 # byte-reflect the AAD data
+ pxor \TMP6, \TMP7
+ GHASH_MUL \TMP7, \HASHKEY, \TMP1, \TMP2, \TMP3, \TMP4, \TMP5
+ movdqu \TMP7, \TMP6
-.if (\i == 5) || (\i == 6) || (\i == 7)
- MOVADQ ONE(%RIP),\TMP1
- MOVADQ (%arg1),\TMP2
-.irpc index, \i_seq
- paddd \TMP1, \XMM0 # INCR Y0
- movdqa \XMM0, %xmm\index
- PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap
- pxor \TMP2, %xmm\index
-.endr
- lea 0x10(%arg1),%r10
- mov keysize,%eax
- shr $2,%eax # 128->4, 192->6, 256->8
- add $5,%eax # 128->9, 192->11, 256->13
-
-aes_loop_initial_dec\num_initial_blocks:
- MOVADQ (%r10),\TMP1
-.irpc index, \i_seq
- AESENC \TMP1, %xmm\index
-.endr
- add $16,%r10
- sub $1,%eax
- jnz aes_loop_initial_dec\num_initial_blocks
-
- MOVADQ (%r10), \TMP1
-.irpc index, \i_seq
- AESENCLAST \TMP1, %xmm\index # Last Round
-.endr
-.irpc index, \i_seq
- movdqu (%arg3 , %r11, 1), \TMP1
- pxor \TMP1, %xmm\index
- movdqu %xmm\index, (%arg2 , %r11, 1)
- # write back plaintext/ciphertext for num_initial_blocks
- add $16, %r11
-
- movdqa \TMP1, %xmm\index
- PSHUFB_XMM %xmm14, %xmm\index
- # prepare plaintext/ciphertext for GHASH computation
-.endr
-.endif
-
- # apply GHASH on num_initial_blocks blocks
+_get_AAD_done\@:
+ movdqu \TMP6, AadHash(%arg2)
+.endm
-.if \i == 5
- pxor %xmm5, %xmm6
- GHASH_MUL %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
- pxor %xmm6, %xmm7
- GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
- pxor %xmm7, %xmm8
- GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
-.elseif \i == 6
- pxor %xmm6, %xmm7
- GHASH_MUL %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
- pxor %xmm7, %xmm8
- GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
-.elseif \i == 7
- pxor %xmm7, %xmm8
- GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+# PARTIAL_BLOCK: Handles encryption/decryption and the tag partial blocks
+# between update calls.
+# Requires the input data be at least 1 byte long due to READ_PARTIAL_BLOCK
+# Outputs encrypted bytes, and updates hash and partial info in gcm_context_data
+# Clobbers rax, r10, r12, r13, xmm0-6, xmm9-13
+.macro PARTIAL_BLOCK CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \
+ AAD_HASH operation
+ mov PBlockLen(%arg2), %r13
+ cmp $0, %r13
+ je _partial_block_done_\@ # Leave Macro if no partial blocks
+ # Read in input data without over reading
+ cmp $16, \PLAIN_CYPH_LEN
+ jl _fewer_than_16_bytes_\@
+ movups (\PLAIN_CYPH_IN), %xmm1 # If more than 16 bytes, just fill xmm
+ jmp _data_read_\@
+
+_fewer_than_16_bytes_\@:
+ lea (\PLAIN_CYPH_IN, \DATA_OFFSET, 1), %r10
+ mov \PLAIN_CYPH_LEN, %r12
+ READ_PARTIAL_BLOCK %r10 %r12 %xmm0 %xmm1
+
+ mov PBlockLen(%arg2), %r13
+
+_data_read_\@: # Finished reading in data
+
+ movdqu PBlockEncKey(%arg2), %xmm9
+ movdqu HashKey(%arg2), %xmm13
+
+ lea SHIFT_MASK(%rip), %r12
+
+ # adjust the shuffle mask pointer to be able to shift r13 bytes
+ # (16-r13 is the number of bytes in plaintext mod 16)
+ add %r13, %r12
+ movdqu (%r12), %xmm2 # get the appropriate shuffle mask
+ PSHUFB_XMM %xmm2, %xmm9 # shift right r13 bytes
+
+.ifc \operation, dec
+ movdqa %xmm1, %xmm3
+ pxor %xmm1, %xmm9 # Cyphertext XOR E(K, Yn)
+
+ mov \PLAIN_CYPH_LEN, %r10
+ add %r13, %r10
+ # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling
+ sub $16, %r10
+ # Determine if the partial block is not being filled and
+ # shift mask accordingly
+ jge _no_extra_mask_1_\@
+ sub %r10, %r12
+_no_extra_mask_1_\@:
+
+ movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+ # get the appropriate mask to mask out bottom r13 bytes of xmm9
+ pand %xmm1, %xmm9 # mask out bottom r13 bytes of xmm9
+
+ pand %xmm1, %xmm3
+ movdqa SHUF_MASK(%rip), %xmm10
+ PSHUFB_XMM %xmm10, %xmm3
+ PSHUFB_XMM %xmm2, %xmm3
+ pxor %xmm3, \AAD_HASH
+
+ cmp $0, %r10
+ jl _partial_incomplete_1_\@
+
+ # GHASH computation for the last <16 Byte block
+ GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
+ xor %rax,%rax
+
+ mov %rax, PBlockLen(%arg2)
+ jmp _dec_done_\@
+_partial_incomplete_1_\@:
+ add \PLAIN_CYPH_LEN, PBlockLen(%arg2)
+_dec_done_\@:
+ movdqu \AAD_HASH, AadHash(%arg2)
+.else
+ pxor %xmm1, %xmm9 # Plaintext XOR E(K, Yn)
+
+ mov \PLAIN_CYPH_LEN, %r10
+ add %r13, %r10
+ # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling
+ sub $16, %r10
+ # Determine if the partial block is not being filled and
+ # shift mask accordingly
+ jge _no_extra_mask_2_\@
+ sub %r10, %r12
+_no_extra_mask_2_\@:
+
+ movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+ # get the appropriate mask to mask out bottom r13 bytes of xmm9
+ pand %xmm1, %xmm9
+
+ movdqa SHUF_MASK(%rip), %xmm1
+ PSHUFB_XMM %xmm1, %xmm9
+ PSHUFB_XMM %xmm2, %xmm9
+ pxor %xmm9, \AAD_HASH
+
+ cmp $0, %r10
+ jl _partial_incomplete_2_\@
+
+ # GHASH computation for the last <16 Byte block
+ GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
+ xor %rax,%rax
+
+ mov %rax, PBlockLen(%arg2)
+ jmp _encode_done_\@
+_partial_incomplete_2_\@:
+ add \PLAIN_CYPH_LEN, PBlockLen(%arg2)
+_encode_done_\@:
+ movdqu \AAD_HASH, AadHash(%arg2)
+
+ movdqa SHUF_MASK(%rip), %xmm10
+ # shuffle xmm9 back to output as ciphertext
+ PSHUFB_XMM %xmm10, %xmm9
+ PSHUFB_XMM %xmm2, %xmm9
.endif
- cmp $64, %r13
- jl _initial_blocks_done\num_initial_blocks\operation
- # no need for precomputed values
-/*
-*
-* Precomputations for HashKey parallel with encryption of first 4 blocks.
-* Haskey_i_k holds XORed values of the low and high parts of the Haskey_i
-*/
- MOVADQ ONE(%rip), \TMP1
- paddd \TMP1, \XMM0 # INCR Y0
- MOVADQ \XMM0, \XMM1
- PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
-
- paddd \TMP1, \XMM0 # INCR Y0
- MOVADQ \XMM0, \XMM2
- PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
-
- paddd \TMP1, \XMM0 # INCR Y0
- MOVADQ \XMM0, \XMM3
- PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
-
- paddd \TMP1, \XMM0 # INCR Y0
- MOVADQ \XMM0, \XMM4
- PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
-
- MOVADQ 0(%arg1),\TMP1
- pxor \TMP1, \XMM1
- pxor \TMP1, \XMM2
- pxor \TMP1, \XMM3
- pxor \TMP1, \XMM4
- movdqa \TMP3, \TMP5
- pshufd $78, \TMP3, \TMP1
- pxor \TMP3, \TMP1
- movdqa \TMP1, HashKey_k(%rsp)
- GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
-# TMP5 = HashKey^2<<1 (mod poly)
- movdqa \TMP5, HashKey_2(%rsp)
-# HashKey_2 = HashKey^2<<1 (mod poly)
- pshufd $78, \TMP5, \TMP1
- pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_2_k(%rsp)
-.irpc index, 1234 # do 4 rounds
- movaps 0x10*\index(%arg1), \TMP1
- AESENC \TMP1, \XMM1
- AESENC \TMP1, \XMM2
- AESENC \TMP1, \XMM3
- AESENC \TMP1, \XMM4
-.endr
- GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
-# TMP5 = HashKey^3<<1 (mod poly)
- movdqa \TMP5, HashKey_3(%rsp)
- pshufd $78, \TMP5, \TMP1
- pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_3_k(%rsp)
-.irpc index, 56789 # do next 5 rounds
- movaps 0x10*\index(%arg1), \TMP1
- AESENC \TMP1, \XMM1
- AESENC \TMP1, \XMM2
- AESENC \TMP1, \XMM3
- AESENC \TMP1, \XMM4
-.endr
- GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
-# TMP5 = HashKey^3<<1 (mod poly)
- movdqa \TMP5, HashKey_4(%rsp)
- pshufd $78, \TMP5, \TMP1
- pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_4_k(%rsp)
- lea 0xa0(%arg1),%r10
- mov keysize,%eax
- shr $2,%eax # 128->4, 192->6, 256->8
- sub $4,%eax # 128->0, 192->2, 256->4
- jz aes_loop_pre_dec_done\num_initial_blocks
-
-aes_loop_pre_dec\num_initial_blocks:
- MOVADQ (%r10),\TMP2
-.irpc index, 1234
- AESENC \TMP2, %xmm\index
-.endr
- add $16,%r10
- sub $1,%eax
- jnz aes_loop_pre_dec\num_initial_blocks
-
-aes_loop_pre_dec_done\num_initial_blocks:
- MOVADQ (%r10), \TMP2
- AESENCLAST \TMP2, \XMM1
- AESENCLAST \TMP2, \XMM2
- AESENCLAST \TMP2, \XMM3
- AESENCLAST \TMP2, \XMM4
- movdqu 16*0(%arg3 , %r11 , 1), \TMP1
- pxor \TMP1, \XMM1
- movdqu \XMM1, 16*0(%arg2 , %r11 , 1)
- movdqa \TMP1, \XMM1
- movdqu 16*1(%arg3 , %r11 , 1), \TMP1
- pxor \TMP1, \XMM2
- movdqu \XMM2, 16*1(%arg2 , %r11 , 1)
- movdqa \TMP1, \XMM2
- movdqu 16*2(%arg3 , %r11 , 1), \TMP1
- pxor \TMP1, \XMM3
- movdqu \XMM3, 16*2(%arg2 , %r11 , 1)
- movdqa \TMP1, \XMM3
- movdqu 16*3(%arg3 , %r11 , 1), \TMP1
- pxor \TMP1, \XMM4
- movdqu \XMM4, 16*3(%arg2 , %r11 , 1)
- movdqa \TMP1, \XMM4
- add $64, %r11
- PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
- pxor \XMMDst, \XMM1
-# combine GHASHed value with the corresponding ciphertext
- PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
- PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
- PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
-
-_initial_blocks_done\num_initial_blocks\operation:
-
-.endm
+ # output encrypted Bytes
+ cmp $0, %r10
+ jl _partial_fill_\@
+ mov %r13, %r12
+ mov $16, %r13
+ # Set r13 to be the number of bytes to write out
+ sub %r12, %r13
+ jmp _count_set_\@
+_partial_fill_\@:
+ mov \PLAIN_CYPH_LEN, %r13
+_count_set_\@:
+ movdqa %xmm9, %xmm0
+ MOVQ_R64_XMM %xmm0, %rax
+ cmp $8, %r13
+ jle _less_than_8_bytes_left_\@
+ mov %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
+ add $8, \DATA_OFFSET
+ psrldq $8, %xmm0
+ MOVQ_R64_XMM %xmm0, %rax
+ sub $8, %r13
+_less_than_8_bytes_left_\@:
+ movb %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
+ add $1, \DATA_OFFSET
+ shr $8, %rax
+ sub $1, %r13
+ jne _less_than_8_bytes_left_\@
+_partial_block_done_\@:
+.endm # PARTIAL_BLOCK
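Stripped of the crypto, the bookkeeping PARTIAL_BLOCK performs reduces to deciding how much of the new input completes the block left over from the previous update call. A self-contained sketch of just that arithmetic, mirroring the r10/r13 computation above (names are illustrative, not from the patch):

	#include <stdint.h>

	/* Given the bytes already buffered in the partial block and the length of
	 * the new input, compute how many bytes this call emits for the straddling
	 * block and what partial_block_len becomes afterwards. */
	static void partial_block_accounting(uint64_t pblock_len, uint64_t in_len,
					     uint64_t *emit, uint64_t *new_pblock_len)
	{
		if (pblock_len == 0) {                  /* no partial block pending    */
			*emit = 0;
			*new_pblock_len = 0;
		} else if (pblock_len + in_len >= 16) { /* leftover block is completed */
			*emit = 16 - pblock_len;
			*new_pblock_len = 0;
		} else {                                /* still short of 16 bytes     */
			*emit = in_len;
			*new_pblock_len = pblock_len + in_len;
		}
	}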
/*
* if a = number of total plaintext bytes
@@ -499,49 +789,19 @@ _initial_blocks_done\num_initial_blocks\operation:
* the ciphertext
* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
* are clobbered
-* arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+* arg1, %arg2, %arg3 are used as a pointer only, not modified
*/
-.macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
-XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
- MOVADQ SHUF_MASK(%rip), %xmm14
- mov arg7, %r10 # %r10 = AAD
- mov arg8, %r11 # %r11 = aadLen
- pxor %xmm\i, %xmm\i
- pxor \XMM2, \XMM2
+.macro INITIAL_BLOCKS_ENC_DEC TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+ MOVADQ SHUF_MASK(%rip), %xmm14
- cmp $16, %r11
- jl _get_AAD_rest\num_initial_blocks\operation
-_get_AAD_blocks\num_initial_blocks\operation:
- movdqu (%r10), %xmm\i
- PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
- pxor %xmm\i, \XMM2
- GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
- add $16, %r10
- sub $16, %r11
- cmp $16, %r11
- jge _get_AAD_blocks\num_initial_blocks\operation
+ movdqu AadHash(%arg2), %xmm\i # XMM0 = Y0
- movdqu \XMM2, %xmm\i
-
- /* read the last <16B of AAD */
-_get_AAD_rest\num_initial_blocks\operation:
- cmp $0, %r11
- je _get_AAD_done\num_initial_blocks\operation
-
- READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i
- PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
- pxor \XMM2, %xmm\i
- GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
-
-_get_AAD_done\num_initial_blocks\operation:
- xor %r11, %r11 # initialise the data pointer offset as zero
# start AES for num_initial_blocks blocks
- mov %arg5, %rax # %rax = *Y0
- movdqu (%rax), \XMM0 # XMM0 = Y0
- PSHUFB_XMM %xmm14, \XMM0
+ movdqu CurCount(%arg2), \XMM0 # XMM0 = Y0
.if (\i == 5) || (\i == 6) || (\i == 7)
@@ -549,7 +809,11 @@ _get_AAD_done\num_initial_blocks\operation:
MOVADQ 0(%arg1),\TMP2
.irpc index, \i_seq
paddd \TMP1, \XMM0 # INCR Y0
+.ifc \operation, dec
+ movdqa \XMM0, %xmm\index
+.else
MOVADQ \XMM0, %xmm\index
+.endif
PSHUFB_XMM %xmm14, %xmm\index # perform a 16 byte swap
pxor \TMP2, %xmm\index
.endr
@@ -558,25 +822,29 @@ _get_AAD_done\num_initial_blocks\operation:
shr $2,%eax # 128->4, 192->6, 256->8
add $5,%eax # 128->9, 192->11, 256->13
-aes_loop_initial_enc\num_initial_blocks:
+aes_loop_initial_\@:
MOVADQ (%r10),\TMP1
.irpc index, \i_seq
AESENC \TMP1, %xmm\index
.endr
add $16,%r10
sub $1,%eax
- jnz aes_loop_initial_enc\num_initial_blocks
+ jnz aes_loop_initial_\@
MOVADQ (%r10), \TMP1
.irpc index, \i_seq
AESENCLAST \TMP1, %xmm\index # Last Round
.endr
.irpc index, \i_seq
- movdqu (%arg3 , %r11, 1), \TMP1
+ movdqu (%arg4 , %r11, 1), \TMP1
pxor \TMP1, %xmm\index
- movdqu %xmm\index, (%arg2 , %r11, 1)
+ movdqu %xmm\index, (%arg3 , %r11, 1)
# write back plaintext/ciphertext for num_initial_blocks
add $16, %r11
+
+.ifc \operation, dec
+ movdqa \TMP1, %xmm\index
+.endif
PSHUFB_XMM %xmm14, %xmm\index
# prepare plaintext/ciphertext for GHASH computation
@@ -602,7 +870,7 @@ aes_loop_initial_enc\num_initial_blocks:
GHASH_MUL %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
.endif
cmp $64, %r13
- jl _initial_blocks_done\num_initial_blocks\operation
+ jl _initial_blocks_done\@
# no need for precomputed values
/*
*
@@ -631,17 +899,6 @@ aes_loop_initial_enc\num_initial_blocks:
pxor \TMP1, \XMM2
pxor \TMP1, \XMM3
pxor \TMP1, \XMM4
- movdqa \TMP3, \TMP5
- pshufd $78, \TMP3, \TMP1
- pxor \TMP3, \TMP1
- movdqa \TMP1, HashKey_k(%rsp)
- GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
-# TMP5 = HashKey^2<<1 (mod poly)
- movdqa \TMP5, HashKey_2(%rsp)
-# HashKey_2 = HashKey^2<<1 (mod poly)
- pshufd $78, \TMP5, \TMP1
- pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_2_k(%rsp)
.irpc index, 1234 # do 4 rounds
movaps 0x10*\index(%arg1), \TMP1
AESENC \TMP1, \XMM1
@@ -649,12 +906,6 @@ aes_loop_initial_enc\num_initial_blocks:
AESENC \TMP1, \XMM3
AESENC \TMP1, \XMM4
.endr
- GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
-# TMP5 = HashKey^3<<1 (mod poly)
- movdqa \TMP5, HashKey_3(%rsp)
- pshufd $78, \TMP5, \TMP1
- pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_3_k(%rsp)
.irpc index, 56789 # do next 5 rounds
movaps 0x10*\index(%arg1), \TMP1
AESENC \TMP1, \XMM1
@@ -662,45 +913,56 @@ aes_loop_initial_enc\num_initial_blocks:
AESENC \TMP1, \XMM3
AESENC \TMP1, \XMM4
.endr
- GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
-# TMP5 = HashKey^3<<1 (mod poly)
- movdqa \TMP5, HashKey_4(%rsp)
- pshufd $78, \TMP5, \TMP1
- pxor \TMP5, \TMP1
- movdqa \TMP1, HashKey_4_k(%rsp)
lea 0xa0(%arg1),%r10
mov keysize,%eax
shr $2,%eax # 128->4, 192->6, 256->8
sub $4,%eax # 128->0, 192->2, 256->4
- jz aes_loop_pre_enc_done\num_initial_blocks
+ jz aes_loop_pre_done\@
-aes_loop_pre_enc\num_initial_blocks:
+aes_loop_pre_\@:
MOVADQ (%r10),\TMP2
.irpc index, 1234
AESENC \TMP2, %xmm\index
.endr
add $16,%r10
sub $1,%eax
- jnz aes_loop_pre_enc\num_initial_blocks
+ jnz aes_loop_pre_\@
-aes_loop_pre_enc_done\num_initial_blocks:
+aes_loop_pre_done\@:
MOVADQ (%r10), \TMP2
AESENCLAST \TMP2, \XMM1
AESENCLAST \TMP2, \XMM2
AESENCLAST \TMP2, \XMM3
AESENCLAST \TMP2, \XMM4
- movdqu 16*0(%arg3 , %r11 , 1), \TMP1
+ movdqu 16*0(%arg4 , %r11 , 1), \TMP1
pxor \TMP1, \XMM1
- movdqu 16*1(%arg3 , %r11 , 1), \TMP1
+.ifc \operation, dec
+ movdqu \XMM1, 16*0(%arg3 , %r11 , 1)
+ movdqa \TMP1, \XMM1
+.endif
+ movdqu 16*1(%arg4 , %r11 , 1), \TMP1
pxor \TMP1, \XMM2
- movdqu 16*2(%arg3 , %r11 , 1), \TMP1
+.ifc \operation, dec
+ movdqu \XMM2, 16*1(%arg3 , %r11 , 1)
+ movdqa \TMP1, \XMM2
+.endif
+ movdqu 16*2(%arg4 , %r11 , 1), \TMP1
pxor \TMP1, \XMM3
- movdqu 16*3(%arg3 , %r11 , 1), \TMP1
+.ifc \operation, dec
+ movdqu \XMM3, 16*2(%arg3 , %r11 , 1)
+ movdqa \TMP1, \XMM3
+.endif
+ movdqu 16*3(%arg4 , %r11 , 1), \TMP1
pxor \TMP1, \XMM4
- movdqu \XMM1, 16*0(%arg2 , %r11 , 1)
- movdqu \XMM2, 16*1(%arg2 , %r11 , 1)
- movdqu \XMM3, 16*2(%arg2 , %r11 , 1)
- movdqu \XMM4, 16*3(%arg2 , %r11 , 1)
+.ifc \operation, dec
+ movdqu \XMM4, 16*3(%arg3 , %r11 , 1)
+ movdqa \TMP1, \XMM4
+.else
+ movdqu \XMM1, 16*0(%arg3 , %r11 , 1)
+ movdqu \XMM2, 16*1(%arg3 , %r11 , 1)
+ movdqu \XMM3, 16*2(%arg3 , %r11 , 1)
+ movdqu \XMM4, 16*3(%arg3 , %r11 , 1)
+.endif
add $64, %r11
PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
@@ -710,14 +972,14 @@ aes_loop_pre_enc_done\num_initial_blocks:
PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
-_initial_blocks_done\num_initial_blocks\operation:
+_initial_blocks_done\@:
.endm
/*
* encrypt 4 blocks at a time
* ghash the 4 previously encrypted ciphertext blocks
-* arg1, %arg2, %arg3 are used as pointers only, not modified
+* arg1, %arg3, %arg4 are used as pointers only, not modified
* %r11 is the data offset value
*/
.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
@@ -735,7 +997,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pshufd $78, \XMM5, \TMP6
pxor \XMM5, \TMP6
paddd ONE(%rip), \XMM0 # INCR CNT
- movdqa HashKey_4(%rsp), \TMP5
+ movdqa HashKey_4(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
movdqa \XMM0, \XMM1
paddd ONE(%rip), \XMM0 # INCR CNT
@@ -754,7 +1016,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pxor (%arg1), \XMM2
pxor (%arg1), \XMM3
pxor (%arg1), \XMM4
- movdqa HashKey_4_k(%rsp), \TMP5
+ movdqa HashKey_4_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
movaps 0x10(%arg1), \TMP1
AESENC \TMP1, \XMM1 # Round 1
@@ -769,7 +1031,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM6, \TMP1
pshufd $78, \XMM6, \TMP2
pxor \XMM6, \TMP2
- movdqa HashKey_3(%rsp), \TMP5
+ movdqa HashKey_3(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
movaps 0x30(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 3
@@ -782,7 +1044,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM2
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
- movdqa HashKey_3_k(%rsp), \TMP5
+ movdqa HashKey_3_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x50(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 5
@@ -796,7 +1058,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM7, \TMP1
pshufd $78, \XMM7, \TMP2
pxor \XMM7, \TMP2
- movdqa HashKey_2(%rsp ), \TMP5
+ movdqa HashKey_2(%arg2), \TMP5
# Multiply TMP5 * HashKey using karatsuba
@@ -812,7 +1074,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM2
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
- movdqa HashKey_2_k(%rsp), \TMP5
+ movdqa HashKey_2_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x80(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 8
@@ -830,7 +1092,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM8, \TMP1
pshufd $78, \XMM8, \TMP2
pxor \XMM8, \TMP2
- movdqa HashKey(%rsp), \TMP5
+ movdqa HashKey(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
movaps 0x90(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 9
@@ -842,37 +1104,37 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
mov keysize,%eax
shr $2,%eax # 128->4, 192->6, 256->8
sub $4,%eax # 128->0, 192->2, 256->4
- jz aes_loop_par_enc_done
+ jz aes_loop_par_enc_done\@
-aes_loop_par_enc:
+aes_loop_par_enc\@:
MOVADQ (%r10),\TMP3
.irpc index, 1234
AESENC \TMP3, %xmm\index
.endr
add $16,%r10
sub $1,%eax
- jnz aes_loop_par_enc
+ jnz aes_loop_par_enc\@
-aes_loop_par_enc_done:
+aes_loop_par_enc_done\@:
MOVADQ (%r10), \TMP3
AESENCLAST \TMP3, \XMM1 # Round 10
AESENCLAST \TMP3, \XMM2
AESENCLAST \TMP3, \XMM3
AESENCLAST \TMP3, \XMM4
- movdqa HashKey_k(%rsp), \TMP5
+ movdqa HashKey_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
- movdqu (%arg3,%r11,1), \TMP3
+ movdqu (%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
- movdqu 16(%arg3,%r11,1), \TMP3
+ movdqu 16(%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK
- movdqu 32(%arg3,%r11,1), \TMP3
+ movdqu 32(%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK
- movdqu 48(%arg3,%r11,1), \TMP3
+ movdqu 48(%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK
- movdqu \XMM1, (%arg2,%r11,1) # Write to the ciphertext buffer
- movdqu \XMM2, 16(%arg2,%r11,1) # Write to the ciphertext buffer
- movdqu \XMM3, 32(%arg2,%r11,1) # Write to the ciphertext buffer
- movdqu \XMM4, 48(%arg2,%r11,1) # Write to the ciphertext buffer
+ movdqu \XMM1, (%arg3,%r11,1) # Write to the ciphertext buffer
+ movdqu \XMM2, 16(%arg3,%r11,1) # Write to the ciphertext buffer
+ movdqu \XMM3, 32(%arg3,%r11,1) # Write to the ciphertext buffer
+ movdqu \XMM4, 48(%arg3,%r11,1) # Write to the ciphertext buffer
PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap
PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap
PSHUFB_XMM %xmm15, \XMM3 # perform a 16 byte swap
@@ -925,7 +1187,7 @@ aes_loop_par_enc_done:
/*
* decrypt 4 blocks at a time
* ghash the 4 previously decrypted ciphertext blocks
-* arg1, %arg2, %arg3 are used as pointers only, not modified
+* arg1, %arg3, %arg4 are used as pointers only, not modified
* %r11 is the data offset value
*/
.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
@@ -943,7 +1205,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pshufd $78, \XMM5, \TMP6
pxor \XMM5, \TMP6
paddd ONE(%rip), \XMM0 # INCR CNT
- movdqa HashKey_4(%rsp), \TMP5
+ movdqa HashKey_4(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
movdqa \XMM0, \XMM1
paddd ONE(%rip), \XMM0 # INCR CNT
@@ -962,7 +1224,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
pxor (%arg1), \XMM2
pxor (%arg1), \XMM3
pxor (%arg1), \XMM4
- movdqa HashKey_4_k(%rsp), \TMP5
+ movdqa HashKey_4_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
movaps 0x10(%arg1), \TMP1
AESENC \TMP1, \XMM1 # Round 1
@@ -977,7 +1239,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM6, \TMP1
pshufd $78, \XMM6, \TMP2
pxor \XMM6, \TMP2
- movdqa HashKey_3(%rsp), \TMP5
+ movdqa HashKey_3(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
movaps 0x30(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 3
@@ -990,7 +1252,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM2
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
- movdqa HashKey_3_k(%rsp), \TMP5
+ movdqa HashKey_3_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x50(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 5
@@ -1004,7 +1266,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM7, \TMP1
pshufd $78, \XMM7, \TMP2
pxor \XMM7, \TMP2
- movdqa HashKey_2(%rsp ), \TMP5
+ movdqa HashKey_2(%arg2), \TMP5
# Multiply TMP5 * HashKey using karatsuba
@@ -1020,7 +1282,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
AESENC \TMP3, \XMM2
AESENC \TMP3, \XMM3
AESENC \TMP3, \XMM4
- movdqa HashKey_2_k(%rsp), \TMP5
+ movdqa HashKey_2_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movaps 0x80(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 8
@@ -1038,7 +1300,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
movdqa \XMM8, \TMP1
pshufd $78, \XMM8, \TMP2
pxor \XMM8, \TMP2
- movdqa HashKey(%rsp), \TMP5
+ movdqa HashKey(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
movaps 0x90(%arg1), \TMP3
AESENC \TMP3, \XMM1 # Round 9
@@ -1050,40 +1312,40 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
mov keysize,%eax
shr $2,%eax # 128->4, 192->6, 256->8
sub $4,%eax # 128->0, 192->2, 256->4
- jz aes_loop_par_dec_done
+ jz aes_loop_par_dec_done\@
-aes_loop_par_dec:
+aes_loop_par_dec\@:
MOVADQ (%r10),\TMP3
.irpc index, 1234
AESENC \TMP3, %xmm\index
.endr
add $16,%r10
sub $1,%eax
- jnz aes_loop_par_dec
+ jnz aes_loop_par_dec\@
-aes_loop_par_dec_done:
+aes_loop_par_dec_done\@:
MOVADQ (%r10), \TMP3
AESENCLAST \TMP3, \XMM1 # last round
AESENCLAST \TMP3, \XMM2
AESENCLAST \TMP3, \XMM3
AESENCLAST \TMP3, \XMM4
- movdqa HashKey_k(%rsp), \TMP5
+ movdqa HashKey_k(%arg2), \TMP5
PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
- movdqu (%arg3,%r11,1), \TMP3
+ movdqu (%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
- movdqu \XMM1, (%arg2,%r11,1) # Write to plaintext buffer
+ movdqu \XMM1, (%arg3,%r11,1) # Write to plaintext buffer
movdqa \TMP3, \XMM1
- movdqu 16(%arg3,%r11,1), \TMP3
+ movdqu 16(%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM2 # Ciphertext/Plaintext XOR EK
- movdqu \XMM2, 16(%arg2,%r11,1) # Write to plaintext buffer
+ movdqu \XMM2, 16(%arg3,%r11,1) # Write to plaintext buffer
movdqa \TMP3, \XMM2
- movdqu 32(%arg3,%r11,1), \TMP3
+ movdqu 32(%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM3 # Ciphertext/Plaintext XOR EK
- movdqu \XMM3, 32(%arg2,%r11,1) # Write to plaintext buffer
+ movdqu \XMM3, 32(%arg3,%r11,1) # Write to plaintext buffer
movdqa \TMP3, \XMM3
- movdqu 48(%arg3,%r11,1), \TMP3
+ movdqu 48(%arg4,%r11,1), \TMP3
pxor \TMP3, \XMM4 # Ciphertext/Plaintext XOR EK
- movdqu \XMM4, 48(%arg2,%r11,1) # Write to plaintext buffer
+ movdqu \XMM4, 48(%arg3,%r11,1) # Write to plaintext buffer
movdqa \TMP3, \XMM4
PSHUFB_XMM %xmm15, \XMM1 # perform a 16 byte swap
PSHUFB_XMM %xmm15, \XMM2 # perform a 16 byte swap
@@ -1143,10 +1405,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
movdqa \XMM1, \TMP6
pshufd $78, \XMM1, \TMP2
pxor \XMM1, \TMP2
- movdqa HashKey_4(%rsp), \TMP5
+ movdqa HashKey_4(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1
PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0
- movdqa HashKey_4_k(%rsp), \TMP4
+ movdqa HashKey_4_k(%arg2), \TMP4
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
movdqa \XMM1, \XMMDst
movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1
@@ -1156,10 +1418,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
movdqa \XMM2, \TMP1
pshufd $78, \XMM2, \TMP2
pxor \XMM2, \TMP2
- movdqa HashKey_3(%rsp), \TMP5
+ movdqa HashKey_3(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0
- movdqa HashKey_3_k(%rsp), \TMP4
+ movdqa HashKey_3_k(%arg2), \TMP4
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM2, \XMMDst
@@ -1171,10 +1433,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
movdqa \XMM3, \TMP1
pshufd $78, \XMM3, \TMP2
pxor \XMM3, \TMP2
- movdqa HashKey_2(%rsp), \TMP5
+ movdqa HashKey_2(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0
- movdqa HashKey_2_k(%rsp), \TMP4
+ movdqa HashKey_2_k(%arg2), \TMP4
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM3, \XMMDst
@@ -1184,10 +1446,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
movdqa \XMM4, \TMP1
pshufd $78, \XMM4, \TMP2
pxor \XMM4, \TMP2
- movdqa HashKey(%rsp), \TMP5
+ movdqa HashKey(%arg2), \TMP5
PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0
- movdqa HashKey_k(%rsp), \TMP4
+ movdqa HashKey_k(%arg2), \TMP4
PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
pxor \TMP1, \TMP6
pxor \XMM4, \XMMDst
@@ -1256,6 +1518,8 @@ _esb_loop_\@:
.endm
/*****************************************************************************
* void aesni_gcm_dec(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
+* struct gcm_context_data *data
+* // Context data
* u8 *out, // Plaintext output. Encrypt in-place is allowed.
* const u8 *in, // Ciphertext input
* u64 plaintext_len, // Length of data in bytes for decryption.
@@ -1333,195 +1597,20 @@ _esb_loop_\@:
*
*****************************************************************************/
ENTRY(aesni_gcm_dec)
- push %r12
- push %r13
- push %r14
- mov %rsp, %r14
-/*
-* states of %xmm registers %xmm6:%xmm15 not saved
-* all %xmm registers are clobbered
-*/
- sub $VARIABLE_OFFSET, %rsp
- and $~63, %rsp # align rsp to 64 bytes
- mov %arg6, %r12
- movdqu (%r12), %xmm13 # %xmm13 = HashKey
- movdqa SHUF_MASK(%rip), %xmm2
- PSHUFB_XMM %xmm2, %xmm13
-
-
-# Precompute HashKey<<1 (mod poly) from the hash key (required for GHASH)
-
- movdqa %xmm13, %xmm2
- psllq $1, %xmm13
- psrlq $63, %xmm2
- movdqa %xmm2, %xmm1
- pslldq $8, %xmm2
- psrldq $8, %xmm1
- por %xmm2, %xmm13
-
- # Reduction
-
- pshufd $0x24, %xmm1, %xmm2
- pcmpeqd TWOONE(%rip), %xmm2
- pand POLY(%rip), %xmm2
- pxor %xmm2, %xmm13 # %xmm13 holds the HashKey<<1 (mod poly)
-
-
- # Decrypt first few blocks
-
- movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
- mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
- and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
- mov %r13, %r12
- and $(3<<4), %r12
- jz _initial_num_blocks_is_0_decrypt
- cmp $(2<<4), %r12
- jb _initial_num_blocks_is_1_decrypt
- je _initial_num_blocks_is_2_decrypt
-_initial_num_blocks_is_3_decrypt:
- INITIAL_BLOCKS_DEC 3, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
-%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, dec
- sub $48, %r13
- jmp _initial_blocks_decrypted
-_initial_num_blocks_is_2_decrypt:
- INITIAL_BLOCKS_DEC 2, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
-%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, dec
- sub $32, %r13
- jmp _initial_blocks_decrypted
-_initial_num_blocks_is_1_decrypt:
- INITIAL_BLOCKS_DEC 1, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
-%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, dec
- sub $16, %r13
- jmp _initial_blocks_decrypted
-_initial_num_blocks_is_0_decrypt:
- INITIAL_BLOCKS_DEC 0, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
-%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, dec
-_initial_blocks_decrypted:
- cmp $0, %r13
- je _zero_cipher_left_decrypt
- sub $64, %r13
- je _four_cipher_left_decrypt
-_decrypt_by_4:
- GHASH_4_ENCRYPT_4_PARALLEL_DEC %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, \
-%xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, dec
- add $64, %r11
- sub $64, %r13
- jne _decrypt_by_4
-_four_cipher_left_decrypt:
- GHASH_LAST_4 %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
-%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
-_zero_cipher_left_decrypt:
- mov %arg4, %r13
- and $15, %r13 # %r13 = arg4 (mod 16)
- je _multiple_of_16_bytes_decrypt
-
- # Handle the last <16 byte block separately
-
- paddd ONE(%rip), %xmm0 # increment CNT to get Yn
- movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10, %xmm0
-
- ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn)
-
- lea (%arg3,%r11,1), %r10
- mov %r13, %r12
- READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
-
- lea ALL_F+16(%rip), %r12
- sub %r13, %r12
- movdqa %xmm1, %xmm2
- pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
- movdqu (%r12), %xmm1
- # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
- pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
- pand %xmm1, %xmm2
- movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10 ,%xmm2
-
- pxor %xmm2, %xmm8
- GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
-
- # output %r13 bytes
- MOVQ_R64_XMM %xmm0, %rax
- cmp $8, %r13
- jle _less_than_8_bytes_left_decrypt
- mov %rax, (%arg2 , %r11, 1)
- add $8, %r11
- psrldq $8, %xmm0
- MOVQ_R64_XMM %xmm0, %rax
- sub $8, %r13
-_less_than_8_bytes_left_decrypt:
- mov %al, (%arg2, %r11, 1)
- add $1, %r11
- shr $8, %rax
- sub $1, %r13
- jne _less_than_8_bytes_left_decrypt
-_multiple_of_16_bytes_decrypt:
- mov arg8, %r12 # %r13 = aadLen (number of bytes)
- shl $3, %r12 # convert into number of bits
- movd %r12d, %xmm15 # len(A) in %xmm15
- shl $3, %arg4 # len(C) in bits (*128)
- MOVQ_R64_XMM %arg4, %xmm1
- pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
- pxor %xmm1, %xmm15 # %xmm15 = len(A)||len(C)
- pxor %xmm15, %xmm8
- GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
- # final GHASH computation
- movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10, %xmm8
+ FUNC_SAVE
- mov %arg5, %rax # %rax = *Y0
- movdqu (%rax), %xmm0 # %xmm0 = Y0
- ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Y0)
- pxor %xmm8, %xmm0
-_return_T_decrypt:
- mov arg9, %r10 # %r10 = authTag
- mov arg10, %r11 # %r11 = auth_tag_len
- cmp $16, %r11
- je _T_16_decrypt
- cmp $8, %r11
- jl _T_4_decrypt
-_T_8_decrypt:
- MOVQ_R64_XMM %xmm0, %rax
- mov %rax, (%r10)
- add $8, %r10
- sub $8, %r11
- psrldq $8, %xmm0
- cmp $0, %r11
- je _return_T_done_decrypt
-_T_4_decrypt:
- movd %xmm0, %eax
- mov %eax, (%r10)
- add $4, %r10
- sub $4, %r11
- psrldq $4, %xmm0
- cmp $0, %r11
- je _return_T_done_decrypt
-_T_123_decrypt:
- movd %xmm0, %eax
- cmp $2, %r11
- jl _T_1_decrypt
- mov %ax, (%r10)
- cmp $2, %r11
- je _return_T_done_decrypt
- add $2, %r10
- sar $16, %eax
-_T_1_decrypt:
- mov %al, (%r10)
- jmp _return_T_done_decrypt
-_T_16_decrypt:
- movdqu %xmm0, (%r10)
-_return_T_done_decrypt:
- mov %r14, %rsp
- pop %r14
- pop %r13
- pop %r12
+ GCM_INIT %arg6, arg7, arg8, arg9
+ GCM_ENC_DEC dec
+ GCM_COMPLETE arg10, arg11
+ FUNC_RESTORE
ret
ENDPROC(aesni_gcm_dec)
/*****************************************************************************
* void aesni_gcm_enc(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
+* struct gcm_context_data *data
+* // Context data
* u8 *out, // Ciphertext output. Encrypt in-place is allowed.
* const u8 *in, // Plaintext input
* u64 plaintext_len, // Length of data in bytes for encryption.
@@ -1596,195 +1685,78 @@ ENDPROC(aesni_gcm_dec)
* poly = x^128 + x^127 + x^126 + x^121 + 1
***************************************************************************/
ENTRY(aesni_gcm_enc)
- push %r12
- push %r13
- push %r14
- mov %rsp, %r14
-#
-# states of %xmm registers %xmm6:%xmm15 not saved
-# all %xmm registers are clobbered
-#
- sub $VARIABLE_OFFSET, %rsp
- and $~63, %rsp
- mov %arg6, %r12
- movdqu (%r12), %xmm13
- movdqa SHUF_MASK(%rip), %xmm2
- PSHUFB_XMM %xmm2, %xmm13
-
-
-# precompute HashKey<<1 mod poly from the HashKey (required for GHASH)
-
- movdqa %xmm13, %xmm2
- psllq $1, %xmm13
- psrlq $63, %xmm2
- movdqa %xmm2, %xmm1
- pslldq $8, %xmm2
- psrldq $8, %xmm1
- por %xmm2, %xmm13
-
- # reduce HashKey<<1
-
- pshufd $0x24, %xmm1, %xmm2
- pcmpeqd TWOONE(%rip), %xmm2
- pand POLY(%rip), %xmm2
- pxor %xmm2, %xmm13
- movdqa %xmm13, HashKey(%rsp)
- mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
- and $-16, %r13
- mov %r13, %r12
+ FUNC_SAVE
- # Encrypt first few blocks
+ GCM_INIT %arg6, arg7, arg8, arg9
+ GCM_ENC_DEC enc
- and $(3<<4), %r12
- jz _initial_num_blocks_is_0_encrypt
- cmp $(2<<4), %r12
- jb _initial_num_blocks_is_1_encrypt
- je _initial_num_blocks_is_2_encrypt
-_initial_num_blocks_is_3_encrypt:
- INITIAL_BLOCKS_ENC 3, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
-%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, enc
- sub $48, %r13
- jmp _initial_blocks_encrypted
-_initial_num_blocks_is_2_encrypt:
- INITIAL_BLOCKS_ENC 2, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
-%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, enc
- sub $32, %r13
- jmp _initial_blocks_encrypted
-_initial_num_blocks_is_1_encrypt:
- INITIAL_BLOCKS_ENC 1, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
-%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, enc
- sub $16, %r13
- jmp _initial_blocks_encrypted
-_initial_num_blocks_is_0_encrypt:
- INITIAL_BLOCKS_ENC 0, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
-%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, enc
-_initial_blocks_encrypted:
-
- # Main loop - Encrypt remaining blocks
-
- cmp $0, %r13
- je _zero_cipher_left_encrypt
- sub $64, %r13
- je _four_cipher_left_encrypt
-_encrypt_by_4_encrypt:
- GHASH_4_ENCRYPT_4_PARALLEL_ENC %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, \
-%xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, enc
- add $64, %r11
- sub $64, %r13
- jne _encrypt_by_4_encrypt
-_four_cipher_left_encrypt:
- GHASH_LAST_4 %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
-%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
-_zero_cipher_left_encrypt:
- mov %arg4, %r13
- and $15, %r13 # %r13 = arg4 (mod 16)
- je _multiple_of_16_bytes_encrypt
-
- # Handle the last <16 Byte block separately
- paddd ONE(%rip), %xmm0 # INCR CNT to get Yn
- movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10, %xmm0
-
- ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn)
-
- lea (%arg3,%r11,1), %r10
- mov %r13, %r12
- READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
-
- lea ALL_F+16(%rip), %r12
- sub %r13, %r12
- pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
- movdqu (%r12), %xmm1
- # get the appropriate mask to mask out top 16-r13 bytes of xmm0
- pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
- movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10,%xmm0
+ GCM_COMPLETE arg10, arg11
+ FUNC_RESTORE
+ ret
+ENDPROC(aesni_gcm_enc)
- pxor %xmm0, %xmm8
- GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
- # GHASH computation for the last <16 byte block
- movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10, %xmm0
+/*****************************************************************************
+* void aesni_gcm_init(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
+* struct gcm_context_data *data,
+* // context data
+* u8 *iv, // Pre-counter block j0: 4 byte salt (from Security Association)
+* // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload)
+* // concatenated with 0x00000001. 16-byte aligned pointer.
+* u8 *hash_subkey, // H, the Hash sub key input. Data starts on a 16-byte boundary.
+* const u8 *aad, // Additional Authentication Data (AAD)
+* u64 aad_len) // Length of AAD in bytes.
+*/
+ENTRY(aesni_gcm_init)
+ FUNC_SAVE
+ GCM_INIT %arg3, %arg4,%arg5, %arg6
+ FUNC_RESTORE
+ ret
+ENDPROC(aesni_gcm_init)
- # shuffle xmm0 back to output as ciphertext
+/*****************************************************************************
+* void aesni_gcm_enc_update(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
+* struct gcm_context_data *data,
+* // context data
+* u8 *out, // Ciphertext output. Encrypt in-place is allowed.
+* const u8 *in, // Plaintext input
+* u64 plaintext_len, // Length of data in bytes for encryption.
+*/
+ENTRY(aesni_gcm_enc_update)
+ FUNC_SAVE
+ GCM_ENC_DEC enc
+ FUNC_RESTORE
+ ret
+ENDPROC(aesni_gcm_enc_update)
- # Output %r13 bytes
- MOVQ_R64_XMM %xmm0, %rax
- cmp $8, %r13
- jle _less_than_8_bytes_left_encrypt
- mov %rax, (%arg2 , %r11, 1)
- add $8, %r11
- psrldq $8, %xmm0
- MOVQ_R64_XMM %xmm0, %rax
- sub $8, %r13
-_less_than_8_bytes_left_encrypt:
- mov %al, (%arg2, %r11, 1)
- add $1, %r11
- shr $8, %rax
- sub $1, %r13
- jne _less_than_8_bytes_left_encrypt
-_multiple_of_16_bytes_encrypt:
- mov arg8, %r12 # %r12 = addLen (number of bytes)
- shl $3, %r12
- movd %r12d, %xmm15 # len(A) in %xmm15
- shl $3, %arg4 # len(C) in bits (*128)
- MOVQ_R64_XMM %arg4, %xmm1
- pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
- pxor %xmm1, %xmm15 # %xmm15 = len(A)||len(C)
- pxor %xmm15, %xmm8
- GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
- # final GHASH computation
- movdqa SHUF_MASK(%rip), %xmm10
- PSHUFB_XMM %xmm10, %xmm8 # perform a 16 byte swap
+/*****************************************************************************
+* void aesni_gcm_dec_update(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
+* struct gcm_context_data *data,
+* // context data
+* u8 *out, // Plaintext output. Decrypt in-place is allowed.
+* const u8 *in, // Ciphertext input
+* u64 ciphertext_len, // Length of data in bytes for decryption.
+*/
+ENTRY(aesni_gcm_dec_update)
+ FUNC_SAVE
+ GCM_ENC_DEC dec
+ FUNC_RESTORE
+ ret
+ENDPROC(aesni_gcm_dec_update)
- mov %arg5, %rax # %rax = *Y0
- movdqu (%rax), %xmm0 # %xmm0 = Y0
- ENCRYPT_SINGLE_BLOCK %xmm0, %xmm15 # Encrypt(K, Y0)
- pxor %xmm8, %xmm0
-_return_T_encrypt:
- mov arg9, %r10 # %r10 = authTag
- mov arg10, %r11 # %r11 = auth_tag_len
- cmp $16, %r11
- je _T_16_encrypt
- cmp $8, %r11
- jl _T_4_encrypt
-_T_8_encrypt:
- MOVQ_R64_XMM %xmm0, %rax
- mov %rax, (%r10)
- add $8, %r10
- sub $8, %r11
- psrldq $8, %xmm0
- cmp $0, %r11
- je _return_T_done_encrypt
-_T_4_encrypt:
- movd %xmm0, %eax
- mov %eax, (%r10)
- add $4, %r10
- sub $4, %r11
- psrldq $4, %xmm0
- cmp $0, %r11
- je _return_T_done_encrypt
-_T_123_encrypt:
- movd %xmm0, %eax
- cmp $2, %r11
- jl _T_1_encrypt
- mov %ax, (%r10)
- cmp $2, %r11
- je _return_T_done_encrypt
- add $2, %r10
- sar $16, %eax
-_T_1_encrypt:
- mov %al, (%r10)
- jmp _return_T_done_encrypt
-_T_16_encrypt:
- movdqu %xmm0, (%r10)
-_return_T_done_encrypt:
- mov %r14, %rsp
- pop %r14
- pop %r13
- pop %r12
+/*****************************************************************************
+* void aesni_gcm_finalize(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary.
+* struct gcm_context_data *data,
+* // context data
+* u8 *auth_tag, // Authenticated Tag output.
+* u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16 (most likely),
+* // 12 or 8.
+*/
+ENTRY(aesni_gcm_finalize)
+ FUNC_SAVE
+ GCM_COMPLETE %arg3 %arg4
+ FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_enc)
+ENDPROC(aesni_gcm_finalize)
#endif
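
For orientation, here is a minimal, hypothetical C sketch of how the new init/update/finalize stubs are meant to be chained; it mirrors the gcmaes_crypt_by_sg() glue added below. Everything except the aesni_gcm_* entry points, struct gcm_context_data and kernel_fpu_begin()/kernel_fpu_end() is an illustrative placeholder, and a real caller must also deal with partial blocks and tag verification the way the glue code does.

/* Assumes the asmlinkage declarations added to aesni-intel_glue.c and <asm/fpu/api.h>. */
static void example_gcm_sg_encrypt(void *aes_ctx, u8 *iv, u8 *hash_subkey,
				   const u8 *aad, unsigned long aad_len,
				   const u8 *pt1, u8 *ct1, unsigned long len1,
				   const u8 *pt2, u8 *ct2, unsigned long len2,
				   u8 *tag, unsigned long tag_len)
{
	struct gcm_context_data data;

	kernel_fpu_begin();
	/* Hash the AAD and set up the counter state once. */
	aesni_gcm_init(aes_ctx, &data, iv, hash_subkey, aad, aad_len);
	/* Feed the payload in as many pieces as the scatterlist provides. */
	aesni_gcm_enc_update(aes_ctx, &data, ct1, pt1, len1);
	aesni_gcm_enc_update(aes_ctx, &data, ct2, pt2, len2);
	/* Emit the authentication tag (16, 12 or 8 bytes). */
	aesni_gcm_finalize(aes_ctx, &data, tag, tag_len);
	kernel_fpu_end();
}
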
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 34cf1c1f8c98..acbe7e8336d8 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -72,6 +72,21 @@ struct aesni_xts_ctx {
u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};
+#define GCM_BLOCK_LEN 16
+
+struct gcm_context_data {
+ /* init, update and finalize context data */
+ u8 aad_hash[GCM_BLOCK_LEN];
+ u64 aad_length;
+ u64 in_length;
+ u8 partial_block_enc_key[GCM_BLOCK_LEN];
+ u8 orig_IV[GCM_BLOCK_LEN];
+ u8 current_counter[GCM_BLOCK_LEN];
+ u64 partial_block_len;
+ u64 unused;
+ u8 hash_keys[GCM_BLOCK_LEN * 8];
+};
+
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);
asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
@@ -105,6 +120,7 @@ asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
/* asmlinkage void aesni_gcm_enc()
* void *ctx, AES Key schedule. Starts on a 16 byte boundary.
+ * struct gcm_context_data *gdata, context data. May be uninitialized.
* u8 *out, Ciphertext output. Encrypt in-place is allowed.
* const u8 *in, Plaintext input
* unsigned long plaintext_len, Length of data in bytes for encryption.
@@ -117,13 +133,15 @@ asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
* unsigned long auth_tag_len), Authenticated Tag Length in bytes.
* Valid values are 16 (most likely), 12 or 8.
*/
-asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
+asmlinkage void aesni_gcm_enc(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
const u8 *in, unsigned long plaintext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
/* asmlinkage void aesni_gcm_dec()
* void *ctx, AES Key schedule. Starts on a 16 byte boundary.
+ * struct gcm_context_data *gdata, context data. May be uninitialized.
* u8 *out, Plaintext output. Decrypt in-place is allowed.
* const u8 *in, Ciphertext input
* unsigned long ciphertext_len, Length of data in bytes for decryption.
@@ -137,11 +155,28 @@ asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
* unsigned long auth_tag_len) Authenticated Tag Length in bytes.
* Valid values are 16 (most likely), 12 or 8.
*/
-asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
+asmlinkage void aesni_gcm_dec(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
const u8 *in, unsigned long ciphertext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
+/* Scatter / Gather routines, with args similar to above */
+asmlinkage void aesni_gcm_init(void *ctx,
+ struct gcm_context_data *gdata,
+ u8 *iv,
+ u8 *hash_subkey, const u8 *aad,
+ unsigned long aad_len);
+asmlinkage void aesni_gcm_enc_update(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
+ const u8 *in, unsigned long plaintext_len);
+asmlinkage void aesni_gcm_dec_update(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
+ const u8 *in,
+ unsigned long ciphertext_len);
+asmlinkage void aesni_gcm_finalize(void *ctx,
+ struct gcm_context_data *gdata,
+ u8 *auth_tag, unsigned long auth_tag_len);
#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
@@ -167,15 +202,17 @@ asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
-static void aesni_gcm_enc_avx(void *ctx, u8 *out,
+static void aesni_gcm_enc_avx(void *ctx,
+ struct gcm_context_data *data, u8 *out,
const u8 *in, unsigned long plaintext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){
- aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
- aad_len, auth_tag, auth_tag_len);
+ aesni_gcm_enc(ctx, data, out, in,
+ plaintext_len, iv, hash_subkey, aad,
+ aad_len, auth_tag, auth_tag_len);
} else {
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
@@ -183,15 +220,17 @@ static void aesni_gcm_enc_avx(void *ctx, u8 *out,
}
}
-static void aesni_gcm_dec_avx(void *ctx, u8 *out,
+static void aesni_gcm_dec_avx(void *ctx,
+ struct gcm_context_data *data, u8 *out,
const u8 *in, unsigned long ciphertext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
- aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
- aad_len, auth_tag, auth_tag_len);
+ aesni_gcm_dec(ctx, data, out, in,
+ ciphertext_len, iv, hash_subkey, aad,
+ aad_len, auth_tag, auth_tag_len);
} else {
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
@@ -218,15 +257,17 @@ asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
-static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
+static void aesni_gcm_enc_avx2(void *ctx,
+ struct gcm_context_data *data, u8 *out,
const u8 *in, unsigned long plaintext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
- aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
- aad_len, auth_tag, auth_tag_len);
+ aesni_gcm_enc(ctx, data, out, in,
+ plaintext_len, iv, hash_subkey, aad,
+ aad_len, auth_tag, auth_tag_len);
} else if (plaintext_len < AVX_GEN4_OPTSIZE) {
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
@@ -238,15 +279,17 @@ static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
}
}
-static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
+static void aesni_gcm_dec_avx2(void *ctx,
+ struct gcm_context_data *data, u8 *out,
const u8 *in, unsigned long ciphertext_len, u8 *iv,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len)
{
struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
- aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
- aad, aad_len, auth_tag, auth_tag_len);
+ aesni_gcm_dec(ctx, data, out, in,
+ ciphertext_len, iv, hash_subkey,
+ aad, aad_len, auth_tag, auth_tag_len);
} else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
@@ -259,15 +302,19 @@ static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
}
#endif
-static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
- const u8 *in, unsigned long plaintext_len, u8 *iv,
- u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len);
+static void (*aesni_gcm_enc_tfm)(void *ctx,
+ struct gcm_context_data *data, u8 *out,
+ const u8 *in, unsigned long plaintext_len,
+ u8 *iv, u8 *hash_subkey, const u8 *aad,
+ unsigned long aad_len, u8 *auth_tag,
+ unsigned long auth_tag_len);
-static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
- const u8 *in, unsigned long ciphertext_len, u8 *iv,
- u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len);
+static void (*aesni_gcm_dec_tfm)(void *ctx,
+ struct gcm_context_data *data, u8 *out,
+ const u8 *in, unsigned long ciphertext_len,
+ u8 *iv, u8 *hash_subkey, const u8 *aad,
+ unsigned long aad_len, u8 *auth_tag,
+ unsigned long auth_tag_len);
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
@@ -744,6 +791,127 @@ static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
return 0;
}
+static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ unsigned int assoclen, u8 *hash_subkey,
+ u8 *iv, void *aes_ctx)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+ struct gcm_context_data data AESNI_ALIGN_ATTR;
+ struct scatter_walk dst_sg_walk = {};
+ unsigned long left = req->cryptlen;
+ unsigned long len, srclen, dstlen;
+ struct scatter_walk assoc_sg_walk;
+ struct scatter_walk src_sg_walk;
+ struct scatterlist src_start[2];
+ struct scatterlist dst_start[2];
+ struct scatterlist *src_sg;
+ struct scatterlist *dst_sg;
+ u8 *src, *dst, *assoc;
+ u8 *assocmem = NULL;
+ u8 authTag[16];
+
+ if (!enc)
+ left -= auth_tag_len;
+
+ /* Linearize assoc, if not already linear */
+ if (req->src->length >= assoclen && req->src->length &&
+ (!PageHighMem(sg_page(req->src)) ||
+ req->src->offset + req->src->length < PAGE_SIZE)) {
+ scatterwalk_start(&assoc_sg_walk, req->src);
+ assoc = scatterwalk_map(&assoc_sg_walk);
+ } else {
+ /* assoc can be any length, so must be on heap */
+ assocmem = kmalloc(assoclen, GFP_ATOMIC);
+ if (unlikely(!assocmem))
+ return -ENOMEM;
+ assoc = assocmem;
+
+ scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
+ }
+
+ src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
+ scatterwalk_start(&src_sg_walk, src_sg);
+ if (req->src != req->dst) {
+ dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen);
+ scatterwalk_start(&dst_sg_walk, dst_sg);
+ }
+
+ kernel_fpu_begin();
+ aesni_gcm_init(aes_ctx, &data, iv,
+ hash_subkey, assoc, assoclen);
+ if (req->src != req->dst) {
+ while (left) {
+ src = scatterwalk_map(&src_sg_walk);
+ dst = scatterwalk_map(&dst_sg_walk);
+ srclen = scatterwalk_clamp(&src_sg_walk, left);
+ dstlen = scatterwalk_clamp(&dst_sg_walk, left);
+ len = min(srclen, dstlen);
+ if (len) {
+ if (enc)
+ aesni_gcm_enc_update(aes_ctx, &data,
+ dst, src, len);
+ else
+ aesni_gcm_dec_update(aes_ctx, &data,
+ dst, src, len);
+ }
+ left -= len;
+
+ scatterwalk_unmap(src);
+ scatterwalk_unmap(dst);
+ scatterwalk_advance(&src_sg_walk, len);
+ scatterwalk_advance(&dst_sg_walk, len);
+ scatterwalk_done(&src_sg_walk, 0, left);
+ scatterwalk_done(&dst_sg_walk, 1, left);
+ }
+ } else {
+ while (left) {
+ dst = src = scatterwalk_map(&src_sg_walk);
+ len = scatterwalk_clamp(&src_sg_walk, left);
+ if (len) {
+ if (enc)
+ aesni_gcm_enc_update(aes_ctx, &data,
+ src, src, len);
+ else
+ aesni_gcm_dec_update(aes_ctx, &data,
+ src, src, len);
+ }
+ left -= len;
+ scatterwalk_unmap(src);
+ scatterwalk_advance(&src_sg_walk, len);
+ scatterwalk_done(&src_sg_walk, 1, left);
+ }
+ }
+ aesni_gcm_finalize(aes_ctx, &data, authTag, auth_tag_len);
+ kernel_fpu_end();
+
+ if (!assocmem)
+ scatterwalk_unmap(assoc);
+ else
+ kfree(assocmem);
+
+ if (!enc) {
+ u8 authTagMsg[16];
+
+ /* Copy out original authTag */
+ scatterwalk_map_and_copy(authTagMsg, req->src,
+ req->assoclen + req->cryptlen -
+ auth_tag_len,
+ auth_tag_len, 0);
+
+ /* Compare generated tag with passed in tag. */
+ return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
+ -EBADMSG : 0;
+ }
+
+ /* Copy in the authTag */
+ scatterwalk_map_and_copy(authTag, req->dst,
+ req->assoclen + req->cryptlen,
+ auth_tag_len, 1);
+
+ return 0;
+}
+
static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
@@ -753,7 +921,14 @@ static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
struct scatter_walk src_sg_walk;
struct scatter_walk dst_sg_walk = {};
+ struct gcm_context_data data AESNI_ALIGN_ATTR;
+ if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
+ aesni_gcm_enc_tfm == aesni_gcm_enc ||
+ req->cryptlen < AVX_GEN2_OPTSIZE) {
+ return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
+ aes_ctx);
+ }
if (sg_is_last(req->src) &&
(!PageHighMem(sg_page(req->src)) ||
req->src->offset + req->src->length <= PAGE_SIZE) &&
@@ -782,7 +957,7 @@ static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
}
kernel_fpu_begin();
- aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
+ aesni_gcm_enc_tfm(aes_ctx, &data, dst, src, req->cryptlen, iv,
hash_subkey, assoc, assoclen,
dst + req->cryptlen, auth_tag_len);
kernel_fpu_end();
@@ -817,8 +992,15 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
u8 authTag[16];
struct scatter_walk src_sg_walk;
struct scatter_walk dst_sg_walk = {};
+ struct gcm_context_data data AESNI_ALIGN_ATTR;
int retval = 0;
+ if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
+ aesni_gcm_enc_tfm == aesni_gcm_enc ||
+ req->cryptlen < AVX_GEN2_OPTSIZE) {
+ return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
+ aes_ctx);
+ }
tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
if (sg_is_last(req->src) &&
@@ -849,7 +1031,7 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
kernel_fpu_begin();
- aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
+ aesni_gcm_dec_tfm(aes_ctx, &data, dst, src, tempCipherLen, iv,
hash_subkey, assoc, assoclen,
authTag, auth_tag_len);
kernel_fpu_end();
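
The scatter/gather path above is reached through the ordinary AEAD API rather than called directly. As a hedged usage sketch (standard kernel crypto calls; none of the names below come from this patch), a one-shot gcm(aes) encryption over scatterlists could look like this:

/* Needs <crypto/aead.h>, <linux/crypto.h>, <linux/err.h>, <linux/scatterlist.h>. */
static int example_gcm_aes_encrypt(const u8 *key, u8 *iv /* 12 bytes */,
				   struct scatterlist *src, struct scatterlist *dst,
				   unsigned int assoclen, unsigned int cryptlen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, 16);		/* AES-128 */
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);	/* 16-byte tag */
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);	/* AAD leads both src and dst */
	/* dst must leave room for the tag after the ciphertext. */
	aead_request_set_crypt(req, src, dst, cryptlen, iv);
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}
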
diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
index f9eca34301e2..3e0c07cc9124 100644
--- a/arch/x86/crypto/blowfish_glue.c
+++ b/arch/x86/crypto/blowfish_glue.c
@@ -25,13 +25,13 @@
*
*/
-#include <asm/processor.h>
+#include <crypto/algapi.h>
#include <crypto/blowfish.h>
+#include <crypto/internal/skcipher.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <crypto/algapi.h>
/* regular block cipher functions */
asmlinkage void __blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src,
@@ -77,20 +77,28 @@ static void blowfish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
blowfish_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}
-static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+static int blowfish_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return blowfish_setkey(&tfm->base, key, keylen);
+}
+
+static int ecb_crypt(struct skcipher_request *req,
void (*fn)(struct bf_ctx *, u8 *, const u8 *),
void (*fn_4way)(struct bf_ctx *, u8 *, const u8 *))
{
- struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = BF_BLOCK_SIZE;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int nbytes;
int err;
- err = blkcipher_walk_virt(desc, walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk->nbytes)) {
- u8 *wsrc = walk->src.virt.addr;
- u8 *wdst = walk->dst.virt.addr;
+ while ((nbytes = walk.nbytes)) {
+ u8 *wsrc = walk.src.virt.addr;
+ u8 *wdst = walk.dst.virt.addr;
/* Process four block batch */
if (nbytes >= bsize * 4) {
@@ -116,34 +124,25 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
} while (nbytes >= bsize);
done:
- err = blkcipher_walk_done(desc, walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_crypt(desc, &walk, blowfish_enc_blk, blowfish_enc_blk_4way);
+ return ecb_crypt(req, blowfish_enc_blk, blowfish_enc_blk_4way);
}
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_crypt(desc, &walk, blowfish_dec_blk, blowfish_dec_blk_4way);
+ return ecb_crypt(req, blowfish_dec_blk, blowfish_dec_blk_4way);
}
-static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static unsigned int __cbc_encrypt(struct bf_ctx *ctx,
+ struct skcipher_walk *walk)
{
- struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = BF_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
@@ -164,27 +163,27 @@ static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
return nbytes;
}
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
- nbytes = __cbc_encrypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ nbytes = __cbc_encrypt(ctx, &walk);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static unsigned int __cbc_decrypt(struct bf_ctx *ctx,
+ struct skcipher_walk *walk)
{
- struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = BF_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
@@ -245,24 +244,25 @@ done:
return nbytes;
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
- nbytes = __cbc_decrypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ nbytes = __cbc_decrypt(ctx, &walk);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static void ctr_crypt_final(struct bf_ctx *ctx, struct blkcipher_walk *walk)
+static void ctr_crypt_final(struct bf_ctx *ctx, struct skcipher_walk *walk)
{
u8 *ctrblk = walk->iv;
u8 keystream[BF_BLOCK_SIZE];
@@ -276,10 +276,8 @@ static void ctr_crypt_final(struct bf_ctx *ctx, struct blkcipher_walk *walk)
crypto_inc(ctrblk, BF_BLOCK_SIZE);
}
-static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static unsigned int __ctr_crypt(struct bf_ctx *ctx, struct skcipher_walk *walk)
{
- struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = BF_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
@@ -332,29 +330,30 @@ done:
return nbytes;
}
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct bf_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, BF_BLOCK_SIZE);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) >= BF_BLOCK_SIZE) {
- nbytes = __ctr_crypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ nbytes = __ctr_crypt(ctx, &walk);
+ err = skcipher_walk_done(&walk, nbytes);
}
- if (walk.nbytes) {
- ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk);
- err = blkcipher_walk_done(desc, &walk, 0);
+ if (nbytes) {
+ ctr_crypt_final(ctx, &walk);
+ err = skcipher_walk_done(&walk, 0);
}
return err;
}
-static struct crypto_alg bf_algs[4] = { {
+static struct crypto_alg bf_cipher_alg = {
.cra_name = "blowfish",
.cra_driver_name = "blowfish-asm",
.cra_priority = 200,
@@ -372,66 +371,50 @@ static struct crypto_alg bf_algs[4] = { {
.cia_decrypt = blowfish_decrypt,
}
}
-}, {
- .cra_name = "ecb(blowfish)",
- .cra_driver_name = "ecb-blowfish-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = BF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct bf_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = BF_MIN_KEY_SIZE,
- .max_keysize = BF_MAX_KEY_SIZE,
- .setkey = blowfish_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
+};
+
+static struct skcipher_alg bf_skcipher_algs[] = {
+ {
+ .base.cra_name = "ecb(blowfish)",
+ .base.cra_driver_name = "ecb-blowfish-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = BF_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct bf_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = BF_MIN_KEY_SIZE,
+ .max_keysize = BF_MAX_KEY_SIZE,
+ .setkey = blowfish_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "cbc(blowfish)",
+ .base.cra_driver_name = "cbc-blowfish-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = BF_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct bf_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = BF_MIN_KEY_SIZE,
+ .max_keysize = BF_MAX_KEY_SIZE,
+ .ivsize = BF_BLOCK_SIZE,
+ .setkey = blowfish_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "ctr(blowfish)",
+ .base.cra_driver_name = "ctr-blowfish-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct bf_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = BF_MIN_KEY_SIZE,
+ .max_keysize = BF_MAX_KEY_SIZE,
+ .ivsize = BF_BLOCK_SIZE,
+ .chunksize = BF_BLOCK_SIZE,
+ .setkey = blowfish_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
},
-}, {
- .cra_name = "cbc(blowfish)",
- .cra_driver_name = "cbc-blowfish-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = BF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct bf_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = BF_MIN_KEY_SIZE,
- .max_keysize = BF_MAX_KEY_SIZE,
- .ivsize = BF_BLOCK_SIZE,
- .setkey = blowfish_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(blowfish)",
- .cra_driver_name = "ctr-blowfish-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct bf_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = BF_MIN_KEY_SIZE,
- .max_keysize = BF_MAX_KEY_SIZE,
- .ivsize = BF_BLOCK_SIZE,
- .setkey = blowfish_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-} };
+};
static bool is_blacklisted_cpu(void)
{
@@ -456,6 +439,8 @@ MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
static int __init init(void)
{
+ int err;
+
if (!force && is_blacklisted_cpu()) {
printk(KERN_INFO
"blowfish-x86_64: performance on this CPU "
@@ -464,12 +449,23 @@ static int __init init(void)
return -ENODEV;
}
- return crypto_register_algs(bf_algs, ARRAY_SIZE(bf_algs));
+ err = crypto_register_alg(&bf_cipher_alg);
+ if (err)
+ return err;
+
+ err = crypto_register_skciphers(bf_skcipher_algs,
+ ARRAY_SIZE(bf_skcipher_algs));
+ if (err)
+ crypto_unregister_alg(&bf_cipher_alg);
+
+ return err;
}
static void __exit fini(void)
{
- crypto_unregister_algs(bf_algs, ARRAY_SIZE(bf_algs));
+ crypto_unregister_alg(&bf_cipher_alg);
+ crypto_unregister_skciphers(bf_skcipher_algs,
+ ARRAY_SIZE(bf_skcipher_algs));
}
module_init(init);
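
Every glue file converted in this series follows the same shape once the blkcipher boilerplate is gone. A hedged distillation of that skcipher_walk loop is shown below; the per-block callback, context type and bsize are placeholders, while the walk API is the one used in the conversions above.

/* Needs <crypto/internal/skcipher.h>. */
static int example_ecb_crypt(struct skcipher_request *req,
			     void (*crypt_block)(void *ctx, u8 *dst, const u8 *src),
			     unsigned int bsize)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	void *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;

		/* Process whole blocks of this mapped chunk. */
		while (nbytes >= bsize) {
			crypt_block(ctx, dst, src);
			src += bsize;
			dst += bsize;
			nbytes -= bsize;
		}
		/* Hand any leftover back to the walk machinery. */
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}
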
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index 60907c139c4e..d4992e458f92 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -10,18 +10,15 @@
*
*/
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <linux/err.h>
-#include <crypto/ablk_helper.h>
-#include <crypto/algapi.h>
-#include <crypto/ctr.h>
-#include <crypto/lrw.h>
-#include <crypto/xts.h>
-#include <asm/fpu/api.h>
#include <asm/crypto/camellia.h>
#include <asm/crypto/glue_helper.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/simd.h>
+#include <crypto/xts.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/types.h>
#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
#define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32
@@ -150,413 +147,120 @@ static const struct common_glue_ctx camellia_dec_xts = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
{
- return glue_ecb_crypt_128bit(&camellia_enc, desc, dst, src, nbytes);
+ return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen,
+ &tfm->base.crt_flags);
}
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_crypt_128bit(&camellia_dec, desc, dst, src, nbytes);
+ return glue_ecb_req_128bit(&camellia_enc, req);
}
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(camellia_enc_blk), desc,
- dst, src, nbytes);
-}
-
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src,
- nbytes);
-}
-
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes);
-}
-
-static inline bool camellia_fpu_begin(bool fpu_enabled, unsigned int nbytes)
-{
- return glue_fpu_begin(CAMELLIA_BLOCK_SIZE,
- CAMELLIA_AESNI_PARALLEL_BLOCKS, NULL, fpu_enabled,
- nbytes);
-}
-
-static inline void camellia_fpu_end(bool fpu_enabled)
-{
- glue_fpu_end(fpu_enabled);
-}
-
-static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
- unsigned int key_len)
-{
- return __camellia_setkey(crypto_tfm_ctx(tfm), in_key, key_len,
- &tfm->crt_flags);
-}
-
-struct crypt_priv {
- struct camellia_ctx *ctx;
- bool fpu_enabled;
-};
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes >= CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS * bsize) {
- camellia_ecb_enc_32way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS;
- }
-
- if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
- camellia_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- }
-
- while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
- camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- camellia_enc_blk(ctx->ctx, srcdst, srcdst);
+ return glue_ecb_req_128bit(&camellia_dec, req);
}
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes >= CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS * bsize) {
- camellia_ecb_dec_32way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS;
- }
-
- if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
- camellia_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- }
-
- while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
- camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- camellia_dec_blk(ctx->ctx, srcdst, srcdst);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
+ req);
}
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->camellia_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- camellia_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
+ return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
}
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->camellia_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- camellia_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
+ return glue_ctr_req_128bit(&camellia_ctr, req);
}
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
{
- struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_crypt_128bit(&camellia_enc_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(camellia_enc_blk),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
+ return glue_xts_req_128bit(&camellia_enc_xts, req,
+ XTS_TWEAK_CAST(camellia_enc_blk),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
{
- struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_crypt_128bit(&camellia_dec_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(camellia_enc_blk),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
+ return glue_xts_req_128bit(&camellia_dec_xts, req,
+ XTS_TWEAK_CAST(camellia_enc_blk),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static struct crypto_alg cmll_algs[10] = { {
- .cra_name = "__ecb-camellia-aesni-avx2",
- .cra_driver_name = "__driver-ecb-camellia-aesni-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .setkey = camellia_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-camellia-aesni-avx2",
- .cra_driver_name = "__driver-cbc-camellia-aesni-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .setkey = camellia_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "__ctr-camellia-aesni-avx2",
- .cra_driver_name = "__driver-ctr-camellia-aesni-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = camellia_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "__lrw-camellia-aesni-avx2",
- .cra_driver_name = "__driver-lrw-camellia-aesni-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_camellia_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = lrw_camellia_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "__xts-camellia-aesni-avx2",
- .cra_driver_name = "__driver-xts-camellia-aesni-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = xts_camellia_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-}, {
- .cra_name = "ecb(camellia)",
- .cra_driver_name = "ecb-camellia-aesni-avx2",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(camellia)",
- .cra_driver_name = "cbc-camellia-aesni-avx2",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(camellia)",
- .cra_driver_name = "ctr-camellia-aesni-avx2",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
- },
- },
-}, {
- .cra_name = "lrw(camellia)",
- .cra_driver_name = "lrw-camellia-aesni-avx2",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "xts(camellia)",
- .cra_driver_name = "xts-camellia-aesni-avx2",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
+static struct skcipher_alg camellia_algs[] = {
+ {
+ .base.cra_name = "__ecb(camellia)",
+ .base.cra_driver_name = "__ecb-camellia-aesni-avx2",
+ .base.cra_priority = 500,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .setkey = camellia_setkey,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "__cbc(camellia)",
+ .base.cra_driver_name = "__cbc-camellia-aesni-avx2",
+ .base.cra_priority = 500,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_setkey,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "__ctr(camellia)",
+ .base.cra_driver_name = "__ctr-camellia-aesni-avx2",
+ .base.cra_priority = 500,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .chunksize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_setkey,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }, {
+ .base.cra_name = "__xts(camellia)",
+ .base.cra_driver_name = "__xts-camellia-aesni-avx2",
+ .base.cra_priority = 500,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = 2 * CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = 2 * CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = xts_camellia_setkey,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
},
-} };
+};
+
+static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
static int __init camellia_aesni_init(void)
{
@@ -576,12 +280,15 @@ static int __init camellia_aesni_init(void)
return -ENODEV;
}
- return crypto_register_algs(cmll_algs, ARRAY_SIZE(cmll_algs));
+ return simd_register_skciphers_compat(camellia_algs,
+ ARRAY_SIZE(camellia_algs),
+ camellia_simd_algs);
}
static void __exit camellia_aesni_fini(void)
{
- crypto_unregister_algs(cmll_algs, ARRAY_SIZE(cmll_algs));
+ simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs),
+ camellia_simd_algs);
}
module_init(camellia_aesni_init);
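
The "__"-prefixed, CRYPTO_ALG_INTERNAL algorithms registered here are not meant to be requested directly; simd_register_skciphers_compat() wraps each one in an async helper that routes requests through cryptd whenever SIMD is not usable in the calling context and registers the wrapper under the plain name. A rough consumer sketch (standard skcipher API, names and lengths illustrative, not code from this patch):

/* Needs <crypto/skcipher.h>, <linux/crypto.h>, <linux/err.h>. */
static int example_xts_camellia_encrypt(const u8 *key, unsigned int keylen,
					struct scatterlist *src,
					struct scatterlist *dst,
					unsigned int nbytes,
					u8 *iv /* 16-byte tweak */)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* Typically resolves to the SIMD wrapper around the AVX2 driver above. */
	tfm = crypto_alloc_skcipher("xts(camellia)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);	/* 2 * Camellia key */
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, nbytes, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
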
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index d96429da88eb..d09f6521466a 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -10,18 +10,15 @@
*
*/
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <linux/err.h>
-#include <crypto/ablk_helper.h>
-#include <crypto/algapi.h>
-#include <crypto/ctr.h>
-#include <crypto/lrw.h>
-#include <crypto/xts.h>
-#include <asm/fpu/api.h>
#include <asm/crypto/camellia.h>
#include <asm/crypto/glue_helper.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/simd.h>
+#include <crypto/xts.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/types.h>
#define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
@@ -154,401 +151,142 @@ static const struct common_glue_ctx camellia_dec_xts = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&camellia_enc, desc, dst, src, nbytes);
-}
-
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
{
- return glue_ecb_crypt_128bit(&camellia_dec, desc, dst, src, nbytes);
+ return __camellia_setkey(crypto_skcipher_ctx(tfm), key, keylen,
+ &tfm->base.crt_flags);
}
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(camellia_enc_blk), desc,
- dst, src, nbytes);
+ return glue_ecb_req_128bit(&camellia_enc, req);
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src,
- nbytes);
+ return glue_ecb_req_128bit(&camellia_dec, req);
}
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
+ req);
}
-static inline bool camellia_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_fpu_begin(CAMELLIA_BLOCK_SIZE,
- CAMELLIA_AESNI_PARALLEL_BLOCKS, NULL, fpu_enabled,
- nbytes);
+ return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
}
-static inline void camellia_fpu_end(bool fpu_enabled)
+static int ctr_crypt(struct skcipher_request *req)
{
- glue_fpu_end(fpu_enabled);
+ return glue_ctr_req_128bit(&camellia_ctr, req);
}
-static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
- unsigned int key_len)
+int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
{
- return __camellia_setkey(crypto_tfm_ctx(tfm), in_key, key_len,
- &tfm->crt_flags);
+ struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 *flags = &tfm->base.crt_flags;
+ int err;
+
+ err = xts_verify_key(tfm, key, keylen);
+ if (err)
+ return err;
+
+ /* first half of xts-key is for crypt */
+ err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
+ if (err)
+ return err;
+
+ /* second half of xts-key is for tweak */
+ return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
+ flags);
}
+EXPORT_SYMBOL_GPL(xts_camellia_setkey);
-struct crypt_priv {
- struct camellia_ctx *ctx;
- bool fpu_enabled;
-};
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
{
- const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
- camellia_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- }
-
- while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
- camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
- }
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- camellia_enc_blk(ctx->ctx, srcdst, srcdst);
+ return glue_xts_req_128bit(&camellia_enc_xts, req,
+ XTS_TWEAK_CAST(camellia_enc_blk),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
{
- const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
- camellia_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
- }
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
- camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
- nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- camellia_dec_blk(ctx->ctx, srcdst, srcdst);
+ return glue_xts_req_128bit(&camellia_dec_xts, req,
+ XTS_TWEAK_CAST(camellia_enc_blk),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[CAMELLIA_AESNI_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->camellia_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- camellia_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[CAMELLIA_AESNI_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->camellia_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- camellia_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&camellia_enc_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(camellia_enc_blk),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
-}
-
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&camellia_dec_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(camellia_enc_blk),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
-}
-
-static struct crypto_alg cmll_algs[10] = { {
- .cra_name = "__ecb-camellia-aesni",
- .cra_driver_name = "__driver-ecb-camellia-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .setkey = camellia_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-camellia-aesni",
- .cra_driver_name = "__driver-cbc-camellia-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .setkey = camellia_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "__ctr-camellia-aesni",
- .cra_driver_name = "__driver-ctr-camellia-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = camellia_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "__lrw-camellia-aesni",
- .cra_driver_name = "__driver-lrw-camellia-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_camellia_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = lrw_camellia_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "__xts-camellia-aesni",
- .cra_driver_name = "__driver-xts-camellia-aesni",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = xts_camellia_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-}, {
- .cra_name = "ecb(camellia)",
- .cra_driver_name = "ecb-camellia-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(camellia)",
- .cra_driver_name = "cbc-camellia-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(camellia)",
- .cra_driver_name = "ctr-camellia-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
- },
- },
-}, {
- .cra_name = "lrw(camellia)",
- .cra_driver_name = "lrw-camellia-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "xts(camellia)",
- .cra_driver_name = "xts-camellia-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
+static struct skcipher_alg camellia_algs[] = {
+ {
+ .base.cra_name = "__ecb(camellia)",
+ .base.cra_driver_name = "__ecb-camellia-aesni",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .setkey = camellia_setkey,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "__cbc(camellia)",
+ .base.cra_driver_name = "__cbc-camellia-aesni",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_setkey,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "__ctr(camellia)",
+ .base.cra_driver_name = "__ctr-camellia-aesni",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .chunksize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_setkey,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }, {
+ .base.cra_name = "__xts(camellia)",
+ .base.cra_driver_name = "__xts-camellia-aesni",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = 2 * CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = 2 * CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = xts_camellia_setkey,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
},
-} };
+};
+
+static struct simd_skcipher_alg *camellia_simd_algs[ARRAY_SIZE(camellia_algs)];
static int __init camellia_aesni_init(void)
{
@@ -567,12 +305,15 @@ static int __init camellia_aesni_init(void)
return -ENODEV;
}
- return crypto_register_algs(cmll_algs, ARRAY_SIZE(cmll_algs));
+ return simd_register_skciphers_compat(camellia_algs,
+ ARRAY_SIZE(camellia_algs),
+ camellia_simd_algs);
}
static void __exit camellia_aesni_fini(void)
{
- crypto_unregister_algs(cmll_algs, ARRAY_SIZE(cmll_algs));
+ simd_unregister_skciphers(camellia_algs, ARRAY_SIZE(camellia_algs),
+ camellia_simd_algs);
}
module_init(camellia_aesni_init);
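
A note on what the conversion above buys a caller (a hedged sketch, not part of this patch): the CRYPTO_ALG_INTERNAL "__ctr(camellia)" implementation is never requested directly; users ask for the plain name and the crypto core resolves it, on CPUs where this driver loads, to the asynchronous SIMD wrapper that simd_register_skciphers_compat() creates from each "__"-prefixed entry. The function name below is illustrative only.

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_alloc_camellia_ctr(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;

	/* typically resolves to the "ctr-camellia-aesni" simd wrapper,
	 * never to the internal "__ctr-camellia-aesni" algorithm itself */
	tfm = crypto_alloc_skcipher("ctr(camellia)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_skcipher(tfm);
		return -ENOMEM;
	}

	/* crypto_skcipher_setkey(), skcipher_request_set_crypt() and
	 * crypto_skcipher_encrypt() would follow here */

	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return 0;
}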
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index af4840ab2a3d..dcd5e0f71b00 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -23,15 +23,12 @@
*
*/
-#include <asm/processor.h>
#include <asm/unaligned.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/algapi.h>
-#include <crypto/lrw.h>
-#include <crypto/xts.h>
#include <asm/crypto/camellia.h>
#include <asm/crypto/glue_helper.h>
@@ -1272,13 +1269,19 @@ int __camellia_setkey(struct camellia_ctx *cctx, const unsigned char *key,
}
EXPORT_SYMBOL_GPL(__camellia_setkey);
-static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+static int camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len)
{
- return __camellia_setkey(crypto_tfm_ctx(tfm), in_key, key_len,
+ return __camellia_setkey(crypto_tfm_ctx(tfm), key, key_len,
&tfm->crt_flags);
}
+static int camellia_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ return camellia_setkey(&tfm->base, key, key_len);
+}
+
void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src)
{
u128 iv = *src;
@@ -1373,188 +1376,33 @@ static const struct common_glue_ctx camellia_dec_cbc = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&camellia_enc, desc, dst, src, nbytes);
-}
-
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&camellia_dec, desc, dst, src, nbytes);
-}
-
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(camellia_enc_blk), desc,
- dst, src, nbytes);
-}
-
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src,
- nbytes);
-}
-
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes);
-}
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
-{
- const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
- struct camellia_ctx *ctx = priv;
- int i;
-
- while (nbytes >= 2 * bsize) {
- camellia_enc_blk_2way(ctx, srcdst, srcdst);
- srcdst += bsize * 2;
- nbytes -= bsize * 2;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- camellia_enc_blk(ctx, srcdst, srcdst);
-}
-
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
-{
- const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
- struct camellia_ctx *ctx = priv;
- int i;
-
- while (nbytes >= 2 * bsize) {
- camellia_dec_blk_2way(ctx, srcdst, srcdst);
- srcdst += bsize * 2;
- nbytes -= bsize * 2;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- camellia_dec_blk(ctx, srcdst, srcdst);
-}
-
-int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct camellia_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = __camellia_setkey(&ctx->camellia_ctx, key,
- keylen - CAMELLIA_BLOCK_SIZE,
- &tfm->crt_flags);
- if (err)
- return err;
-
- return lrw_init_table(&ctx->lrw_table,
- key + keylen - CAMELLIA_BLOCK_SIZE);
-}
-EXPORT_SYMBOL_GPL(lrw_camellia_setkey);
-
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[2 * 4];
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &ctx->camellia_ctx,
- .crypt_fn = encrypt_callback,
- };
-
- return lrw_crypt(desc, dst, src, nbytes, &req);
-}
-
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[2 * 4];
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &ctx->camellia_ctx,
- .crypt_fn = decrypt_callback,
- };
-
- return lrw_crypt(desc, dst, src, nbytes, &req);
+ return glue_ecb_req_128bit(&camellia_enc, req);
}
-void lrw_camellia_exit_tfm(struct crypto_tfm *tfm)
+static int ecb_decrypt(struct skcipher_request *req)
{
- struct camellia_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lrw_free_table(&ctx->lrw_table);
+ return glue_ecb_req_128bit(&camellia_dec, req);
}
-EXPORT_SYMBOL_GPL(lrw_camellia_exit_tfm);
-int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
+static int cbc_encrypt(struct skcipher_request *req)
{
- struct camellia_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
- int err;
-
- err = xts_check_key(tfm, key, keylen);
- if (err)
- return err;
-
- /* first half of xts-key is for crypt */
- err = __camellia_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
- if (err)
- return err;
-
- /* second half of xts-key is for tweak */
- return __camellia_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
- flags);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
+ req);
}
-EXPORT_SYMBOL_GPL(xts_camellia_setkey);
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- le128 buf[2 * 4];
- struct xts_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .tweak_ctx = &ctx->tweak_ctx,
- .tweak_fn = XTS_TWEAK_CAST(camellia_enc_blk),
- .crypt_ctx = &ctx->crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
-
- return xts_crypt(desc, dst, src, nbytes, &req);
+ return glue_cbc_decrypt_req_128bit(&camellia_dec_cbc, req);
}
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- le128 buf[2 * 4];
- struct xts_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .tweak_ctx = &ctx->tweak_ctx,
- .tweak_fn = XTS_TWEAK_CAST(camellia_enc_blk),
- .crypt_ctx = &ctx->crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
-
- return xts_crypt(desc, dst, src, nbytes, &req);
+ return glue_ctr_req_128bit(&camellia_ctr, req);
}
-static struct crypto_alg camellia_algs[6] = { {
+static struct crypto_alg camellia_cipher_alg = {
.cra_name = "camellia",
.cra_driver_name = "camellia-asm",
.cra_priority = 200,
@@ -1572,109 +1420,50 @@ static struct crypto_alg camellia_algs[6] = { {
.cia_decrypt = camellia_decrypt
}
}
-}, {
- .cra_name = "ecb(camellia)",
- .cra_driver_name = "ecb-camellia-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .setkey = camellia_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(camellia)",
- .cra_driver_name = "cbc-camellia-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = camellia_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(camellia)",
- .cra_driver_name = "ctr-camellia-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = camellia_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "lrw(camellia)",
- .cra_driver_name = "lrw-camellia-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_camellia_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE +
- CAMELLIA_BLOCK_SIZE,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = lrw_camellia_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "xts(camellia)",
- .cra_driver_name = "xts-camellia-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = CAMELLIA_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct camellia_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAMELLIA_MIN_KEY_SIZE * 2,
- .max_keysize = CAMELLIA_MAX_KEY_SIZE * 2,
- .ivsize = CAMELLIA_BLOCK_SIZE,
- .setkey = xts_camellia_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-} };
+};
+
+static struct skcipher_alg camellia_skcipher_algs[] = {
+ {
+ .base.cra_name = "ecb(camellia)",
+ .base.cra_driver_name = "ecb-camellia-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .setkey = camellia_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "cbc(camellia)",
+ .base.cra_driver_name = "cbc-camellia-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "ctr(camellia)",
+ .base.cra_driver_name = "ctr-camellia-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct camellia_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .chunksize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }
+};
static bool is_blacklisted_cpu(void)
{
@@ -1700,6 +1489,8 @@ MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
static int __init init(void)
{
+ int err;
+
if (!force && is_blacklisted_cpu()) {
printk(KERN_INFO
"camellia-x86_64: performance on this CPU "
@@ -1708,12 +1499,23 @@ static int __init init(void)
return -ENODEV;
}
- return crypto_register_algs(camellia_algs, ARRAY_SIZE(camellia_algs));
+ err = crypto_register_alg(&camellia_cipher_alg);
+ if (err)
+ return err;
+
+ err = crypto_register_skciphers(camellia_skcipher_algs,
+ ARRAY_SIZE(camellia_skcipher_algs));
+ if (err)
+ crypto_unregister_alg(&camellia_cipher_alg);
+
+ return err;
}
static void __exit fini(void)
{
- crypto_unregister_algs(camellia_algs, ARRAY_SIZE(camellia_algs));
+ crypto_unregister_alg(&camellia_cipher_alg);
+ crypto_unregister_skciphers(camellia_skcipher_algs,
+ ARRAY_SIZE(camellia_skcipher_algs));
}
module_init(init);
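
Stripped of diff noise, the two-step registration that replaces the old crypto_register_algs() call is worth spelling out: the bare "camellia" cipher and the ecb/cbc/ctr skciphers are now separate registrations, so a failure in the second step must roll back the first. This is a commented restatement of the init() added above, not new code.

static int __init init(void)
{
	int err;

	/* the single-block "camellia" cipher keeps the old crypto_alg
	 * interface */
	err = crypto_register_alg(&camellia_cipher_alg);
	if (err)
		return err;

	/* ecb/cbc/ctr move to the skcipher interface; on failure, unwind
	 * the cipher registration so a failed module load leaves nothing
	 * behind */
	err = crypto_register_skciphers(camellia_skcipher_algs,
					ARRAY_SIZE(camellia_skcipher_algs));
	if (err)
		crypto_unregister_alg(&camellia_cipher_alg);

	return err;
}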
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index dbea6020ffe7..41034745d6a2 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -21,18 +21,14 @@
*
*/
-#include <linux/module.h>
-#include <linux/hardirq.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <linux/err.h>
-#include <crypto/ablk_helper.h>
+#include <asm/crypto/glue_helper.h>
#include <crypto/algapi.h>
#include <crypto/cast5.h>
-#include <crypto/cryptd.h>
-#include <crypto/ctr.h>
-#include <asm/fpu/api.h>
-#include <asm/crypto/glue_helper.h>
+#include <crypto/internal/simd.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/types.h>
#define CAST5_PARALLEL_BLOCKS 16
@@ -45,10 +41,17 @@ asmlinkage void cast5_cbc_dec_16way(struct cast5_ctx *ctx, u8 *dst,
asmlinkage void cast5_ctr_16way(struct cast5_ctx *ctx, u8 *dst, const u8 *src,
__be64 *iv);
-static inline bool cast5_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+static int cast5_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return cast5_setkey(&tfm->base, key, keylen);
+}
+
+static inline bool cast5_fpu_begin(bool fpu_enabled, struct skcipher_walk *walk,
+ unsigned int nbytes)
{
return glue_fpu_begin(CAST5_BLOCK_SIZE, CAST5_PARALLEL_BLOCKS,
- NULL, fpu_enabled, nbytes);
+ walk, fpu_enabled, nbytes);
}
static inline void cast5_fpu_end(bool fpu_enabled)
@@ -56,29 +59,28 @@ static inline void cast5_fpu_end(bool fpu_enabled)
return glue_fpu_end(fpu_enabled);
}
-static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
- bool enc)
+static int ecb_crypt(struct skcipher_request *req, bool enc)
{
bool fpu_enabled = false;
- struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
const unsigned int bsize = CAST5_BLOCK_SIZE;
unsigned int nbytes;
void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
int err;
- fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
-
- err = blkcipher_walk_virt(desc, walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk->nbytes)) {
- u8 *wsrc = walk->src.virt.addr;
- u8 *wdst = walk->dst.virt.addr;
+ while ((nbytes = walk.nbytes)) {
+ u8 *wsrc = walk.src.virt.addr;
+ u8 *wdst = walk.dst.virt.addr;
- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
+ fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
/* Process multi-block batch */
if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
+ fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
do {
fn(ctx, wdst, wsrc);
@@ -103,76 +105,58 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
} while (nbytes >= bsize);
done:
- err = blkcipher_walk_done(desc, walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
cast5_fpu_end(fpu_enabled);
return err;
}
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_crypt(desc, &walk, true);
+ return ecb_crypt(req, true);
}
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_crypt(desc, &walk, false);
+ return ecb_crypt(req, false);
}
-static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static int cbc_encrypt(struct skcipher_request *req)
{
- struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = CAST5_BLOCK_SIZE;
- unsigned int nbytes = walk->nbytes;
- u64 *src = (u64 *)walk->src.virt.addr;
- u64 *dst = (u64 *)walk->dst.virt.addr;
- u64 *iv = (u64 *)walk->iv;
-
- do {
- *dst = *src ^ *iv;
- __cast5_encrypt(ctx, (u8 *)dst, (u8 *)dst);
- iv = dst;
-
- src += 1;
- dst += 1;
- nbytes -= bsize;
- } while (nbytes >= bsize);
-
- *(u64 *)walk->iv = *iv;
- return nbytes;
-}
-
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
- nbytes = __cbc_encrypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ u64 *src = (u64 *)walk.src.virt.addr;
+ u64 *dst = (u64 *)walk.dst.virt.addr;
+ u64 *iv = (u64 *)walk.iv;
+
+ do {
+ *dst = *src ^ *iv;
+ __cast5_encrypt(ctx, (u8 *)dst, (u8 *)dst);
+ iv = dst;
+ src++;
+ dst++;
+ nbytes -= bsize;
+ } while (nbytes >= bsize);
+
+ *(u64 *)walk.iv = *iv;
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static unsigned int __cbc_decrypt(struct cast5_ctx *ctx,
+ struct skcipher_walk *walk)
{
- struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = CAST5_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
@@ -224,31 +208,29 @@ done:
return nbytes;
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
bool fpu_enabled = false;
- struct blkcipher_walk walk;
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
- nbytes = __cbc_decrypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
+ nbytes = __cbc_decrypt(ctx, &walk);
+ err = skcipher_walk_done(&walk, nbytes);
}
cast5_fpu_end(fpu_enabled);
return err;
}
-static void ctr_crypt_final(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static void ctr_crypt_final(struct skcipher_walk *walk, struct cast5_ctx *ctx)
{
- struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
u8 *ctrblk = walk->iv;
u8 keystream[CAST5_BLOCK_SIZE];
u8 *src = walk->src.virt.addr;
@@ -261,10 +243,9 @@ static void ctr_crypt_final(struct blkcipher_desc *desc,
crypto_inc(ctrblk, CAST5_BLOCK_SIZE);
}
-static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static unsigned int __ctr_crypt(struct skcipher_walk *walk,
+ struct cast5_ctx *ctx)
{
- struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = CAST5_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
@@ -307,162 +288,80 @@ done:
return nbytes;
}
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
bool fpu_enabled = false;
- struct blkcipher_walk walk;
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, CAST5_BLOCK_SIZE);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
- nbytes = __ctr_crypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ fpu_enabled = cast5_fpu_begin(fpu_enabled, &walk, nbytes);
+ nbytes = __ctr_crypt(&walk, ctx);
+ err = skcipher_walk_done(&walk, nbytes);
}
cast5_fpu_end(fpu_enabled);
if (walk.nbytes) {
- ctr_crypt_final(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, 0);
+ ctr_crypt_final(&walk, ctx);
+ err = skcipher_walk_done(&walk, 0);
}
return err;
}
+static struct skcipher_alg cast5_algs[] = {
+ {
+ .base.cra_name = "__ecb(cast5)",
+ .base.cra_driver_name = "__ecb-cast5-avx",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAST5_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cast5_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAST5_MIN_KEY_SIZE,
+ .max_keysize = CAST5_MAX_KEY_SIZE,
+ .setkey = cast5_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "__cbc(cast5)",
+ .base.cra_driver_name = "__cbc-cast5-avx",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAST5_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cast5_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAST5_MIN_KEY_SIZE,
+ .max_keysize = CAST5_MAX_KEY_SIZE,
+ .ivsize = CAST5_BLOCK_SIZE,
+ .setkey = cast5_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "__ctr(cast5)",
+ .base.cra_driver_name = "__ctr-cast5-avx",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct cast5_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAST5_MIN_KEY_SIZE,
+ .max_keysize = CAST5_MAX_KEY_SIZE,
+ .ivsize = CAST5_BLOCK_SIZE,
+ .chunksize = CAST5_BLOCK_SIZE,
+ .setkey = cast5_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }
+};
-static struct crypto_alg cast5_algs[6] = { {
- .cra_name = "__ecb-cast5-avx",
- .cra_driver_name = "__driver-ecb-cast5-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAST5_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cast5_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAST5_MIN_KEY_SIZE,
- .max_keysize = CAST5_MAX_KEY_SIZE,
- .setkey = cast5_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-cast5-avx",
- .cra_driver_name = "__driver-cbc-cast5-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAST5_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cast5_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAST5_MIN_KEY_SIZE,
- .max_keysize = CAST5_MAX_KEY_SIZE,
- .setkey = cast5_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "__ctr-cast5-avx",
- .cra_driver_name = "__driver-ctr-cast5-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct cast5_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAST5_MIN_KEY_SIZE,
- .max_keysize = CAST5_MAX_KEY_SIZE,
- .ivsize = CAST5_BLOCK_SIZE,
- .setkey = cast5_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "ecb(cast5)",
- .cra_driver_name = "ecb-cast5-avx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAST5_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAST5_MIN_KEY_SIZE,
- .max_keysize = CAST5_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(cast5)",
- .cra_driver_name = "cbc-cast5-avx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAST5_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAST5_MIN_KEY_SIZE,
- .max_keysize = CAST5_MAX_KEY_SIZE,
- .ivsize = CAST5_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(cast5)",
- .cra_driver_name = "ctr-cast5-avx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAST5_MIN_KEY_SIZE,
- .max_keysize = CAST5_MAX_KEY_SIZE,
- .ivsize = CAST5_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
- },
- },
-} };
+static struct simd_skcipher_alg *cast5_simd_algs[ARRAY_SIZE(cast5_algs)];
static int __init cast5_init(void)
{
@@ -474,12 +373,15 @@ static int __init cast5_init(void)
return -ENODEV;
}
- return crypto_register_algs(cast5_algs, ARRAY_SIZE(cast5_algs));
+ return simd_register_skciphers_compat(cast5_algs,
+ ARRAY_SIZE(cast5_algs),
+ cast5_simd_algs);
}
static void __exit cast5_exit(void)
{
- crypto_unregister_algs(cast5_algs, ARRAY_SIZE(cast5_algs));
+ simd_unregister_skciphers(cast5_algs, ARRAY_SIZE(cast5_algs),
+ cast5_simd_algs);
}
module_init(cast5_init);
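
The cast5 hunks above also show the general shape of the walk conversion: skcipher_walk_virt()/skcipher_walk_done() replace the blkcipher walk, and the context comes from crypto_skcipher_reqtfm() rather than desc->tfm. Below is a condensed, illustrative sketch of that skeleton in the context of this file, reduced to the scalar one-block path for brevity; the real handlers additionally bracket each walk step with cast5_fpu_begin()/cast5_fpu_end() to batch SIMD use, and dispatch to the 16-way AVX routines when enough blocks are queued.

static int example_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cast5_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	/* map the request's scatterlists chunk by chunk */
	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;

		/* scalar fallback only; the 16-way AVX path is omitted */
		while (nbytes >= CAST5_BLOCK_SIZE) {
			__cast5_encrypt(ctx, dst, src);
			src += CAST5_BLOCK_SIZE;
			dst += CAST5_BLOCK_SIZE;
			nbytes -= CAST5_BLOCK_SIZE;
		}

		/* report the unprocessed tail (0 for ECB) back to the walk */
		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
}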
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 50e684768c55..9fb66b5e94b2 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -24,19 +24,13 @@
*/
#include <linux/module.h>
-#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
-#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/cast6.h>
-#include <crypto/cryptd.h>
-#include <crypto/b128ops.h>
-#include <crypto/ctr.h>
-#include <crypto/lrw.h>
+#include <crypto/internal/simd.h>
#include <crypto/xts.h>
-#include <asm/fpu/api.h>
#include <asm/crypto/glue_helper.h>
#define CAST6_PARALLEL_BLOCKS 8
@@ -56,6 +50,12 @@ asmlinkage void cast6_xts_enc_8way(struct cast6_ctx *ctx, u8 *dst,
asmlinkage void cast6_xts_dec_8way(struct cast6_ctx *ctx, u8 *dst,
const u8 *src, le128 *iv);
+static int cast6_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return cast6_setkey(&tfm->base, key, keylen);
+}
+
static void cast6_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
glue_xts_crypt_128bit_one(ctx, dst, src, iv,
@@ -157,164 +157,30 @@ static const struct common_glue_ctx cast6_dec_xts = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&cast6_enc, desc, dst, src, nbytes);
-}
-
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&cast6_dec, desc, dst, src, nbytes);
-}
-
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__cast6_encrypt), desc,
- dst, src, nbytes);
-}
-
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_cbc_decrypt_128bit(&cast6_dec_cbc, desc, dst, src,
- nbytes);
-}
-
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ctr_crypt_128bit(&cast6_ctr, desc, dst, src, nbytes);
+ return glue_ecb_req_128bit(&cast6_enc, req);
}
-static inline bool cast6_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_fpu_begin(CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS,
- NULL, fpu_enabled, nbytes);
+ return glue_ecb_req_128bit(&cast6_dec, req);
}
-static inline void cast6_fpu_end(bool fpu_enabled)
+static int cbc_encrypt(struct skcipher_request *req)
{
- glue_fpu_end(fpu_enabled);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__cast6_encrypt),
+ req);
}
-struct crypt_priv {
- struct cast6_ctx *ctx;
- bool fpu_enabled;
-};
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- const unsigned int bsize = CAST6_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
- cast6_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __cast6_encrypt(ctx->ctx, srcdst, srcdst);
+ return glue_cbc_decrypt_req_128bit(&cast6_dec_cbc, req);
}
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- const unsigned int bsize = CAST6_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes == bsize * CAST6_PARALLEL_BLOCKS) {
- cast6_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __cast6_decrypt(ctx->ctx, srcdst, srcdst);
-}
-
-struct cast6_lrw_ctx {
- struct lrw_table_ctx lrw_table;
- struct cast6_ctx cast6_ctx;
-};
-
-static int lrw_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct cast6_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = __cast6_setkey(&ctx->cast6_ctx, key, keylen - CAST6_BLOCK_SIZE,
- &tfm->crt_flags);
- if (err)
- return err;
-
- return lrw_init_table(&ctx->lrw_table, key + keylen - CAST6_BLOCK_SIZE);
-}
-
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[CAST6_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->cast6_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- cast6_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[CAST6_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->cast6_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- cast6_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static void lrw_exit_tfm(struct crypto_tfm *tfm)
-{
- struct cast6_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lrw_free_table(&ctx->lrw_table);
+ return glue_ctr_req_128bit(&cast6_ctr, req);
}
struct cast6_xts_ctx {
@@ -322,14 +188,14 @@ struct cast6_xts_ctx {
struct cast6_ctx crypt_ctx;
};
-static int xts_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
+static int xts_cast6_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
{
- struct cast6_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
+ struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 *flags = &tfm->base.crt_flags;
int err;
- err = xts_check_key(tfm, key, keylen);
+ err = xts_verify_key(tfm, key, keylen);
if (err)
return err;
@@ -343,245 +209,87 @@ static int xts_cast6_setkey(struct crypto_tfm *tfm, const u8 *key,
flags);
}
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
{
- struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_crypt_128bit(&cast6_enc_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(__cast6_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
+ return glue_xts_req_128bit(&cast6_enc_xts, req,
+ XTS_TWEAK_CAST(__cast6_encrypt),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
{
- struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- return glue_xts_crypt_128bit(&cast6_dec_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(__cast6_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
+ return glue_xts_req_128bit(&cast6_dec_xts, req,
+ XTS_TWEAK_CAST(__cast6_encrypt),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static struct crypto_alg cast6_algs[10] = { {
- .cra_name = "__ecb-cast6-avx",
- .cra_driver_name = "__driver-ecb-cast6-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAST6_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cast6_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE,
- .setkey = cast6_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-cast6-avx",
- .cra_driver_name = "__driver-cbc-cast6-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAST6_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cast6_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE,
- .setkey = cast6_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "__ctr-cast6-avx",
- .cra_driver_name = "__driver-ctr-cast6-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct cast6_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE,
- .ivsize = CAST6_BLOCK_SIZE,
- .setkey = cast6_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "__lrw-cast6-avx",
- .cra_driver_name = "__driver-lrw-cast6-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAST6_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cast6_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE +
- CAST6_BLOCK_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE +
- CAST6_BLOCK_SIZE,
- .ivsize = CAST6_BLOCK_SIZE,
- .setkey = lrw_cast6_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "__xts-cast6-avx",
- .cra_driver_name = "__driver-xts-cast6-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = CAST6_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cast6_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE * 2,
- .max_keysize = CAST6_MAX_KEY_SIZE * 2,
- .ivsize = CAST6_BLOCK_SIZE,
- .setkey = xts_cast6_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-}, {
- .cra_name = "ecb(cast6)",
- .cra_driver_name = "ecb-cast6-avx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAST6_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(cast6)",
- .cra_driver_name = "cbc-cast6-avx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAST6_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE,
- .ivsize = CAST6_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(cast6)",
- .cra_driver_name = "ctr-cast6-avx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE,
- .ivsize = CAST6_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
- },
- },
-}, {
- .cra_name = "lrw(cast6)",
- .cra_driver_name = "lrw-cast6-avx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAST6_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE +
- CAST6_BLOCK_SIZE,
- .max_keysize = CAST6_MAX_KEY_SIZE +
- CAST6_BLOCK_SIZE,
- .ivsize = CAST6_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "xts(cast6)",
- .cra_driver_name = "xts-cast6-avx",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CAST6_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = CAST6_MIN_KEY_SIZE * 2,
- .max_keysize = CAST6_MAX_KEY_SIZE * 2,
- .ivsize = CAST6_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
+static struct skcipher_alg cast6_algs[] = {
+ {
+ .base.cra_name = "__ecb(cast6)",
+ .base.cra_driver_name = "__ecb-cast6-avx",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAST6_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cast6_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAST6_MIN_KEY_SIZE,
+ .max_keysize = CAST6_MAX_KEY_SIZE,
+ .setkey = cast6_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "__cbc(cast6)",
+ .base.cra_driver_name = "__cbc-cast6-avx",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAST6_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cast6_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAST6_MIN_KEY_SIZE,
+ .max_keysize = CAST6_MAX_KEY_SIZE,
+ .ivsize = CAST6_BLOCK_SIZE,
+ .setkey = cast6_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "__ctr(cast6)",
+ .base.cra_driver_name = "__ctr-cast6-avx",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct cast6_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = CAST6_MIN_KEY_SIZE,
+ .max_keysize = CAST6_MAX_KEY_SIZE,
+ .ivsize = CAST6_BLOCK_SIZE,
+ .chunksize = CAST6_BLOCK_SIZE,
+ .setkey = cast6_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }, {
+ .base.cra_name = "__xts(cast6)",
+ .base.cra_driver_name = "__xts-cast6-avx",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = CAST6_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct cast6_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = 2 * CAST6_MIN_KEY_SIZE,
+ .max_keysize = 2 * CAST6_MAX_KEY_SIZE,
+ .ivsize = CAST6_BLOCK_SIZE,
+ .setkey = xts_cast6_setkey,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
},
-} };
+};
+
+static struct simd_skcipher_alg *cast6_simd_algs[ARRAY_SIZE(cast6_algs)];
static int __init cast6_init(void)
{
@@ -593,12 +301,15 @@ static int __init cast6_init(void)
return -ENODEV;
}
- return crypto_register_algs(cast6_algs, ARRAY_SIZE(cast6_algs));
+ return simd_register_skciphers_compat(cast6_algs,
+ ARRAY_SIZE(cast6_algs),
+ cast6_simd_algs);
}
static void __exit cast6_exit(void)
{
- crypto_unregister_algs(cast6_algs, ARRAY_SIZE(cast6_algs));
+ simd_unregister_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs),
+ cast6_simd_algs);
}
module_init(cast6_init);
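
The XTS setkey conversion follows one pattern in every driver touched by this series: xts_verify_key() replaces xts_check_key() for skcipher tfms, then the two key halves are expanded into independent data and tweak schedules. The middle of xts_cast6_setkey() is elided by the hunk above, so the sketch below is an assumption that mirrors the xts_camellia_setkey() shown earlier in this diff, adapted to the cast6 context and the skcipher setkey signature; the example_* name is illustrative.

static int example_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int err;

	/* basic XTS key sanity checks (even length; under FIPS, the two
	 * halves must also differ) */
	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	/* first half of the xts key drives the data cipher ... */
	err = __cast6_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
	if (err)
		return err;

	/* ... second half drives the tweak cipher */
	return __cast6_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
			      flags);
}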
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
index 30c0a37f4882..5c610d4ef9fc 100644
--- a/arch/x86/crypto/des3_ede_glue.c
+++ b/arch/x86/crypto/des3_ede_glue.c
@@ -20,13 +20,13 @@
*
*/
-#include <asm/processor.h>
+#include <crypto/algapi.h>
#include <crypto/des.h>
+#include <crypto/internal/skcipher.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <crypto/algapi.h>
struct des3_ede_x86_ctx {
u32 enc_expkey[DES3_EDE_EXPKEY_WORDS];
@@ -83,18 +83,18 @@ static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}
-static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
- const u32 *expkey)
+static int ecb_crypt(struct skcipher_request *req, const u32 *expkey)
{
- unsigned int bsize = DES3_EDE_BLOCK_SIZE;
+ const unsigned int bsize = DES3_EDE_BLOCK_SIZE;
+ struct skcipher_walk walk;
unsigned int nbytes;
int err;
- err = blkcipher_walk_virt(desc, walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk->nbytes)) {
- u8 *wsrc = walk->src.virt.addr;
- u8 *wdst = walk->dst.virt.addr;
+ while ((nbytes = walk.nbytes)) {
+ u8 *wsrc = walk.src.virt.addr;
+ u8 *wdst = walk.dst.virt.addr;
/* Process four block batch */
if (nbytes >= bsize * 3) {
@@ -121,36 +121,31 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
} while (nbytes >= bsize);
done:
- err = blkcipher_walk_done(desc, walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_crypt(desc, &walk, ctx->enc_expkey);
+ return ecb_crypt(req, ctx->enc_expkey);
}
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_crypt(desc, &walk, ctx->dec_expkey);
+ return ecb_crypt(req, ctx->dec_expkey);
}
-static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static unsigned int __cbc_encrypt(struct des3_ede_x86_ctx *ctx,
+ struct skcipher_walk *walk)
{
- struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
@@ -171,27 +166,27 @@ static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
return nbytes;
}
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
- nbytes = __cbc_encrypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ nbytes = __cbc_encrypt(ctx, &walk);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static unsigned int __cbc_decrypt(struct des3_ede_x86_ctx *ctx,
+ struct skcipher_walk *walk)
{
- struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
u64 *src = (u64 *)walk->src.virt.addr;
@@ -250,25 +245,26 @@ done:
return nbytes;
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
- nbytes = __cbc_decrypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ nbytes = __cbc_decrypt(ctx, &walk);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx,
- struct blkcipher_walk *walk)
+ struct skcipher_walk *walk)
{
u8 *ctrblk = walk->iv;
u8 keystream[DES3_EDE_BLOCK_SIZE];
@@ -282,10 +278,9 @@ static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx,
crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE);
}
-static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+static unsigned int __ctr_crypt(struct des3_ede_x86_ctx *ctx,
+ struct skcipher_walk *walk)
{
- struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
unsigned int bsize = DES3_EDE_BLOCK_SIZE;
unsigned int nbytes = walk->nbytes;
__be64 *src = (__be64 *)walk->src.virt.addr;
@@ -333,23 +328,24 @@ done:
return nbytes;
}
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, DES3_EDE_BLOCK_SIZE);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) >= DES3_EDE_BLOCK_SIZE) {
- nbytes = __ctr_crypt(desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ nbytes = __ctr_crypt(ctx, &walk);
+ err = skcipher_walk_done(&walk, nbytes);
}
- if (walk.nbytes) {
- ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk);
- err = blkcipher_walk_done(desc, &walk, 0);
+ if (nbytes) {
+ ctr_crypt_final(ctx, &walk);
+ err = skcipher_walk_done(&walk, 0);
}
return err;
@@ -381,7 +377,14 @@ static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key,
return 0;
}
-static struct crypto_alg des3_ede_algs[4] = { {
+static int des3_ede_x86_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return des3_ede_x86_setkey(&tfm->base, key, keylen);
+}
+
+static struct crypto_alg des3_ede_cipher = {
.cra_name = "des3_ede",
.cra_driver_name = "des3_ede-asm",
.cra_priority = 200,
@@ -399,66 +402,50 @@ static struct crypto_alg des3_ede_algs[4] = { {
.cia_decrypt = des3_ede_x86_decrypt,
}
}
-}, {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3_ede-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des3_ede_x86_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3_ede-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = des3_ede_x86_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(des3_ede)",
- .cra_driver_name = "ctr-des3_ede-asm",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = des3_ede_x86_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-} };
+};
+
+static struct skcipher_alg des3_ede_skciphers[] = {
+ {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3_ede-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_ede_x86_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3_ede-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = des3_ede_x86_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "ctr(des3_ede)",
+ .base.cra_driver_name = "ctr-des3_ede-asm",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .chunksize = DES3_EDE_BLOCK_SIZE,
+ .setkey = des3_ede_x86_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }
+};
static bool is_blacklisted_cpu(void)
{
@@ -483,17 +470,30 @@ MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
static int __init des3_ede_x86_init(void)
{
+ int err;
+
if (!force && is_blacklisted_cpu()) {
pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n");
return -ENODEV;
}
- return crypto_register_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs));
+ err = crypto_register_alg(&des3_ede_cipher);
+ if (err)
+ return err;
+
+ err = crypto_register_skciphers(des3_ede_skciphers,
+ ARRAY_SIZE(des3_ede_skciphers));
+ if (err)
+ crypto_unregister_alg(&des3_ede_cipher);
+
+ return err;
}
static void __exit des3_ede_x86_fini(void)
{
- crypto_unregister_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs));
+ crypto_unregister_alg(&des3_ede_cipher);
+ crypto_unregister_skciphers(des3_ede_skciphers,
+ ARRAY_SIZE(des3_ede_skciphers));
}
module_init(des3_ede_x86_init);
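
CTR mode has no length restriction, so the converted ctr_crypt() finishes any partial final block outside the walk loop via ctr_crypt_final(), whose body is only partly visible in the hunk above (the keystream buffer and the crypto_inc() of the counter). The sketch below is a hedged reconstruction of what that tail handling amounts to; des3_ede_enc_blk() and crypto_xor_cpy() are assumptions, not taken from the visible lines.

static void example_ctr_final(struct des3_ede_x86_ctx *ctx,
			      struct skcipher_walk *walk)
{
	u8 keystream[DES3_EDE_BLOCK_SIZE];
	u8 *ctrblk = walk->iv;
	unsigned int nbytes = walk->nbytes;	/* < DES3_EDE_BLOCK_SIZE here */

	/* one block of keystream = E(counter); single-block helper assumed */
	des3_ede_enc_blk(ctx, keystream, ctrblk);

	/* XOR just the remaining tail bytes into the destination */
	crypto_xor_cpy(walk->dst.virt.addr, keystream, walk->src.virt.addr,
		       nbytes);

	/* keep the counter consistent even though nothing follows */
	crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE);
}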
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index d61e57960fe0..a78ef99a9981 100644
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -29,313 +29,212 @@
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/skcipher.h>
-#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>
-static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
+ struct skcipher_request *req)
{
- void *ctx = crypto_blkcipher_ctx(desc->tfm);
+ void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
const unsigned int bsize = 128 / 8;
- unsigned int nbytes, i, func_bytes;
+ struct skcipher_walk walk;
bool fpu_enabled = false;
+ unsigned int nbytes;
int err;
- err = blkcipher_walk_virt(desc, walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk->nbytes)) {
- u8 *wsrc = walk->src.virt.addr;
- u8 *wdst = walk->dst.virt.addr;
+ while ((nbytes = walk.nbytes)) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ unsigned int func_bytes;
+ unsigned int i;
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, fpu_enabled, nbytes);
-
+ &walk, fpu_enabled, nbytes);
for (i = 0; i < gctx->num_funcs; i++) {
func_bytes = bsize * gctx->funcs[i].num_blocks;
- /* Process multi-block batch */
- if (nbytes >= func_bytes) {
- do {
- gctx->funcs[i].fn_u.ecb(ctx, wdst,
- wsrc);
+ if (nbytes < func_bytes)
+ continue;
- wsrc += func_bytes;
- wdst += func_bytes;
- nbytes -= func_bytes;
- } while (nbytes >= func_bytes);
+ /* Process multi-block batch */
+ do {
+ gctx->funcs[i].fn_u.ecb(ctx, dst, src);
+ src += func_bytes;
+ dst += func_bytes;
+ nbytes -= func_bytes;
+ } while (nbytes >= func_bytes);
- if (nbytes < bsize)
- goto done;
- }
+ if (nbytes < bsize)
+ break;
}
-
-done:
- err = blkcipher_walk_done(desc, walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
glue_fpu_end(fpu_enabled);
return err;
}
+EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);
-int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
+ struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return __glue_ecb_crypt_128bit(gctx, desc, &walk);
-}
-EXPORT_SYMBOL_GPL(glue_ecb_crypt_128bit);
-
-static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
- struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
-{
- void *ctx = crypto_blkcipher_ctx(desc->tfm);
+ void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
const unsigned int bsize = 128 / 8;
- unsigned int nbytes = walk->nbytes;
- u128 *src = (u128 *)walk->src.virt.addr;
- u128 *dst = (u128 *)walk->dst.virt.addr;
- u128 *iv = (u128 *)walk->iv;
-
- do {
- u128_xor(dst, src, iv);
- fn(ctx, (u8 *)dst, (u8 *)dst);
- iv = dst;
-
- src += 1;
- dst += 1;
- nbytes -= bsize;
- } while (nbytes >= bsize);
-
- *(u128 *)walk->iv = *iv;
- return nbytes;
-}
-
-int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct blkcipher_walk walk;
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
- nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ const u128 *src = (u128 *)walk.src.virt.addr;
+ u128 *dst = (u128 *)walk.dst.virt.addr;
+ u128 *iv = (u128 *)walk.iv;
+
+ do {
+ u128_xor(dst, src, iv);
+ fn(ctx, (u8 *)dst, (u8 *)dst);
+ iv = dst;
+ src++;
+ dst++;
+ nbytes -= bsize;
+ } while (nbytes >= bsize);
+
+ *(u128 *)walk.iv = *iv;
+ err = skcipher_walk_done(&walk, nbytes);
}
-
return err;
}
-EXPORT_SYMBOL_GPL(glue_cbc_encrypt_128bit);
+EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);
-static unsigned int
-__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
+ struct skcipher_request *req)
{
- void *ctx = crypto_blkcipher_ctx(desc->tfm);
+ void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
const unsigned int bsize = 128 / 8;
- unsigned int nbytes = walk->nbytes;
- u128 *src = (u128 *)walk->src.virt.addr;
- u128 *dst = (u128 *)walk->dst.virt.addr;
- u128 last_iv;
- unsigned int num_blocks, func_bytes;
- unsigned int i;
+ struct skcipher_walk walk;
+ bool fpu_enabled = false;
+ unsigned int nbytes;
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
- /* Start of the last block. */
- src += nbytes / bsize - 1;
- dst += nbytes / bsize - 1;
+ while ((nbytes = walk.nbytes)) {
+ const u128 *src = walk.src.virt.addr;
+ u128 *dst = walk.dst.virt.addr;
+ unsigned int func_bytes, num_blocks;
+ unsigned int i;
+ u128 last_iv;
- last_iv = *src;
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+ &walk, fpu_enabled, nbytes);
+ /* Start of the last block. */
+ src += nbytes / bsize - 1;
+ dst += nbytes / bsize - 1;
- for (i = 0; i < gctx->num_funcs; i++) {
- num_blocks = gctx->funcs[i].num_blocks;
- func_bytes = bsize * num_blocks;
+ last_iv = *src;
- /* Process multi-block batch */
- if (nbytes >= func_bytes) {
+ for (i = 0; i < gctx->num_funcs; i++) {
+ num_blocks = gctx->funcs[i].num_blocks;
+ func_bytes = bsize * num_blocks;
+
+ if (nbytes < func_bytes)
+ continue;
+
+ /* Process multi-block batch */
do {
- nbytes -= func_bytes - bsize;
src -= num_blocks - 1;
dst -= num_blocks - 1;
gctx->funcs[i].fn_u.cbc(ctx, dst, src);
- nbytes -= bsize;
+ nbytes -= func_bytes;
if (nbytes < bsize)
goto done;
- u128_xor(dst, dst, src - 1);
- src -= 1;
- dst -= 1;
+ u128_xor(dst, dst, --src);
+ dst--;
} while (nbytes >= func_bytes);
}
- }
-
done:
- u128_xor(dst, dst, (u128 *)walk->iv);
- *(u128 *)walk->iv = last_iv;
-
- return nbytes;
-}
-
-int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- const unsigned int bsize = 128 / 8;
- bool fpu_enabled = false;
- struct blkcipher_walk walk;
- int err;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
-
- while ((nbytes = walk.nbytes)) {
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, fpu_enabled, nbytes);
- nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ u128_xor(dst, dst, (u128 *)walk.iv);
+ *(u128 *)walk.iv = last_iv;
+ err = skcipher_walk_done(&walk, nbytes);
}
glue_fpu_end(fpu_enabled);
return err;
}
-EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
+EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);
-static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
- struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
+int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
+ struct skcipher_request *req)
{
- void *ctx = crypto_blkcipher_ctx(desc->tfm);
- u8 *src = (u8 *)walk->src.virt.addr;
- u8 *dst = (u8 *)walk->dst.virt.addr;
- unsigned int nbytes = walk->nbytes;
- le128 ctrblk;
- u128 tmp;
+ void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ const unsigned int bsize = 128 / 8;
+ struct skcipher_walk walk;
+ bool fpu_enabled = false;
+ unsigned int nbytes;
+ int err;
- be128_to_le128(&ctrblk, (be128 *)walk->iv);
+ err = skcipher_walk_virt(&walk, req, false);
- memcpy(&tmp, src, nbytes);
- fn_ctr(ctx, &tmp, &tmp, &ctrblk);
- memcpy(dst, &tmp, nbytes);
+ while ((nbytes = walk.nbytes) >= bsize) {
+ const u128 *src = walk.src.virt.addr;
+ u128 *dst = walk.dst.virt.addr;
+ unsigned int func_bytes, num_blocks;
+ unsigned int i;
+ le128 ctrblk;
- le128_to_be128((be128 *)walk->iv, &ctrblk);
-}
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+ &walk, fpu_enabled, nbytes);
-static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
-{
- const unsigned int bsize = 128 / 8;
- void *ctx = crypto_blkcipher_ctx(desc->tfm);
- unsigned int nbytes = walk->nbytes;
- u128 *src = (u128 *)walk->src.virt.addr;
- u128 *dst = (u128 *)walk->dst.virt.addr;
- le128 ctrblk;
- unsigned int num_blocks, func_bytes;
- unsigned int i;
+ be128_to_le128(&ctrblk, (be128 *)walk.iv);
- be128_to_le128(&ctrblk, (be128 *)walk->iv);
+ for (i = 0; i < gctx->num_funcs; i++) {
+ num_blocks = gctx->funcs[i].num_blocks;
+ func_bytes = bsize * num_blocks;
- /* Process multi-block batch */
- for (i = 0; i < gctx->num_funcs; i++) {
- num_blocks = gctx->funcs[i].num_blocks;
- func_bytes = bsize * num_blocks;
+ if (nbytes < func_bytes)
+ continue;
- if (nbytes >= func_bytes) {
+ /* Process multi-block batch */
do {
gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);
-
src += num_blocks;
dst += num_blocks;
nbytes -= func_bytes;
} while (nbytes >= func_bytes);
if (nbytes < bsize)
- goto done;
+ break;
}
- }
-
-done:
- le128_to_be128((be128 *)walk->iv, &ctrblk);
- return nbytes;
-}
-
-int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- const unsigned int bsize = 128 / 8;
- bool fpu_enabled = false;
- struct blkcipher_walk walk;
- int err;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, bsize);
- while ((nbytes = walk.nbytes) >= bsize) {
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, fpu_enabled, nbytes);
- nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ le128_to_be128((be128 *)walk.iv, &ctrblk);
+ err = skcipher_walk_done(&walk, nbytes);
}
glue_fpu_end(fpu_enabled);
- if (walk.nbytes) {
- glue_ctr_crypt_final_128bit(
- gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
- err = blkcipher_walk_done(desc, &walk, 0);
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit);
-
-static unsigned int __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
- void *ctx,
- struct blkcipher_desc *desc,
- struct blkcipher_walk *walk)
-{
- const unsigned int bsize = 128 / 8;
- unsigned int nbytes = walk->nbytes;
- u128 *src = (u128 *)walk->src.virt.addr;
- u128 *dst = (u128 *)walk->dst.virt.addr;
- unsigned int num_blocks, func_bytes;
- unsigned int i;
-
- /* Process multi-block batch */
- for (i = 0; i < gctx->num_funcs; i++) {
- num_blocks = gctx->funcs[i].num_blocks;
- func_bytes = bsize * num_blocks;
-
- if (nbytes >= func_bytes) {
- do {
- gctx->funcs[i].fn_u.xts(ctx, dst, src,
- (le128 *)walk->iv);
+ if (nbytes) {
+ le128 ctrblk;
+ u128 tmp;
- src += num_blocks;
- dst += num_blocks;
- nbytes -= func_bytes;
- } while (nbytes >= func_bytes);
+ be128_to_le128(&ctrblk, (be128 *)walk.iv);
+ memcpy(&tmp, walk.src.virt.addr, nbytes);
+ gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, &tmp, &tmp,
+ &ctrblk);
+ memcpy(walk.dst.virt.addr, &tmp, nbytes);
+ le128_to_be128((be128 *)walk.iv, &ctrblk);
- if (nbytes < bsize)
- goto done;
- }
+ err = skcipher_walk_done(&walk, 0);
}
-done:
- return nbytes;
+ return err;
}
+EXPORT_SYMBOL_GPL(glue_ctr_req_128bit);
static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
void *ctx,
@@ -372,46 +271,6 @@ done:
return nbytes;
}
-/* for implementations implementing faster XTS IV generator */
-int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- void (*tweak_fn)(void *ctx, u8 *dst, const u8 *src),
- void *tweak_ctx, void *crypt_ctx)
-{
- const unsigned int bsize = 128 / 8;
- bool fpu_enabled = false;
- struct blkcipher_walk walk;
- int err;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
-
- err = blkcipher_walk_virt(desc, &walk);
- nbytes = walk.nbytes;
- if (!nbytes)
- return err;
-
- /* set minimum length to bsize, for tweak_fn */
- fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
- desc, fpu_enabled,
- nbytes < bsize ? bsize : nbytes);
-
- /* calculate first value of T */
- tweak_fn(tweak_ctx, walk.iv, walk.iv);
-
- while (nbytes) {
- nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
-
- err = blkcipher_walk_done(desc, &walk, nbytes);
- nbytes = walk.nbytes;
- }
-
- glue_fpu_end(fpu_enabled);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
-
int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
struct skcipher_request *req,
common_glue_func_t tweak_fn, void *tweak_ctx,
@@ -429,9 +288,9 @@ int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
return err;
/* set minimum length to bsize, for tweak_fn */
- fpu_enabled = glue_skwalk_fpu_begin(bsize, gctx->fpu_blocks_limit,
- &walk, fpu_enabled,
- nbytes < bsize ? bsize : nbytes);
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+ &walk, fpu_enabled,
+ nbytes < bsize ? bsize : nbytes);
/* calculate first value of T */
tweak_fn(tweak_ctx, walk.iv, walk.iv);
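
The glue_helper conversion above swaps the blkcipher_walk plumbing for skcipher_walk in every mode helper. Stripped of the multi-width function table and FPU batching, the per-request walk loop reduces to the sketch below; my_cipher_block() is a hypothetical one-block function, and the real glue_*_req_128bit() helpers dispatch through common_glue_ctx instead:

    /*
     * Minimal ECB-style skcipher_walk loop (error handling and FPU
     * management omitted); processes one 128-bit block at a time.
     */
    static int my_ecb_crypt(struct skcipher_request *req)
    {
        void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
            const u8 *src = walk.src.virt.addr;
            u8 *dst = walk.dst.virt.addr;

            while (nbytes >= 16) {
                my_cipher_block(ctx, dst, src);
                src += 16;
                dst += 16;
                nbytes -= 16;
            }
            /* hand any unprocessed tail back to the walk machinery */
            err = skcipher_walk_done(&walk, nbytes);
        }
        return err;
    }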
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 870f6d812a2d..03347b16ac9d 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -14,15 +14,12 @@
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
-#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
-#include <crypto/ctr.h>
-#include <crypto/lrw.h>
-#include <crypto/xts.h>
+#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
-#include <asm/fpu/api.h>
-#include <asm/crypto/serpent-avx.h>
+#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>
+#include <asm/crypto/serpent-avx.h>
#define SERPENT_AVX2_PARALLEL_BLOCKS 16
@@ -40,6 +37,12 @@ asmlinkage void serpent_xts_enc_16way(struct serpent_ctx *ctx, u8 *dst,
asmlinkage void serpent_xts_dec_16way(struct serpent_ctx *ctx, u8 *dst,
const u8 *src, le128 *iv);
+static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
+}
+
static const struct common_glue_ctx serpent_enc = {
.num_funcs = 3,
.fpu_blocks_limit = 8,
@@ -136,403 +139,113 @@ static const struct common_glue_ctx serpent_dec_xts = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
+ return glue_ecb_req_128bit(&serpent_enc, req);
}
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
+ return glue_ecb_req_128bit(&serpent_dec, req);
}
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
- dst, src, nbytes);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
+ req);
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
- nbytes);
+ return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
}
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
+ return glue_ctr_req_128bit(&serpent_ctr, req);
}
-static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
{
- /* since reusing AVX functions, starts using FPU at 8 parallel blocks */
- return glue_fpu_begin(SERPENT_BLOCK_SIZE, 8, NULL, fpu_enabled, nbytes);
-}
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-static inline void serpent_fpu_end(bool fpu_enabled)
-{
- glue_fpu_end(fpu_enabled);
+ return glue_xts_req_128bit(&serpent_enc_xts, req,
+ XTS_TWEAK_CAST(__serpent_encrypt),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-struct crypt_priv {
- struct serpent_ctx *ctx;
- bool fpu_enabled;
-};
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
{
- const unsigned int bsize = SERPENT_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes >= SERPENT_AVX2_PARALLEL_BLOCKS * bsize) {
- serpent_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * SERPENT_AVX2_PARALLEL_BLOCKS;
- nbytes -= bsize * SERPENT_AVX2_PARALLEL_BLOCKS;
- }
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- while (nbytes >= SERPENT_PARALLEL_BLOCKS * bsize) {
- serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * SERPENT_PARALLEL_BLOCKS;
- nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __serpent_encrypt(ctx->ctx, srcdst, srcdst);
+ return glue_xts_req_128bit(&serpent_dec_xts, req,
+ XTS_TWEAK_CAST(__serpent_encrypt),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
-{
- const unsigned int bsize = SERPENT_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes >= SERPENT_AVX2_PARALLEL_BLOCKS * bsize) {
- serpent_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * SERPENT_AVX2_PARALLEL_BLOCKS;
- nbytes -= bsize * SERPENT_AVX2_PARALLEL_BLOCKS;
- }
-
- while (nbytes >= SERPENT_PARALLEL_BLOCKS * bsize) {
- serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst);
- srcdst += bsize * SERPENT_PARALLEL_BLOCKS;
- nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __serpent_decrypt(ctx->ctx, srcdst, srcdst);
-}
-
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[SERPENT_AVX2_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->serpent_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- serpent_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[SERPENT_AVX2_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->serpent_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- serpent_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&serpent_enc_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(__serpent_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
-}
-
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&serpent_dec_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(__serpent_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
-}
-
-static struct crypto_alg srp_algs[10] = { {
- .cra_name = "__ecb-serpent-avx2",
- .cra_driver_name = "__driver-ecb-serpent-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[0].cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = serpent_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-serpent-avx2",
- .cra_driver_name = "__driver-cbc-serpent-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[1].cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = serpent_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "__ctr-serpent-avx2",
- .cra_driver_name = "__driver-ctr-serpent-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[2].cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = serpent_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "__lrw-serpent-avx2",
- .cra_driver_name = "__driver-lrw-serpent-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[3].cra_list),
- .cra_exit = lrw_serpent_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = lrw_serpent_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "__xts-serpent-avx2",
- .cra_driver_name = "__driver-xts-serpent-avx2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[4].cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
- .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = xts_serpent_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-}, {
- .cra_name = "ecb(serpent)",
- .cra_driver_name = "ecb-serpent-avx2",
- .cra_priority = 600,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[5].cra_list),
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(serpent)",
- .cra_driver_name = "cbc-serpent-avx2",
- .cra_priority = 600,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[6].cra_list),
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(serpent)",
- .cra_driver_name = "ctr-serpent-avx2",
- .cra_priority = 600,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[7].cra_list),
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
- },
- },
-}, {
- .cra_name = "lrw(serpent)",
- .cra_driver_name = "lrw-serpent-avx2",
- .cra_priority = 600,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[8].cra_list),
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "xts(serpent)",
- .cra_driver_name = "xts-serpent-avx2",
- .cra_priority = 600,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(srp_algs[9].cra_list),
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
- .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
+static struct skcipher_alg serpent_algs[] = {
+ {
+ .base.cra_name = "__ecb(serpent)",
+ .base.cra_driver_name = "__ecb-serpent-avx2",
+ .base.cra_priority = 600,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = SERPENT_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "__cbc(serpent)",
+ .base.cra_driver_name = "__cbc-serpent-avx2",
+ .base.cra_priority = 600,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = SERPENT_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .ivsize = SERPENT_BLOCK_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "__ctr(serpent)",
+ .base.cra_driver_name = "__ctr-serpent-avx2",
+ .base.cra_priority = 600,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .ivsize = SERPENT_BLOCK_SIZE,
+ .chunksize = SERPENT_BLOCK_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }, {
+ .base.cra_name = "__xts(serpent)",
+ .base.cra_driver_name = "__xts-serpent-avx2",
+ .base.cra_priority = 600,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = SERPENT_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct serpent_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = 2 * SERPENT_MIN_KEY_SIZE,
+ .max_keysize = 2 * SERPENT_MAX_KEY_SIZE,
+ .ivsize = SERPENT_BLOCK_SIZE,
+ .setkey = xts_serpent_setkey,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
},
-} };
+};
+
+static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
static int __init init(void)
{
@@ -548,12 +261,15 @@ static int __init init(void)
return -ENODEV;
}
- return crypto_register_algs(srp_algs, ARRAY_SIZE(srp_algs));
+ return simd_register_skciphers_compat(serpent_algs,
+ ARRAY_SIZE(serpent_algs),
+ serpent_simd_algs);
}
static void __exit fini(void)
{
- crypto_unregister_algs(srp_algs, ARRAY_SIZE(srp_algs));
+ simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
+ serpent_simd_algs);
}
module_init(init);
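
With the serpent-avx2 glue reduced to internal ("__"-prefixed, CRYPTO_ALG_INTERNAL) skciphers, the async algorithms that user-visible "ecb(serpent)" and friends need are generated by the simd helper at registration time. A sketch of that init/exit pairing, assuming my_algs[] is a populated skcipher_alg array shaped like serpent_algs[] above:

    /* One simd wrapper slot per internal algorithm. */
    static struct simd_skcipher_alg *my_simd_algs[ARRAY_SIZE(my_algs)];

    static int __init my_init(void)
    {
        /* registers each internal alg plus an async wrapper for users */
        return simd_register_skciphers_compat(my_algs, ARRAY_SIZE(my_algs),
                                              my_simd_algs);
    }

    static void __exit my_exit(void)
    {
        simd_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs),
                                  my_simd_algs);
    }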
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 6f778d3daa22..458567ecf76c 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -24,21 +24,15 @@
*/
#include <linux/module.h>
-#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
-#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
+#include <crypto/internal/simd.h>
#include <crypto/serpent.h>
-#include <crypto/cryptd.h>
-#include <crypto/b128ops.h>
-#include <crypto/ctr.h>
-#include <crypto/lrw.h>
#include <crypto/xts.h>
-#include <asm/fpu/api.h>
-#include <asm/crypto/serpent-avx.h>
#include <asm/crypto/glue_helper.h>
+#include <asm/crypto/serpent-avx.h>
/* 8-way parallel cipher functions */
asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
@@ -91,6 +85,31 @@ void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
}
EXPORT_SYMBOL_GPL(serpent_xts_dec);
+static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
+}
+
+int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int err;
+
+ err = xts_verify_key(tfm, key, keylen);
+ if (err)
+ return err;
+
+ /* first half of xts-key is for crypt */
+ err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
+ if (err)
+ return err;
+
+ /* second half of xts-key is for tweak */
+ return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
+}
+EXPORT_SYMBOL_GPL(xts_serpent_setkey);
static const struct common_glue_ctx serpent_enc = {
.num_funcs = 2,
@@ -170,423 +189,113 @@ static const struct common_glue_ctx serpent_dec_xts = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
+ return glue_ecb_req_128bit(&serpent_enc, req);
}
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
+ return glue_ecb_req_128bit(&serpent_dec, req);
}
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
- dst, src, nbytes);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
+ req);
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
- nbytes);
+ return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
}
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
+ return glue_ctr_req_128bit(&serpent_ctr, req);
}
-static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
{
- return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS,
- NULL, fpu_enabled, nbytes);
-}
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-static inline void serpent_fpu_end(bool fpu_enabled)
-{
- glue_fpu_end(fpu_enabled);
+ return glue_xts_req_128bit(&serpent_enc_xts, req,
+ XTS_TWEAK_CAST(__serpent_encrypt),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-struct crypt_priv {
- struct serpent_ctx *ctx;
- bool fpu_enabled;
-};
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
-{
- const unsigned int bsize = SERPENT_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
- serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __serpent_encrypt(ctx->ctx, srcdst, srcdst);
-}
-
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
{
- const unsigned int bsize = SERPENT_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
- serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __serpent_decrypt(ctx->ctx, srcdst, srcdst);
-}
-
-int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
- SERPENT_BLOCK_SIZE);
- if (err)
- return err;
-
- return lrw_init_table(&ctx->lrw_table, key + keylen -
- SERPENT_BLOCK_SIZE);
-}
-EXPORT_SYMBOL_GPL(lrw_serpent_setkey);
-
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[SERPENT_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->serpent_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- serpent_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
+ return glue_xts_req_128bit(&serpent_dec_xts, req,
+ XTS_TWEAK_CAST(__serpent_encrypt),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[SERPENT_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->serpent_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- serpent_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-void lrw_serpent_exit_tfm(struct crypto_tfm *tfm)
-{
- struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lrw_free_table(&ctx->lrw_table);
-}
-EXPORT_SYMBOL_GPL(lrw_serpent_exit_tfm);
-
-int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = xts_check_key(tfm, key, keylen);
- if (err)
- return err;
-
- /* first half of xts-key is for crypt */
- err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
- if (err)
- return err;
-
- /* second half of xts-key is for tweak */
- return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
-}
-EXPORT_SYMBOL_GPL(xts_serpent_setkey);
-
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&serpent_enc_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(__serpent_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
-}
-
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&serpent_dec_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(__serpent_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
-}
-
-static struct crypto_alg serpent_algs[10] = { {
- .cra_name = "__ecb-serpent-avx",
- .cra_driver_name = "__driver-ecb-serpent-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = serpent_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-serpent-avx",
- .cra_driver_name = "__driver-cbc-serpent-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = serpent_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "__ctr-serpent-avx",
- .cra_driver_name = "__driver-ctr-serpent-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = serpent_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "__lrw-serpent-avx",
- .cra_driver_name = "__driver-lrw-serpent-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_serpent_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = lrw_serpent_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "__xts-serpent-avx",
- .cra_driver_name = "__driver-xts-serpent-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
- .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = xts_serpent_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-}, {
- .cra_name = "ecb(serpent)",
- .cra_driver_name = "ecb-serpent-avx",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(serpent)",
- .cra_driver_name = "cbc-serpent-avx",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(serpent)",
- .cra_driver_name = "ctr-serpent-avx",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
- },
- },
-}, {
- .cra_name = "lrw(serpent)",
- .cra_driver_name = "lrw-serpent-avx",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "xts(serpent)",
- .cra_driver_name = "xts-serpent-avx",
- .cra_priority = 500,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
- .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
+static struct skcipher_alg serpent_algs[] = {
+ {
+ .base.cra_name = "__ecb(serpent)",
+ .base.cra_driver_name = "__ecb-serpent-avx",
+ .base.cra_priority = 500,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = SERPENT_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "__cbc(serpent)",
+ .base.cra_driver_name = "__cbc-serpent-avx",
+ .base.cra_priority = 500,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = SERPENT_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .ivsize = SERPENT_BLOCK_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "__ctr(serpent)",
+ .base.cra_driver_name = "__ctr-serpent-avx",
+ .base.cra_priority = 500,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .ivsize = SERPENT_BLOCK_SIZE,
+ .chunksize = SERPENT_BLOCK_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }, {
+ .base.cra_name = "__xts(serpent)",
+ .base.cra_driver_name = "__xts-serpent-avx",
+ .base.cra_priority = 500,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = SERPENT_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct serpent_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = 2 * SERPENT_MIN_KEY_SIZE,
+ .max_keysize = 2 * SERPENT_MAX_KEY_SIZE,
+ .ivsize = SERPENT_BLOCK_SIZE,
+ .setkey = xts_serpent_setkey,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
},
-} };
+};
+
+static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
static int __init serpent_init(void)
{
@@ -598,12 +307,15 @@ static int __init serpent_init(void)
return -ENODEV;
}
- return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
+ return simd_register_skciphers_compat(serpent_algs,
+ ARRAY_SIZE(serpent_algs),
+ serpent_simd_algs);
}
static void __exit serpent_exit(void)
{
- crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
+ simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
+ serpent_simd_algs);
}
module_init(serpent_init);
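
The serpent-avx glue now exports xts_serpent_setkey() against the skcipher API, validating the combined key with xts_verify_key() before keying the data and tweak ciphers separately. The same shape, with hypothetical my_* names standing in for the exported symbols:

    struct my_xts_ctx {
        struct serpent_ctx tweak_ctx;
        struct serpent_ctx crypt_ctx;
    };

    static int my_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
                             unsigned int keylen)
    {
        struct my_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int err;

        /* check the combined XTS key before splitting it */
        err = xts_verify_key(tfm, key, keylen);
        if (err)
            return err;

        /* first half keys the data cipher, second half the tweak cipher */
        err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
        if (err)
            return err;

        return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2,
                                keylen / 2);
    }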
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index ac0e831943f5..3dafe137596a 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -30,21 +30,22 @@
*/
#include <linux/module.h>
-#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
-#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
-#include <crypto/serpent.h>
-#include <crypto/cryptd.h>
#include <crypto/b128ops.h>
-#include <crypto/ctr.h>
-#include <crypto/lrw.h>
-#include <crypto/xts.h>
+#include <crypto/internal/simd.h>
+#include <crypto/serpent.h>
#include <asm/crypto/serpent-sse2.h>
#include <asm/crypto/glue_helper.h>
+static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
+}
+
static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
{
u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
@@ -139,464 +140,79 @@ static const struct common_glue_ctx serpent_dec_cbc = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes);
-}
-
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes);
-}
-
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc,
- dst, src, nbytes);
+ return glue_ecb_req_128bit(&serpent_enc, req);
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src,
- nbytes);
+ return glue_ecb_req_128bit(&serpent_dec, req);
}
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
+ req);
}
-static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS,
- NULL, fpu_enabled, nbytes);
+ return glue_cbc_decrypt_req_128bit(&serpent_dec_cbc, req);
}
-static inline void serpent_fpu_end(bool fpu_enabled)
+static int ctr_crypt(struct skcipher_request *req)
{
- glue_fpu_end(fpu_enabled);
+ return glue_ctr_req_128bit(&serpent_ctr, req);
}
-struct crypt_priv {
- struct serpent_ctx *ctx;
- bool fpu_enabled;
-};
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
-{
- const unsigned int bsize = SERPENT_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
- serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __serpent_encrypt(ctx->ctx, srcdst, srcdst);
-}
-
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
-{
- const unsigned int bsize = SERPENT_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
- serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- __serpent_decrypt(ctx->ctx, srcdst, srcdst);
-}
-
-struct serpent_lrw_ctx {
- struct lrw_table_ctx lrw_table;
- struct serpent_ctx serpent_ctx;
-};
-
-static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
- SERPENT_BLOCK_SIZE);
- if (err)
- return err;
-
- return lrw_init_table(&ctx->lrw_table, key + keylen -
- SERPENT_BLOCK_SIZE);
-}
-
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[SERPENT_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->serpent_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- serpent_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[SERPENT_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->serpent_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- serpent_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static void lrw_exit_tfm(struct crypto_tfm *tfm)
-{
- struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lrw_free_table(&ctx->lrw_table);
-}
-
-struct serpent_xts_ctx {
- struct serpent_ctx tweak_ctx;
- struct serpent_ctx crypt_ctx;
+static struct skcipher_alg serpent_algs[] = {
+ {
+ .base.cra_name = "__ecb(serpent)",
+ .base.cra_driver_name = "__ecb-serpent-sse2",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = SERPENT_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "__cbc(serpent)",
+ .base.cra_driver_name = "__cbc-serpent-sse2",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = SERPENT_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .ivsize = SERPENT_BLOCK_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "__ctr(serpent)",
+ .base.cra_driver_name = "__ctr-serpent-sse2",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct serpent_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = SERPENT_MIN_KEY_SIZE,
+ .max_keysize = SERPENT_MAX_KEY_SIZE,
+ .ivsize = SERPENT_BLOCK_SIZE,
+ .chunksize = SERPENT_BLOCK_SIZE,
+ .setkey = serpent_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ },
};
-static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = xts_check_key(tfm, key, keylen);
- if (err)
- return err;
-
- /* first half of xts-key is for crypt */
- err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
- if (err)
- return err;
-
- /* second half of xts-key is for tweak */
- return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
-}
-
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- le128 buf[SERPENT_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->crypt_ctx,
- .fpu_enabled = false,
- };
- struct xts_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .tweak_ctx = &ctx->tweak_ctx,
- .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = xts_crypt(desc, dst, src, nbytes, &req);
- serpent_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- le128 buf[SERPENT_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->crypt_ctx,
- .fpu_enabled = false,
- };
- struct xts_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .tweak_ctx = &ctx->tweak_ctx,
- .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = xts_crypt(desc, dst, src, nbytes, &req);
- serpent_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static struct crypto_alg serpent_algs[10] = { {
- .cra_name = "__ecb-serpent-sse2",
- .cra_driver_name = "__driver-ecb-serpent-sse2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = serpent_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-serpent-sse2",
- .cra_driver_name = "__driver-cbc-serpent-sse2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = serpent_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "__ctr-serpent-sse2",
- .cra_driver_name = "__driver-ctr-serpent-sse2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct serpent_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = serpent_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "__lrw-serpent-sse2",
- .cra_driver_name = "__driver-lrw-serpent-sse2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = lrw_serpent_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "__xts-serpent-sse2",
- .cra_driver_name = "__driver-xts-serpent-sse2",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct serpent_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
- .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = xts_serpent_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-}, {
- .cra_name = "ecb(serpent)",
- .cra_driver_name = "ecb-serpent-sse2",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(serpent)",
- .cra_driver_name = "cbc-serpent-sse2",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(serpent)",
- .cra_driver_name = "ctr-serpent-sse2",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
- },
- },
-}, {
- .cra_name = "lrw(serpent)",
- .cra_driver_name = "lrw-serpent-sse2",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .max_keysize = SERPENT_MAX_KEY_SIZE +
- SERPENT_BLOCK_SIZE,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "xts(serpent)",
- .cra_driver_name = "xts-serpent-sse2",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SERPENT_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = SERPENT_MIN_KEY_SIZE * 2,
- .max_keysize = SERPENT_MAX_KEY_SIZE * 2,
- .ivsize = SERPENT_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-} };
+static struct simd_skcipher_alg *serpent_simd_algs[ARRAY_SIZE(serpent_algs)];
static int __init serpent_sse2_init(void)
{
@@ -605,12 +221,15 @@ static int __init serpent_sse2_init(void)
return -ENODEV;
}
- return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
+ return simd_register_skciphers_compat(serpent_algs,
+ ARRAY_SIZE(serpent_algs),
+ serpent_simd_algs);
}
static void __exit serpent_sse2_exit(void)
{
- crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs));
+ simd_unregister_skciphers(serpent_algs, ARRAY_SIZE(serpent_algs),
+ serpent_simd_algs);
}
module_init(serpent_sse2_init);
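
The conversions in this file (and in the files that follow) replace the old blkcipher/ablkcipher pairs with plain skcipher algorithms that the crypto/simd helper wraps for async use. A minimal usage sketch, not part of this patch, showing how a kernel caller would drive one of the converted ciphers through the skcipher request API (algorithm name, buffer handling and error paths are simplified for illustration):

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Encrypt "len" bytes in place with cbc(serpent); illustrative only. */
static int example_cbc_serpent(const u8 *key, unsigned int keylen,
			       u8 *buf, unsigned int len, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(serpent)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
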
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c
index acf9fdf01671..e17655ffde79 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha1-mb/sha1_mb.c
@@ -106,13 +106,6 @@ static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
(struct sha1_mb_mgr *state);
-static inline void sha1_init_digest(uint32_t *digest)
-{
- static const uint32_t initial_digest[SHA1_DIGEST_LENGTH] = {SHA1_H0,
- SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
- memcpy(digest, initial_digest, sizeof(initial_digest));
-}
-
static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
uint64_t total_len)
{
@@ -244,11 +237,8 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
uint32_t len,
int flags)
{
- if (flags & (~HASH_ENTIRE)) {
- /*
- * User should not pass anything other than FIRST, UPDATE, or
- * LAST
- */
+ if (flags & ~(HASH_UPDATE | HASH_LAST)) {
+ /* User should not pass anything other than UPDATE or LAST */
ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
return ctx;
}
@@ -259,24 +249,12 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
return ctx;
}
- if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
+ if (ctx->status & HASH_CTX_STS_COMPLETE) {
/* Cannot update a finished job. */
ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
return ctx;
}
-
- if (flags & HASH_FIRST) {
- /* Init digest */
- sha1_init_digest(ctx->job.result_digest);
-
- /* Reset byte counter */
- ctx->total_length = 0;
-
- /* Clear extra blocks */
- ctx->partial_block_buffer_length = 0;
- }
-
/*
* If we made it here, there were no errors during this call to
* submit
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
index 13590ccf965c..9454bd16f9f8 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
@@ -57,11 +57,9 @@
#include "sha1_mb_mgr.h"
#define HASH_UPDATE 0x00
-#define HASH_FIRST 0x01
-#define HASH_LAST 0x02
-#define HASH_ENTIRE 0x03
-#define HASH_DONE 0x04
-#define HASH_FINAL 0x08
+#define HASH_LAST 0x01
+#define HASH_DONE 0x02
+#define HASH_FINAL 0x04
#define HASH_CTX_STS_IDLE 0x00
#define HASH_CTX_STS_PROCESSING 0x01
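
With HASH_FIRST and HASH_ENTIRE gone, the multi-buffer submit paths above accept only HASH_UPDATE (0x00) and HASH_LAST (0x01); digest initialisation no longer happens on submit. A one-line restatement of the new flag test, illustrative only (HASH_DONE and HASH_FINAL remain internal status bits and are rejected if a caller passes them in):

/* Illustrative only: mirrors the mask test in the *_ctx_mgr_submit() paths. */
static inline bool mb_submit_flags_valid(int flags)
{
	return !(flags & ~(HASH_UPDATE | HASH_LAST));
}
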
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
index 7926a226b120..4c46ac1b6653 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ b/arch/x86/crypto/sha256-mb/sha256_mb.c
@@ -106,14 +106,6 @@ static asmlinkage struct job_sha256* (*sha256_job_mgr_flush)
static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job)
(struct sha256_mb_mgr *state);
-inline void sha256_init_digest(uint32_t *digest)
-{
- static const uint32_t initial_digest[SHA256_DIGEST_LENGTH] = {
- SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
- SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7};
- memcpy(digest, initial_digest, sizeof(initial_digest));
-}
-
inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
uint64_t total_len)
{
@@ -245,10 +237,8 @@ static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
uint32_t len,
int flags)
{
- if (flags & (~HASH_ENTIRE)) {
- /* User should not pass anything other than FIRST, UPDATE
- * or LAST
- */
+ if (flags & ~(HASH_UPDATE | HASH_LAST)) {
+ /* User should not pass anything other than UPDATE or LAST */
ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
return ctx;
}
@@ -259,23 +249,12 @@ static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
return ctx;
}
- if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
+ if (ctx->status & HASH_CTX_STS_COMPLETE) {
/* Cannot update a finished job. */
ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
return ctx;
}
- if (flags & HASH_FIRST) {
- /* Init digest */
- sha256_init_digest(ctx->job.result_digest);
-
- /* Reset byte counter */
- ctx->total_length = 0;
-
- /* Clear extra blocks */
- ctx->partial_block_buffer_length = 0;
- }
-
/* If we made it here, there was no error during this call to submit */
ctx->error = HASH_CTX_ERROR_NONE;
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
index aabb30320af0..7c432543dc7f 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
@@ -57,11 +57,9 @@
#include "sha256_mb_mgr.h"
#define HASH_UPDATE 0x00
-#define HASH_FIRST 0x01
-#define HASH_LAST 0x02
-#define HASH_ENTIRE 0x03
-#define HASH_DONE 0x04
-#define HASH_FINAL 0x08
+#define HASH_LAST 0x01
+#define HASH_DONE 0x02
+#define HASH_FINAL 0x04
#define HASH_CTX_STS_IDLE 0x00
#define HASH_CTX_STS_PROCESSING 0x01
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
index 458409b7568d..39e2bbdc1836 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ b/arch/x86/crypto/sha512-mb/sha512_mb.c
@@ -107,15 +107,6 @@ static asmlinkage struct job_sha512* (*sha512_job_mgr_flush)
static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job)
(struct sha512_mb_mgr *state);
-inline void sha512_init_digest(uint64_t *digest)
-{
- static const uint64_t initial_digest[SHA512_DIGEST_LENGTH] = {
- SHA512_H0, SHA512_H1, SHA512_H2,
- SHA512_H3, SHA512_H4, SHA512_H5,
- SHA512_H6, SHA512_H7 };
- memcpy(digest, initial_digest, sizeof(initial_digest));
-}
-
inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
uint64_t total_len)
{
@@ -263,11 +254,8 @@ static struct sha512_hash_ctx
mgr = cstate->mgr;
spin_lock_irqsave(&cstate->work_lock, irqflags);
- if (flags & (~HASH_ENTIRE)) {
- /*
- * User should not pass anything other than FIRST, UPDATE, or
- * LAST
- */
+ if (flags & ~(HASH_UPDATE | HASH_LAST)) {
+ /* User should not pass anything other than UPDATE or LAST */
ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
goto unlock;
}
@@ -278,24 +266,12 @@ static struct sha512_hash_ctx
goto unlock;
}
- if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
+ if (ctx->status & HASH_CTX_STS_COMPLETE) {
/* Cannot update a finished job. */
ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
goto unlock;
}
-
- if (flags & HASH_FIRST) {
- /* Init digest */
- sha512_init_digest(ctx->job.result_digest);
-
- /* Reset byte counter */
- ctx->total_length = 0;
-
- /* Clear extra blocks */
- ctx->partial_block_buffer_length = 0;
- }
-
/*
* If we made it here, there were no errors during this call to
* submit
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
index e4653f5eec3f..e5c465bd821e 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
@@ -57,11 +57,9 @@
#include "sha512_mb_mgr.h"
#define HASH_UPDATE 0x00
-#define HASH_FIRST 0x01
-#define HASH_LAST 0x02
-#define HASH_ENTIRE 0x03
-#define HASH_DONE 0x04
-#define HASH_FINAL 0x08
+#define HASH_LAST 0x01
+#define HASH_DONE 0x02
+#define HASH_FINAL 0x04
#define HASH_CTX_STS_IDLE 0x00
#define HASH_CTX_STS_PROCESSING 0x01
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index b7a3904b953c..66d989230d10 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -24,24 +24,15 @@
*/
#include <linux/module.h>
-#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
-#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
+#include <crypto/internal/simd.h>
#include <crypto/twofish.h>
-#include <crypto/cryptd.h>
-#include <crypto/b128ops.h>
-#include <crypto/ctr.h>
-#include <crypto/lrw.h>
#include <crypto/xts.h>
-#include <asm/fpu/api.h>
-#include <asm/crypto/twofish.h>
#include <asm/crypto/glue_helper.h>
-#include <crypto/scatterwalk.h>
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
+#include <asm/crypto/twofish.h>
#define TWOFISH_PARALLEL_BLOCKS 8
@@ -61,6 +52,12 @@ asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst,
asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src, le128 *iv);
+static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return twofish_setkey(&tfm->base, key, keylen);
+}
+
static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src)
{
@@ -79,6 +76,31 @@ static void twofish_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
GLUE_FUNC_CAST(twofish_dec_blk));
}
+struct twofish_xts_ctx {
+ struct twofish_ctx tweak_ctx;
+ struct twofish_ctx crypt_ctx;
+};
+
+static int xts_twofish_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ u32 *flags = &tfm->base.crt_flags;
+ int err;
+
+ err = xts_verify_key(tfm, key, keylen);
+ if (err)
+ return err;
+
+ /* first half of xts-key is for crypt */
+ err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
+ if (err)
+ return err;
+
+ /* second half of xts-key is for tweak */
+ return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
+ flags);
+}
static const struct common_glue_ctx twofish_enc = {
.num_funcs = 3,
@@ -170,389 +192,113 @@ static const struct common_glue_ctx twofish_dec_xts = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&twofish_enc, desc, dst, src, nbytes);
-}
-
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&twofish_dec, desc, dst, src, nbytes);
-}
-
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(twofish_enc_blk), desc,
- dst, src, nbytes);
+ return glue_ecb_req_128bit(&twofish_enc, req);
}
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- return glue_cbc_decrypt_128bit(&twofish_dec_cbc, desc, dst, src,
- nbytes);
+ return glue_ecb_req_128bit(&twofish_dec, req);
}
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_ctr_crypt_128bit(&twofish_ctr, desc, dst, src, nbytes);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk),
+ req);
}
-static inline bool twofish_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
{
- return glue_fpu_begin(TF_BLOCK_SIZE, TWOFISH_PARALLEL_BLOCKS, NULL,
- fpu_enabled, nbytes);
+ return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req);
}
-static inline void twofish_fpu_end(bool fpu_enabled)
+static int ctr_crypt(struct skcipher_request *req)
{
- glue_fpu_end(fpu_enabled);
+ return glue_ctr_req_128bit(&twofish_ctr, req);
}
-struct crypt_priv {
- struct twofish_ctx *ctx;
- bool fpu_enabled;
-};
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
{
- const unsigned int bsize = TF_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
- twofish_ecb_enc_8way(ctx->ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
- twofish_enc_blk_3way(ctx->ctx, srcdst, srcdst);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- nbytes %= bsize * 3;
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- twofish_enc_blk(ctx->ctx, srcdst, srcdst);
+ return glue_xts_req_128bit(&twofish_enc_xts, req,
+ XTS_TWEAK_CAST(twofish_enc_blk),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
{
- const unsigned int bsize = TF_BLOCK_SIZE;
- struct crypt_priv *ctx = priv;
- int i;
-
- ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes);
-
- if (nbytes == bsize * TWOFISH_PARALLEL_BLOCKS) {
- twofish_ecb_dec_8way(ctx->ctx, srcdst, srcdst);
- return;
- }
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
- for (i = 0; i < nbytes / (bsize * 3); i++, srcdst += bsize * 3)
- twofish_dec_blk_3way(ctx->ctx, srcdst, srcdst);
-
- nbytes %= bsize * 3;
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- twofish_dec_blk(ctx->ctx, srcdst, srcdst);
+ return glue_xts_req_128bit(&twofish_dec_xts, req,
+ XTS_TWEAK_CAST(twofish_enc_blk),
+ &ctx->tweak_ctx, &ctx->crypt_ctx);
}
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[TWOFISH_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->twofish_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- twofish_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[TWOFISH_PARALLEL_BLOCKS];
- struct crypt_priv crypt_ctx = {
- .ctx = &ctx->twofish_ctx,
- .fpu_enabled = false,
- };
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
- int ret;
-
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = lrw_crypt(desc, dst, src, nbytes, &req);
- twofish_fpu_end(crypt_ctx.fpu_enabled);
-
- return ret;
-}
-
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&twofish_enc_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(twofish_enc_blk),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
-}
-
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-
- return glue_xts_crypt_128bit(&twofish_dec_xts, desc, dst, src, nbytes,
- XTS_TWEAK_CAST(twofish_enc_blk),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
-}
-
-static struct crypto_alg twofish_algs[10] = { {
- .cra_name = "__ecb-twofish-avx",
- .cra_driver_name = "__driver-ecb-twofish-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct twofish_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .setkey = twofish_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "__cbc-twofish-avx",
- .cra_driver_name = "__driver-cbc-twofish-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct twofish_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .setkey = twofish_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "__ctr-twofish-avx",
- .cra_driver_name = "__driver-ctr-twofish-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct twofish_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = twofish_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "__lrw-twofish-avx",
- .cra_driver_name = "__driver-lrw-twofish-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct twofish_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_twofish_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE +
- TF_BLOCK_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE +
- TF_BLOCK_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = lrw_twofish_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "__xts-twofish-avx",
- .cra_driver_name = "__driver-xts-twofish-avx",
- .cra_priority = 0,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct twofish_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE * 2,
- .max_keysize = TF_MAX_KEY_SIZE * 2,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = xts_twofish_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
- },
-}, {
- .cra_name = "ecb(twofish)",
- .cra_driver_name = "ecb-twofish-avx",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(twofish)",
- .cra_driver_name = "cbc-twofish-avx",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = __ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(twofish)",
- .cra_driver_name = "ctr-twofish-avx",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_encrypt,
- .geniv = "chainiv",
- },
- },
-}, {
- .cra_name = "lrw(twofish)",
- .cra_driver_name = "lrw-twofish-avx",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE +
- TF_BLOCK_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE +
- TF_BLOCK_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
- },
-}, {
- .cra_name = "xts(twofish)",
- .cra_driver_name = "xts-twofish-avx",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct async_helper_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = ablk_init,
- .cra_exit = ablk_exit,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE * 2,
- .max_keysize = TF_MAX_KEY_SIZE * 2,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = ablk_set_key,
- .encrypt = ablk_encrypt,
- .decrypt = ablk_decrypt,
- },
+static struct skcipher_alg twofish_algs[] = {
+ {
+ .base.cra_name = "__ecb(twofish)",
+ .base.cra_driver_name = "__ecb-twofish-avx",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = TF_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct twofish_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = TF_MIN_KEY_SIZE,
+ .max_keysize = TF_MAX_KEY_SIZE,
+ .setkey = twofish_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "__cbc(twofish)",
+ .base.cra_driver_name = "__cbc-twofish-avx",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = TF_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct twofish_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = TF_MIN_KEY_SIZE,
+ .max_keysize = TF_MAX_KEY_SIZE,
+ .ivsize = TF_BLOCK_SIZE,
+ .setkey = twofish_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "__ctr(twofish)",
+ .base.cra_driver_name = "__ctr-twofish-avx",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct twofish_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = TF_MIN_KEY_SIZE,
+ .max_keysize = TF_MAX_KEY_SIZE,
+ .ivsize = TF_BLOCK_SIZE,
+ .chunksize = TF_BLOCK_SIZE,
+ .setkey = twofish_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+ }, {
+ .base.cra_name = "__xts(twofish)",
+ .base.cra_driver_name = "__xts-twofish-avx",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
+ .base.cra_blocksize = TF_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct twofish_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = 2 * TF_MIN_KEY_SIZE,
+ .max_keysize = 2 * TF_MAX_KEY_SIZE,
+ .ivsize = TF_BLOCK_SIZE,
+ .setkey = xts_twofish_setkey,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
},
-} };
+};
+
+static struct simd_skcipher_alg *twofish_simd_algs[ARRAY_SIZE(twofish_algs)];
static int __init twofish_init(void)
{
@@ -563,12 +309,15 @@ static int __init twofish_init(void)
return -ENODEV;
}
- return crypto_register_algs(twofish_algs, ARRAY_SIZE(twofish_algs));
+ return simd_register_skciphers_compat(twofish_algs,
+ ARRAY_SIZE(twofish_algs),
+ twofish_simd_algs);
}
static void __exit twofish_exit(void)
{
- crypto_unregister_algs(twofish_algs, ARRAY_SIZE(twofish_algs));
+ simd_unregister_skciphers(twofish_algs, ARRAY_SIZE(twofish_algs),
+ twofish_simd_algs);
}
module_init(twofish_init);
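
The xts_twofish_setkey() added above first validates the combined key with xts_verify_key() and then keys two independent Twofish contexts from its halves, which is why the XTS entry advertises 2 * TF_MIN_KEY_SIZE and 2 * TF_MAX_KEY_SIZE. A worked example, illustrative and not part of the patch: with a 64-byte xts(twofish) key, bytes 0-31 key the crypt context and bytes 32-63 key the tweak context.

#include <crypto/skcipher.h>
#include <crypto/twofish.h>

/*
 * Illustrative only: keying an "xts(twofish)" tfm with a maximum-size key.
 * crypto_skcipher_setkey() hands all 2 * TF_MAX_KEY_SIZE (64) bytes to
 * xts_twofish_setkey(), which splits them as described above.
 */
static int example_set_xts_key(struct crypto_skcipher *tfm,
			       const u8 key[2 * TF_MAX_KEY_SIZE])
{
	return crypto_skcipher_setkey(tfm, key, 2 * TF_MAX_KEY_SIZE);
}
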
diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index 243e90a4b5d9..571485502ec8 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -20,22 +20,26 @@
*
*/
-#include <asm/processor.h>
+#include <asm/crypto/glue_helper.h>
+#include <asm/crypto/twofish.h>
+#include <crypto/algapi.h>
+#include <crypto/b128ops.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/twofish.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <crypto/algapi.h>
-#include <crypto/twofish.h>
-#include <crypto/b128ops.h>
-#include <asm/crypto/twofish.h>
-#include <asm/crypto/glue_helper.h>
-#include <crypto/lrw.h>
-#include <crypto/xts.h>
EXPORT_SYMBOL_GPL(__twofish_enc_blk_3way);
EXPORT_SYMBOL_GPL(twofish_dec_blk_3way);
+static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return twofish_setkey(&tfm->base, key, keylen);
+}
+
static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
const u8 *src)
{
@@ -151,284 +155,74 @@ static const struct common_glue_ctx twofish_dec_cbc = {
} }
};
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&twofish_enc, desc, dst, src, nbytes);
-}
-
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ecb_crypt_128bit(&twofish_dec, desc, dst, src, nbytes);
-}
-
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(twofish_enc_blk), desc,
- dst, src, nbytes);
-}
-
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_cbc_decrypt_128bit(&twofish_dec_cbc, desc, dst, src,
- nbytes);
-}
-
-static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- return glue_ctr_crypt_128bit(&twofish_ctr, desc, dst, src, nbytes);
-}
-
-static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
{
- const unsigned int bsize = TF_BLOCK_SIZE;
- struct twofish_ctx *ctx = priv;
- int i;
-
- if (nbytes == 3 * bsize) {
- twofish_enc_blk_3way(ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- twofish_enc_blk(ctx, srcdst, srcdst);
-}
-
-static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
-{
- const unsigned int bsize = TF_BLOCK_SIZE;
- struct twofish_ctx *ctx = priv;
- int i;
-
- if (nbytes == 3 * bsize) {
- twofish_dec_blk_3way(ctx, srcdst, srcdst);
- return;
- }
-
- for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
- twofish_dec_blk(ctx, srcdst, srcdst);
-}
-
-int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = __twofish_setkey(&ctx->twofish_ctx, key, keylen - TF_BLOCK_SIZE,
- &tfm->crt_flags);
- if (err)
- return err;
-
- return lrw_init_table(&ctx->lrw_table, key + keylen - TF_BLOCK_SIZE);
+ return glue_ecb_req_128bit(&twofish_enc, req);
}
-EXPORT_SYMBOL_GPL(lrw_twofish_setkey);
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
{
- struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[3];
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &ctx->twofish_ctx,
- .crypt_fn = encrypt_callback,
- };
-
- return lrw_crypt(desc, dst, src, nbytes, &req);
+ return glue_ecb_req_128bit(&twofish_dec, req);
}
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
{
- struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- be128 buf[3];
- struct lrw_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .table_ctx = &ctx->lrw_table,
- .crypt_ctx = &ctx->twofish_ctx,
- .crypt_fn = decrypt_callback,
- };
-
- return lrw_crypt(desc, dst, src, nbytes, &req);
+ return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk),
+ req);
}
-void lrw_twofish_exit_tfm(struct crypto_tfm *tfm)
+static int cbc_decrypt(struct skcipher_request *req)
{
- struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
-
- lrw_free_table(&ctx->lrw_table);
-}
-EXPORT_SYMBOL_GPL(lrw_twofish_exit_tfm);
-
-int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct twofish_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
- int err;
-
- err = xts_check_key(tfm, key, keylen);
- if (err)
- return err;
-
- /* first half of xts-key is for crypt */
- err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
- if (err)
- return err;
-
- /* second half of xts-key is for tweak */
- return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
- flags);
-}
-EXPORT_SYMBOL_GPL(xts_twofish_setkey);
-
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- le128 buf[3];
- struct xts_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .tweak_ctx = &ctx->tweak_ctx,
- .tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
- .crypt_ctx = &ctx->crypt_ctx,
- .crypt_fn = encrypt_callback,
- };
-
- return xts_crypt(desc, dst, src, nbytes, &req);
+ return glue_cbc_decrypt_req_128bit(&twofish_dec_cbc, req);
}
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
{
- struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- le128 buf[3];
- struct xts_crypt_req req = {
- .tbuf = buf,
- .tbuflen = sizeof(buf),
-
- .tweak_ctx = &ctx->tweak_ctx,
- .tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
- .crypt_ctx = &ctx->crypt_ctx,
- .crypt_fn = decrypt_callback,
- };
-
- return xts_crypt(desc, dst, src, nbytes, &req);
+ return glue_ctr_req_128bit(&twofish_ctr, req);
}
-static struct crypto_alg tf_algs[5] = { {
- .cra_name = "ecb(twofish)",
- .cra_driver_name = "ecb-twofish-3way",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct twofish_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .setkey = twofish_setkey,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- },
- },
-}, {
- .cra_name = "cbc(twofish)",
- .cra_driver_name = "cbc-twofish-3way",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct twofish_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = twofish_setkey,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- },
- },
-}, {
- .cra_name = "ctr(twofish)",
- .cra_driver_name = "ctr-twofish-3way",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct twofish_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = twofish_setkey,
- .encrypt = ctr_crypt,
- .decrypt = ctr_crypt,
- },
- },
-}, {
- .cra_name = "lrw(twofish)",
- .cra_driver_name = "lrw-twofish-3way",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct twofish_lrw_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_exit = lrw_twofish_exit_tfm,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE + TF_BLOCK_SIZE,
- .max_keysize = TF_MAX_KEY_SIZE + TF_BLOCK_SIZE,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = lrw_twofish_setkey,
- .encrypt = lrw_encrypt,
- .decrypt = lrw_decrypt,
- },
- },
-}, {
- .cra_name = "xts(twofish)",
- .cra_driver_name = "xts-twofish-3way",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = TF_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct twofish_xts_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = TF_MIN_KEY_SIZE * 2,
- .max_keysize = TF_MAX_KEY_SIZE * 2,
- .ivsize = TF_BLOCK_SIZE,
- .setkey = xts_twofish_setkey,
- .encrypt = xts_encrypt,
- .decrypt = xts_decrypt,
- },
+static struct skcipher_alg tf_skciphers[] = {
+ {
+ .base.cra_name = "ecb(twofish)",
+ .base.cra_driver_name = "ecb-twofish-3way",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = TF_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct twofish_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = TF_MIN_KEY_SIZE,
+ .max_keysize = TF_MAX_KEY_SIZE,
+ .setkey = twofish_setkey_skcipher,
+ .encrypt = ecb_encrypt,
+ .decrypt = ecb_decrypt,
+ }, {
+ .base.cra_name = "cbc(twofish)",
+ .base.cra_driver_name = "cbc-twofish-3way",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = TF_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct twofish_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = TF_MIN_KEY_SIZE,
+ .max_keysize = TF_MAX_KEY_SIZE,
+ .ivsize = TF_BLOCK_SIZE,
+ .setkey = twofish_setkey_skcipher,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+ }, {
+ .base.cra_name = "ctr(twofish)",
+ .base.cra_driver_name = "ctr-twofish-3way",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct twofish_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = TF_MIN_KEY_SIZE,
+ .max_keysize = TF_MAX_KEY_SIZE,
+ .ivsize = TF_BLOCK_SIZE,
+ .chunksize = TF_BLOCK_SIZE,
+ .setkey = twofish_setkey_skcipher,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
},
-} };
+};
static bool is_blacklisted_cpu(void)
{
@@ -478,12 +272,13 @@ static int __init init(void)
return -ENODEV;
}
- return crypto_register_algs(tf_algs, ARRAY_SIZE(tf_algs));
+ return crypto_register_skciphers(tf_skciphers,
+ ARRAY_SIZE(tf_skciphers));
}
static void __exit fini(void)
{
- crypto_unregister_algs(tf_algs, ARRAY_SIZE(tf_algs));
+ crypto_unregister_skciphers(tf_skciphers, ARRAY_SIZE(tf_skciphers));
}
module_init(init);
diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h
index 10f8d590bcfe..a5d86fc0593f 100644
--- a/arch/x86/include/asm/crypto/camellia.h
+++ b/arch/x86/include/asm/crypto/camellia.h
@@ -2,8 +2,9 @@
#ifndef ASM_X86_CAMELLIA_H
#define ASM_X86_CAMELLIA_H
-#include <linux/kernel.h>
+#include <crypto/b128ops.h>
#include <linux/crypto.h>
+#include <linux/kernel.h>
#define CAMELLIA_MIN_KEY_SIZE 16
#define CAMELLIA_MAX_KEY_SIZE 32
@@ -11,16 +12,13 @@
#define CAMELLIA_TABLE_BYTE_LEN 272
#define CAMELLIA_PARALLEL_BLOCKS 2
+struct crypto_skcipher;
+
struct camellia_ctx {
u64 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
u32 key_length;
};
-struct camellia_lrw_ctx {
- struct lrw_table_ctx lrw_table;
- struct camellia_ctx camellia_ctx;
-};
-
struct camellia_xts_ctx {
struct camellia_ctx tweak_ctx;
struct camellia_ctx crypt_ctx;
@@ -30,11 +28,7 @@ extern int __camellia_setkey(struct camellia_ctx *cctx,
const unsigned char *key,
unsigned int key_len, u32 *flags);
-extern int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
-extern void lrw_camellia_exit_tfm(struct crypto_tfm *tfm);
-
-extern int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
+extern int xts_camellia_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
/* regular block cipher functions */
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h
index 553a03de55c3..d1818634ae7e 100644
--- a/arch/x86/include/asm/crypto/glue_helper.h
+++ b/arch/x86/include/asm/crypto/glue_helper.h
@@ -45,7 +45,7 @@ struct common_glue_ctx {
};
static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
- struct blkcipher_desc *desc,
+ struct skcipher_walk *walk,
bool fpu_enabled, unsigned int nbytes)
{
if (likely(fpu_blocks_limit < 0))
@@ -61,33 +61,6 @@ static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
return false;
- if (desc) {
- /* prevent sleeping if FPU is in use */
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
- }
-
- kernel_fpu_begin();
- return true;
-}
-
-static inline bool glue_skwalk_fpu_begin(unsigned int bsize,
- int fpu_blocks_limit,
- struct skcipher_walk *walk,
- bool fpu_enabled, unsigned int nbytes)
-{
- if (likely(fpu_blocks_limit < 0))
- return false;
-
- if (fpu_enabled)
- return true;
-
- /*
- * Vector-registers are only used when chunk to be processed is large
- * enough, so do not enable FPU until it is necessary.
- */
- if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
- return false;
-
/* prevent sleeping if FPU is in use */
skcipher_walk_atomise(walk);
@@ -126,41 +99,17 @@ static inline void le128_inc(le128 *i)
i->b = cpu_to_le64(b);
}
-extern int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes);
-
-extern int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes);
-
-extern int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes);
-
-extern int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes);
-
-extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- common_glue_func_t tweak_fn, void *tweak_ctx,
- void *crypt_ctx);
-
-extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
- struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- common_glue_func_t tweak_fn, void *tweak_ctx,
- void *crypt_ctx);
+extern int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
+ struct skcipher_request *req);
+
+extern int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
+ struct skcipher_request *req);
+
+extern int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
+ struct skcipher_request *req);
+
+extern int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
+ struct skcipher_request *req);
extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
struct skcipher_request *req,
diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h
index c958b7bd0fcb..db7c9cc32234 100644
--- a/arch/x86/include/asm/crypto/serpent-avx.h
+++ b/arch/x86/include/asm/crypto/serpent-avx.h
@@ -2,15 +2,13 @@
#ifndef ASM_X86_SERPENT_AVX_H
#define ASM_X86_SERPENT_AVX_H
-#include <linux/crypto.h>
+#include <crypto/b128ops.h>
#include <crypto/serpent.h>
+#include <linux/types.h>
-#define SERPENT_PARALLEL_BLOCKS 8
+struct crypto_skcipher;
-struct serpent_lrw_ctx {
- struct lrw_table_ctx lrw_table;
- struct serpent_ctx serpent_ctx;
-};
+#define SERPENT_PARALLEL_BLOCKS 8
struct serpent_xts_ctx {
struct serpent_ctx tweak_ctx;
@@ -38,12 +36,7 @@ extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
-extern int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
-
-extern void lrw_serpent_exit_tfm(struct crypto_tfm *tfm);
-
-extern int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
+extern int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
#endif
diff --git a/arch/x86/include/asm/crypto/twofish.h b/arch/x86/include/asm/crypto/twofish.h
index 65bb80adba3e..f618bf272b90 100644
--- a/arch/x86/include/asm/crypto/twofish.h
+++ b/arch/x86/include/asm/crypto/twofish.h
@@ -4,19 +4,8 @@
#include <linux/crypto.h>
#include <crypto/twofish.h>
-#include <crypto/lrw.h>
#include <crypto/b128ops.h>
-struct twofish_lrw_ctx {
- struct lrw_table_ctx lrw_table;
- struct twofish_ctx twofish_ctx;
-};
-
-struct twofish_xts_ctx {
- struct twofish_ctx tweak_ctx;
- struct twofish_ctx crypt_ctx;
-};
-
/* regular block cipher functions from twofish_x86_64 module */
asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
const u8 *src);
@@ -36,12 +25,4 @@ extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
le128 *iv);
-extern int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
-
-extern void lrw_twofish_exit_tfm(struct crypto_tfm *tfm);
-
-extern int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
-
#endif /* ASM_X86_TWOFISH_H */