path: root/crypto/vmac.c
author     Salman Qazi <sqazi@google.com>            2012-10-05 14:24:14 -0700
committer  Herbert Xu <herbert@gondor.apana.org.au>  2012-10-15 22:33:20 +0800
commit     ba1ee070909fae01248b8117da1706f3cf2bfd1b
tree       98d982702fdc17a9699387c59583e9a5ecf0b984  /crypto/vmac.c
parent     7291a932c6e27d9768e374e9d648086636daf61c
crypto: vmac - Make VMAC work when blocks aren't aligned
The VMAC implementation, as it stands, does not work with blocks that are not multiples of 128 bytes. Furthermore, this is a problem when using the implementation on scatterlists: even when the complete plaintext is a 128-byte multiple, the pieces that get passed to vmac_update can be pretty much any size.

I also added test cases for unaligned blocks.

Signed-off-by: Salman Qazi <sqazi@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
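As an illustration of the buffering scheme the patch introduces, here is a minimal user-space sketch (not the kernel code): update calls may arrive in pieces of any size, as they do when walking a scatterlist, so a partial block is carried in the context and only whole blocks are handed on. BLOCK_SIZE, struct buf_ctx, consume_blocks() and buf_update() are hypothetical stand-ins for VMAC_NHBYTES, the VMAC context, vhash_update() and vmac_update().

/*
 * Minimal user-space sketch of the partial-block buffering added by this
 * patch; BLOCK_SIZE, buf_ctx and consume_blocks() are illustrative
 * stand-ins, not the kernel's VMAC API.
 */
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 128                          /* stand-in for VMAC_NHBYTES */

struct buf_ctx {
        unsigned char partial[BLOCK_SIZE];      /* carried partial block */
        unsigned int partial_size;              /* bytes currently buffered */
};

/* Stand-in for vhash_update(): only ever sees whole blocks. */
static void consume_blocks(const unsigned char *p, unsigned int len)
{
        printf("consuming %u bytes (%u full blocks)\n", len, len / BLOCK_SIZE);
}

static void buf_update(struct buf_ctx *ctx, const unsigned char *p,
                       unsigned int len)
{
        unsigned int expand = BLOCK_SIZE - ctx->partial_size;
        unsigned int min = len < expand ? len : expand;

        /* Top up the buffered partial block first. */
        memcpy(ctx->partial + ctx->partial_size, p, min);
        ctx->partial_size += min;

        if (len < expand)                       /* still short of a full block */
                return;

        consume_blocks(ctx->partial, BLOCK_SIZE);
        ctx->partial_size = 0;

        len -= expand;
        p += expand;

        /* Stash whatever does not fill a whole block for the next call. */
        if (len % BLOCK_SIZE) {
                memcpy(ctx->partial, p + len - (len % BLOCK_SIZE),
                       len % BLOCK_SIZE);
                ctx->partial_size = len % BLOCK_SIZE;
        }

        if (len - len % BLOCK_SIZE)
                consume_blocks(p, len - len % BLOCK_SIZE);
}

int main(void)
{
        struct buf_ctx ctx = { 0 };
        unsigned char data[300] = { 0 };

        /* Feed the same 300 bytes in awkwardly sized pieces. */
        buf_update(&ctx, data, 5);
        buf_update(&ctx, data + 5, 250);
        buf_update(&ctx, data + 255, 45);
        printf("left over: %u bytes\n", ctx.partial_size);
        return 0;
}

Run as-is, this consumes two full 128-byte blocks and leaves 44 bytes buffered for a later call, which is the same bookkeeping the patched vmac_update() performs before handing data to vhash_update().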
Diffstat (limited to 'crypto/vmac.c')
-rw-r--r--   crypto/vmac.c   47
1 file changed, 43 insertions(+), 4 deletions(-)
diff --git a/crypto/vmac.c b/crypto/vmac.c
index f2338ca98368..2eb11a30c29c 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -375,6 +375,11 @@ static void vhash_update(const unsigned char *m,
         u64 pkh = ctx->polykey[0];
         u64 pkl = ctx->polykey[1];
 
+        if (!mbytes)
+                return;
+
+        BUG_ON(mbytes % VMAC_NHBYTES);
+
         mptr = (u64 *)m;
         i = mbytes / VMAC_NHBYTES;      /* Must be non-zero */
@@ -454,7 +459,7 @@ do_l3:
 }
 
 static u64 vmac(unsigned char m[], unsigned int mbytes,
-                unsigned char n[16], u64 *tagl,
+                const unsigned char n[16], u64 *tagl,
                 struct vmac_ctx_t *ctx)
 {
         u64 *in_n, *out_p;
@@ -559,8 +564,33 @@ static int vmac_update(struct shash_desc *pdesc, const u8 *p,
 {
         struct crypto_shash *parent = pdesc->tfm;
         struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+        int expand;
+        int min;
+
+        expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
+                        VMAC_NHBYTES - ctx->partial_size : 0;
+
+        min = len < expand ? len : expand;
+
+        memcpy(ctx->partial + ctx->partial_size, p, min);
+        ctx->partial_size += min;
+
+        if (len < expand)
+                return 0;
 
-        vhash_update(p, len, &ctx->__vmac_ctx);
+        vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
+        ctx->partial_size = 0;
+
+        len -= expand;
+        p += expand;
+
+        if (len % VMAC_NHBYTES) {
+                memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
+                        len % VMAC_NHBYTES);
+                ctx->partial_size = len % VMAC_NHBYTES;
+        }
+
+        vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);
 
         return 0;
 }
@@ -572,10 +602,20 @@ static int vmac_final(struct shash_desc *pdesc, u8 *out)
         vmac_t mac;
         u8 nonce[16] = {};
 
-        mac = vmac(NULL, 0, nonce, NULL, ctx);
+        /* vmac() ends up accessing outside the array bounds that
+         * we specify.  It appears to access up to the next 2-word
+         * boundary.  We'll just be uber cautious and zero the
+         * unwritten bytes in the buffer.
+         */
+        if (ctx->partial_size) {
+                memset(ctx->partial + ctx->partial_size, 0,
+                        VMAC_NHBYTES - ctx->partial_size);
+        }
+        mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
         memcpy(out, &mac, sizeof(vmac_t));
         memset(&mac, 0, sizeof(vmac_t));
         memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
+        ctx->partial_size = 0;
         return 0;
 }
@@ -673,4 +713,3 @@ module_exit(vmac_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("VMAC hash algorithm");
-
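The comment added in vmac_final() above explains why the tail of the partial buffer is zeroed before the last call: vmac() can read a little past the length it is given, up to the next 2-word boundary. A companion sketch of that finalization step, using the same hypothetical buf_ctx/BLOCK_SIZE stand-ins as the earlier example rather than the kernel API, might look like this:

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 128                          /* stand-in for VMAC_NHBYTES */

struct buf_ctx {
        unsigned char partial[BLOCK_SIZE];
        unsigned int partial_size;
};

/* Stand-in for the final vmac()/vhash() pass over the leftover bytes. */
static void consume_final(const unsigned char *p, unsigned int len)
{
        printf("finalizing over %u buffered bytes\n", len);
}

static void buf_final(struct buf_ctx *ctx)
{
        /*
         * Zero the unwritten tail so that any read past partial_size
         * (up to the block boundary) sees defined bytes.
         */
        if (ctx->partial_size)
                memset(ctx->partial + ctx->partial_size, 0,
                       BLOCK_SIZE - ctx->partial_size);

        consume_final(ctx->partial, ctx->partial_size);

        /* Reset the carried state so the context can start a new message. */
        memset(ctx->partial, 0, BLOCK_SIZE);
        ctx->partial_size = 0;
}

int main(void)
{
        struct buf_ctx ctx = { 0 };

        /* Pretend 44 bytes were left over after the last update call. */
        memset(ctx.partial, 0xab, 44);
        ctx.partial_size = 44;

        buf_final(&ctx);
        return 0;
}

Clearing partial_size at the end mirrors the patch, which resets it in vmac_final() so the transform can be reused for the next message.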