From 9d25917d49d986c417c173bfde50f41f96c5b202 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 00:02:53 +0300
Subject: [PATCH 01/54] crypto: testmgr - add new serpent test vectors

Add new serpent tests for serpent_sse2 x86_64/i586 8-way/4-way code paths.

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/tcrypt.c  |   2 +
 crypto/testmgr.c |  30 ++++
 crypto/testmgr.h | 393 ++++++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 423 insertions(+), 2 deletions(-)
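
Note: the tcrypt hunk below wires the new cbc(serpent)/ctr(serpent) vectors
into the existing serpent self-test case, so they can be exercised with e.g.:

	modprobe tcrypt mode=9

(tcrypt is test-only and by design does not stay loaded, so modprobe is
expected to return an error once the tests have run.)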

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 0c4e80f34651..ac9e4d2a63b8 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -793,6 +793,8 @@ static int do_test(int m)
 
 	case 9:
 		ret += tcrypt_test("ecb(serpent)");
+		ret += tcrypt_test("cbc(serpent)");
+		ret += tcrypt_test("ctr(serpent)");
 		break;
 
 	case 10:
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index e91c1eb1722a..49436b900081 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1674,6 +1674,21 @@ static const struct alg_test_desc alg_test_descs[] = {
 				}
 			}
 		}
+	}, {
+		.alg = "cbc(serpent)",
+		.test = alg_test_skcipher,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = serpent_cbc_enc_tv_template,
+					.count = SERPENT_CBC_ENC_TEST_VECTORS
+				},
+				.dec = {
+					.vecs = serpent_cbc_dec_tv_template,
+					.count = SERPENT_CBC_DEC_TEST_VECTORS
+				}
+			}
+		}
 	}, {
 		.alg = "cbc(twofish)",
 		.test = alg_test_skcipher,
@@ -1770,6 +1785,21 @@ static const struct alg_test_desc alg_test_descs[] = {
 				}
 			}
 		}
+	}, {
+		.alg = "ctr(serpent)",
+		.test = alg_test_skcipher,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = serpent_ctr_enc_tv_template,
+					.count = SERPENT_CTR_ENC_TEST_VECTORS
+				},
+				.dec = {
+					.vecs = serpent_ctr_dec_tv_template,
+					.count = SERPENT_CTR_DEC_TEST_VECTORS
+				}
+			}
+		}
 	}, {
 		.alg = "ctr(twofish)",
 		.test = alg_test_skcipher,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 37b4d8f45447..ed4aec950faf 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -3096,12 +3096,18 @@ static struct cipher_testvec tf_ctr_dec_tv_template[] = {
  * Serpent test vectors.  These are backwards because Serpent writes
  * octet sequences in right-to-left mode.
  */
-#define SERPENT_ENC_TEST_VECTORS	4
-#define SERPENT_DEC_TEST_VECTORS	4
+#define SERPENT_ENC_TEST_VECTORS	5
+#define SERPENT_DEC_TEST_VECTORS	5
 
 #define TNEPRES_ENC_TEST_VECTORS	4
 #define TNEPRES_DEC_TEST_VECTORS	4
 
+#define SERPENT_CBC_ENC_TEST_VECTORS	1
+#define SERPENT_CBC_DEC_TEST_VECTORS	1
+
+#define SERPENT_CTR_ENC_TEST_VECTORS	2
+#define SERPENT_CTR_DEC_TEST_VECTORS	2
+
 static struct cipher_testvec serpent_enc_tv_template[] = {
 	{
 		.input	= "\x00\x01\x02\x03\x04\x05\x06\x07"
@@ -3140,6 +3146,50 @@ static struct cipher_testvec serpent_enc_tv_template[] = {
 		.result	= "\xdd\xd2\x6b\x98\xa5\xff\xd8\x2c"
 			  "\x05\x34\x5a\x9d\xad\xbf\xaf\x49",
 		.rlen	= 16,
+	}, { /* Generated with Crypto++ */
+		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+			  "\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
+			  "\x78\xBE\x9B\x78\x55\x32\x0F\x55",
+		.klen	= 32,
+		.input	= "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
+			  "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
+			  "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
+			  "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
+			  "\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
+			  "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+			  "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+			  "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+			  "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+			  "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+			  "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+			  "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+			  "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+			  "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+			  "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+			  "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+			  "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+			  "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A",
+		.ilen	= 144,
+		.result	= "\xFB\xB0\x5D\xDE\xC0\xFE\xFC\xEB"
+			  "\xB1\x80\x10\x43\xDE\x62\x70\xBD"
+			  "\xFA\x8A\x93\xEA\x6B\xF7\xC5\xD7"
+			  "\x0C\xD1\xBB\x29\x25\x14\x4C\x22"
+			  "\x77\xA6\x38\x00\xDB\xB9\xE2\x07"
+			  "\xD1\xAC\x82\xBA\xEA\x67\xAA\x39"
+			  "\x99\x34\x89\x5B\x54\xE9\x12\x13"
+			  "\x3B\x04\xE5\x12\x42\xC5\x79\xAB"
+			  "\x0D\xC7\x3C\x58\x2D\xA3\x98\xF6"
+			  "\xE4\x61\x9E\x17\x0B\xCE\xE8\xAA"
+			  "\xB5\x6C\x1A\x3A\x67\x52\x81\x6A"
+			  "\x04\xFF\x8A\x1B\x96\xFE\xE6\x87"
+			  "\x3C\xD4\x39\x7D\x36\x9B\x03\xD5"
+			  "\xB6\xA0\x75\x3C\x83\xE6\x1C\x73"
+			  "\x9D\x74\x2B\x77\x53\x2D\xE5\xBD"
+			  "\x69\xDA\x7A\x01\xF5\x6A\x70\x39"
+			  "\x30\xD4\x2C\xF2\x8E\x06\x4B\x39"
+			  "\xB3\x12\x1D\xB3\x17\x46\xE6\xD6",
+		.rlen	= 144,
 	},
 };
 
@@ -3231,6 +3281,50 @@ static struct cipher_testvec serpent_dec_tv_template[] = {
 		.ilen	= 16,
 		.result	= zeroed_string,
 		.rlen	= 16,
+	}, { /* Generated with Crypto++ */
+		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+			  "\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
+			  "\x78\xBE\x9B\x78\x55\x32\x0F\x55",
+		.klen	= 32,
+		.input	= "\xFB\xB0\x5D\xDE\xC0\xFE\xFC\xEB"
+			  "\xB1\x80\x10\x43\xDE\x62\x70\xBD"
+			  "\xFA\x8A\x93\xEA\x6B\xF7\xC5\xD7"
+			  "\x0C\xD1\xBB\x29\x25\x14\x4C\x22"
+			  "\x77\xA6\x38\x00\xDB\xB9\xE2\x07"
+			  "\xD1\xAC\x82\xBA\xEA\x67\xAA\x39"
+			  "\x99\x34\x89\x5B\x54\xE9\x12\x13"
+			  "\x3B\x04\xE5\x12\x42\xC5\x79\xAB"
+			  "\x0D\xC7\x3C\x58\x2D\xA3\x98\xF6"
+			  "\xE4\x61\x9E\x17\x0B\xCE\xE8\xAA"
+			  "\xB5\x6C\x1A\x3A\x67\x52\x81\x6A"
+			  "\x04\xFF\x8A\x1B\x96\xFE\xE6\x87"
+			  "\x3C\xD4\x39\x7D\x36\x9B\x03\xD5"
+			  "\xB6\xA0\x75\x3C\x83\xE6\x1C\x73"
+			  "\x9D\x74\x2B\x77\x53\x2D\xE5\xBD"
+			  "\x69\xDA\x7A\x01\xF5\x6A\x70\x39"
+			  "\x30\xD4\x2C\xF2\x8E\x06\x4B\x39"
+			  "\xB3\x12\x1D\xB3\x17\x46\xE6\xD6",
+		.ilen	= 144,
+		.result	= "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
+			  "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
+			  "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
+			  "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
+			  "\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
+			  "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+			  "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+			  "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+			  "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+			  "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+			  "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+			  "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+			  "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+			  "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+			  "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+			  "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+			  "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+			  "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A",
+		.rlen	= 144,
 	},
 };
 
@@ -3275,6 +3369,301 @@ static struct cipher_testvec tnepres_dec_tv_template[] = {
 	},
 };
 
+static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
+	{ /* Generated with Crypto++ */
+		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+			  "\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
+			  "\x78\xBE\x9B\x78\x55\x32\x0F\x55",
+		.klen	= 32,
+		.iv	= "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
+			  "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+		.input	= "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
+			  "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
+			  "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
+			  "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
+			  "\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
+			  "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+			  "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+			  "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+			  "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+			  "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+			  "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+			  "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+			  "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+			  "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+			  "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+			  "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+			  "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+			  "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A",
+		.ilen	= 144,
+		.result	= "\x80\xCF\x11\x41\x1A\xB9\x4B\x9C"
+			  "\xFF\xB7\x6C\xEA\xF0\xAF\x77\x6E"
+			  "\x71\x75\x95\x9D\x4E\x1C\xCF\xAD"
+			  "\x81\x34\xE9\x8F\xAE\x5A\x91\x1C"
+			  "\x38\x63\x35\x7E\x79\x18\x0A\xE8"
+			  "\x67\x06\x76\xD5\xFF\x22\x2F\xDA"
+			  "\xB6\x2D\x57\x13\xB6\x3C\xBC\x97"
+			  "\xFE\x53\x75\x35\x97\x7F\x51\xEA"
+			  "\xDF\x5D\xE8\x9D\xCC\xD9\xAE\xE7"
+			  "\x62\x67\xFF\x04\xC2\x18\x22\x5F"
+			  "\x2E\x06\xC1\xE2\x26\xCD\xC6\x1E"
+			  "\xE5\x2C\x4E\x87\x23\xDD\xF0\x41"
+			  "\x08\xA5\xB4\x3E\x07\x1E\x0B\xBB"
+			  "\x72\x84\xF8\x0A\x3F\x38\x5E\x91"
+			  "\x15\x26\xE1\xDB\xA4\x3D\x74\xD2"
+			  "\x41\x1E\x3F\xA9\xC6\x7D\x2A\xAB"
+			  "\x27\xDF\x89\x1D\x86\x3E\xF7\x5A"
+			  "\xF6\xE3\x0F\xC7\x6B\x4C\x96\x7C",
+		.rlen	= 144,
+	},
+};
+
+static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
+	{ /* Generated with Crypto++ */
+		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+			  "\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
+			  "\x78\xBE\x9B\x78\x55\x32\x0F\x55",
+		.klen	= 32,
+		.iv	= "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
+			  "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+		.input	= "\x80\xCF\x11\x41\x1A\xB9\x4B\x9C"
+			  "\xFF\xB7\x6C\xEA\xF0\xAF\x77\x6E"
+			  "\x71\x75\x95\x9D\x4E\x1C\xCF\xAD"
+			  "\x81\x34\xE9\x8F\xAE\x5A\x91\x1C"
+			  "\x38\x63\x35\x7E\x79\x18\x0A\xE8"
+			  "\x67\x06\x76\xD5\xFF\x22\x2F\xDA"
+			  "\xB6\x2D\x57\x13\xB6\x3C\xBC\x97"
+			  "\xFE\x53\x75\x35\x97\x7F\x51\xEA"
+			  "\xDF\x5D\xE8\x9D\xCC\xD9\xAE\xE7"
+			  "\x62\x67\xFF\x04\xC2\x18\x22\x5F"
+			  "\x2E\x06\xC1\xE2\x26\xCD\xC6\x1E"
+			  "\xE5\x2C\x4E\x87\x23\xDD\xF0\x41"
+			  "\x08\xA5\xB4\x3E\x07\x1E\x0B\xBB"
+			  "\x72\x84\xF8\x0A\x3F\x38\x5E\x91"
+			  "\x15\x26\xE1\xDB\xA4\x3D\x74\xD2"
+			  "\x41\x1E\x3F\xA9\xC6\x7D\x2A\xAB"
+			  "\x27\xDF\x89\x1D\x86\x3E\xF7\x5A"
+			  "\xF6\xE3\x0F\xC7\x6B\x4C\x96\x7C",
+		.ilen	= 144,
+		.result	= "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
+			  "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
+			  "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
+			  "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
+			  "\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
+			  "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+			  "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+			  "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+			  "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+			  "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+			  "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+			  "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+			  "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+			  "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+			  "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+			  "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+			  "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+			  "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A",
+		.rlen	= 144,
+	},
+};
+
+static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
+	{ /* Generated with Crypto++ */
+		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+			  "\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
+			  "\x78\xBE\x9B\x78\x55\x32\x0F\x55",
+		.klen	= 32,
+		.iv	= "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
+			  "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+		.input	= "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
+			  "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
+			  "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
+			  "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
+			  "\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
+			  "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+			  "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+			  "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+			  "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+			  "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+			  "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+			  "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+			  "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+			  "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+			  "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+			  "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+			  "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+			  "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A",
+		.ilen	= 144,
+		.result	= "\x84\x68\xEC\xF2\x1C\x88\x20\xCA"
+			  "\x37\x69\xE3\x3A\x22\x85\x48\x46"
+			  "\x70\xAA\x25\xB4\xCD\x8B\x04\x4E"
+			  "\x8D\x15\x2B\x98\xDF\x7B\x6D\xB9"
+			  "\xE0\x4A\x73\x00\x65\xB6\x1A\x0D"
+			  "\x5C\x60\xDF\x34\xDC\x60\x4C\xDF"
+			  "\xB5\x1F\x26\x8C\xDA\xC1\x11\xA8"
+			  "\x80\xFA\x37\x7A\x89\xAA\xAE\x7B"
+			  "\x92\x6E\xB9\xDC\xC9\x62\x4F\x88"
+			  "\x0A\x5D\x97\x2F\x6B\xAC\x03\x7C"
+			  "\x22\xF6\x55\x5A\xFA\x35\xA5\x17"
+			  "\xA1\x5C\x5E\x2B\x63\x2D\xB9\x91"
+			  "\x3E\x83\x26\x00\x4E\xD5\xBE\xCE"
+			  "\x79\xC4\x3D\xFC\x70\xA0\xAD\x96"
+			  "\xBA\x58\x2A\x1C\xDF\xC2\x3A\xA5"
+			  "\x7C\xB5\x12\x89\xED\xBF\xB6\x09"
+			  "\x13\x4F\x7D\x61\x3C\x5C\x27\xFC"
+			  "\x5D\xE1\x4F\xA1\xEA\xB3\xCA\xB9",
+		.rlen	= 144,
+	}, { /* Generated with Crypto++ */
+		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+			  "\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
+			  "\x78\xBE\x9B\x78\x55\x32\x0F\x55",
+		.klen	= 32,
+		.iv	= "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
+			  "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+		.input	= "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
+			  "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
+			  "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
+			  "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
+			  "\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
+			  "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+			  "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+			  "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+			  "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+			  "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+			  "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+			  "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+			  "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+			  "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+			  "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+			  "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+			  "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+			  "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+			  "\xF1\x65\xFC",
+		.ilen	= 147,
+		.result	= "\x84\x68\xEC\xF2\x1C\x88\x20\xCA"
+			  "\x37\x69\xE3\x3A\x22\x85\x48\x46"
+			  "\x70\xAA\x25\xB4\xCD\x8B\x04\x4E"
+			  "\x8D\x15\x2B\x98\xDF\x7B\x6D\xB9"
+			  "\xE0\x4A\x73\x00\x65\xB6\x1A\x0D"
+			  "\x5C\x60\xDF\x34\xDC\x60\x4C\xDF"
+			  "\xB5\x1F\x26\x8C\xDA\xC1\x11\xA8"
+			  "\x80\xFA\x37\x7A\x89\xAA\xAE\x7B"
+			  "\x92\x6E\xB9\xDC\xC9\x62\x4F\x88"
+			  "\x0A\x5D\x97\x2F\x6B\xAC\x03\x7C"
+			  "\x22\xF6\x55\x5A\xFA\x35\xA5\x17"
+			  "\xA1\x5C\x5E\x2B\x63\x2D\xB9\x91"
+			  "\x3E\x83\x26\x00\x4E\xD5\xBE\xCE"
+			  "\x79\xC4\x3D\xFC\x70\xA0\xAD\x96"
+			  "\xBA\x58\x2A\x1C\xDF\xC2\x3A\xA5"
+			  "\x7C\xB5\x12\x89\xED\xBF\xB6\x09"
+			  "\x13\x4F\x7D\x61\x3C\x5C\x27\xFC"
+			  "\x5D\xE1\x4F\xA1\xEA\xB3\xCA\xB9"
+			  "\xE6\xD0\x97",
+		.rlen	= 147,
+	},
+};
+
+static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
+	{ /* Generated with Crypto++ */
+		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+			  "\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
+			  "\x78\xBE\x9B\x78\x55\x32\x0F\x55",
+		.klen	= 32,
+		.iv	= "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
+			  "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+		.input	= "\x84\x68\xEC\xF2\x1C\x88\x20\xCA"
+			  "\x37\x69\xE3\x3A\x22\x85\x48\x46"
+			  "\x70\xAA\x25\xB4\xCD\x8B\x04\x4E"
+			  "\x8D\x15\x2B\x98\xDF\x7B\x6D\xB9"
+			  "\xE0\x4A\x73\x00\x65\xB6\x1A\x0D"
+			  "\x5C\x60\xDF\x34\xDC\x60\x4C\xDF"
+			  "\xB5\x1F\x26\x8C\xDA\xC1\x11\xA8"
+			  "\x80\xFA\x37\x7A\x89\xAA\xAE\x7B"
+			  "\x92\x6E\xB9\xDC\xC9\x62\x4F\x88"
+			  "\x0A\x5D\x97\x2F\x6B\xAC\x03\x7C"
+			  "\x22\xF6\x55\x5A\xFA\x35\xA5\x17"
+			  "\xA1\x5C\x5E\x2B\x63\x2D\xB9\x91"
+			  "\x3E\x83\x26\x00\x4E\xD5\xBE\xCE"
+			  "\x79\xC4\x3D\xFC\x70\xA0\xAD\x96"
+			  "\xBA\x58\x2A\x1C\xDF\xC2\x3A\xA5"
+			  "\x7C\xB5\x12\x89\xED\xBF\xB6\x09"
+			  "\x13\x4F\x7D\x61\x3C\x5C\x27\xFC"
+			  "\x5D\xE1\x4F\xA1\xEA\xB3\xCA\xB9",
+		.ilen	= 144,
+		.result	= "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
+			  "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
+			  "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
+			  "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
+			  "\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
+			  "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+			  "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+			  "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+			  "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+			  "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+			  "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+			  "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+			  "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+			  "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+			  "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+			  "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+			  "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+			  "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A",
+		.rlen	= 144,
+	}, { /* Generated with Crypto++ */
+		.key	= "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
+			  "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
+			  "\x27\x04\xE1\x27\x04\xE1\xBE\x9B"
+			  "\x78\xBE\x9B\x78\x55\x32\x0F\x55",
+		.klen	= 32,
+		.iv	= "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F"
+			  "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64",
+		.input	= "\x84\x68\xEC\xF2\x1C\x88\x20\xCA"
+			  "\x37\x69\xE3\x3A\x22\x85\x48\x46"
+			  "\x70\xAA\x25\xB4\xCD\x8B\x04\x4E"
+			  "\x8D\x15\x2B\x98\xDF\x7B\x6D\xB9"
+			  "\xE0\x4A\x73\x00\x65\xB6\x1A\x0D"
+			  "\x5C\x60\xDF\x34\xDC\x60\x4C\xDF"
+			  "\xB5\x1F\x26\x8C\xDA\xC1\x11\xA8"
+			  "\x80\xFA\x37\x7A\x89\xAA\xAE\x7B"
+			  "\x92\x6E\xB9\xDC\xC9\x62\x4F\x88"
+			  "\x0A\x5D\x97\x2F\x6B\xAC\x03\x7C"
+			  "\x22\xF6\x55\x5A\xFA\x35\xA5\x17"
+			  "\xA1\x5C\x5E\x2B\x63\x2D\xB9\x91"
+			  "\x3E\x83\x26\x00\x4E\xD5\xBE\xCE"
+			  "\x79\xC4\x3D\xFC\x70\xA0\xAD\x96"
+			  "\xBA\x58\x2A\x1C\xDF\xC2\x3A\xA5"
+			  "\x7C\xB5\x12\x89\xED\xBF\xB6\x09"
+			  "\x13\x4F\x7D\x61\x3C\x5C\x27\xFC"
+			  "\x5D\xE1\x4F\xA1\xEA\xB3\xCA\xB9"
+			  "\xE6\xD0\x97",
+		.ilen	= 147,
+		.result	= "\x56\xED\x84\x1B\x8F\x26\xBD\x31"
+			  "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3"
+			  "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15"
+			  "\xAC\x20\xB7\x4E\xE5\x59\xF0\x87"
+			  "\x1E\x92\x29\xC0\x34\xCB\x62\xF9"
+			  "\x6D\x04\x9B\x0F\xA6\x3D\xD4\x48"
+			  "\xDF\x76\x0D\x81\x18\xAF\x23\xBA"
+			  "\x51\xE8\x5C\xF3\x8A\x21\x95\x2C"
+			  "\xC3\x37\xCE\x65\xFC\x70\x07\x9E"
+			  "\x12\xA9\x40\xD7\x4B\xE2\x79\x10"
+			  "\x84\x1B\xB2\x26\xBD\x54\xEB\x5F"
+			  "\xF6\x8D\x01\x98\x2F\xC6\x3A\xD1"
+			  "\x68\xFF\x73\x0A\xA1\x15\xAC\x43"
+			  "\xDA\x4E\xE5\x7C\x13\x87\x1E\xB5"
+			  "\x29\xC0\x57\xEE\x62\xF9\x90\x04"
+			  "\x9B\x32\xC9\x3D\xD4\x6B\x02\x76"
+			  "\x0D\xA4\x18\xAF\x46\xDD\x51\xE8"
+			  "\x7F\x16\x8A\x21\xB8\x2C\xC3\x5A"
+			  "\xF1\x65\xFC",
+		.rlen	= 147,
+	},
+};
 
 /* Cast6 test vectors from RFC 2612 */
 #define CAST6_ENC_TEST_VECTORS	3

From 3f3baf359dd3cc56fbaf9a2fb1a425ce7c18dbff Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 00:02:58 +0300
Subject: [PATCH 02/54] crypto: tcrypt - add test_acipher_speed

Add test_acipher_speed for testing async block ciphers.

Also include tests for aes/des/des3_ede as these appear to have ablk_cipher
implementations available.

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/tcrypt.c | 250 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 250 insertions(+)
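
Note: the new async speed tests are reachable as tcrypt modes 500 (aes),
501 (des3_ede) and 502 (des). For example, a timed run of one second per
block size would look roughly like:

	modprobe tcrypt mode=500 sec=1

With sec=0 the cycle-counting path (test_acipher_cycles) is used instead of
the jiffies-based one.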

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index ac9e4d2a63b8..dd3a0f8a0912 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -719,6 +719,207 @@ out:
 	crypto_free_ahash(tfm);
 }
 
+static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
+{
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		struct tcrypt_result *tr = req->base.data;
+
+		ret = wait_for_completion_interruptible(&tr->completion);
+		if (!ret)
+			ret = tr->err;
+		INIT_COMPLETION(tr->completion);
+	}
+
+	return ret;
+}
+
+static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
+				int blen, int sec)
+{
+	unsigned long start, end;
+	int bcount;
+	int ret;
+
+	for (start = jiffies, end = start + sec * HZ, bcount = 0;
+	     time_before(jiffies, end); bcount++) {
+		if (enc)
+			ret = do_one_acipher_op(req,
+						crypto_ablkcipher_encrypt(req));
+		else
+			ret = do_one_acipher_op(req,
+						crypto_ablkcipher_decrypt(req));
+
+		if (ret)
+			return ret;
+	}
+
+	pr_cont("%d operations in %d seconds (%ld bytes)\n",
+		bcount, sec, (long)bcount * blen);
+	return 0;
+}
+
+static int test_acipher_cycles(struct ablkcipher_request *req, int enc,
+			       int blen)
+{
+	unsigned long cycles = 0;
+	int ret = 0;
+	int i;
+
+	/* Warm-up run. */
+	for (i = 0; i < 4; i++) {
+		if (enc)
+			ret = do_one_acipher_op(req,
+						crypto_ablkcipher_encrypt(req));
+		else
+			ret = do_one_acipher_op(req,
+						crypto_ablkcipher_decrypt(req));
+
+		if (ret)
+			goto out;
+	}
+
+	/* The real thing. */
+	for (i = 0; i < 8; i++) {
+		cycles_t start, end;
+
+		start = get_cycles();
+		if (enc)
+			ret = do_one_acipher_op(req,
+						crypto_ablkcipher_encrypt(req));
+		else
+			ret = do_one_acipher_op(req,
+						crypto_ablkcipher_decrypt(req));
+		end = get_cycles();
+
+		if (ret)
+			goto out;
+
+		cycles += end - start;
+	}
+
+out:
+	if (ret == 0)
+		pr_cont("1 operation in %lu cycles (%d bytes)\n",
+			(cycles + 4) / 8, blen);
+
+	return ret;
+}
+
+static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
+			       struct cipher_speed_template *template,
+			       unsigned int tcount, u8 *keysize)
+{
+	unsigned int ret, i, j, iv_len;
+	struct tcrypt_result tresult;
+	const char *key;
+	char iv[128];
+	struct ablkcipher_request *req;
+	struct crypto_ablkcipher *tfm;
+	const char *e;
+	u32 *b_size;
+
+	if (enc == ENCRYPT)
+		e = "encryption";
+	else
+		e = "decryption";
+
+	pr_info("\ntesting speed of async %s %s\n", algo, e);
+
+	init_completion(&tresult.completion);
+
+	tfm = crypto_alloc_ablkcipher(algo, 0, 0);
+
+	if (IS_ERR(tfm)) {
+		pr_err("failed to load transform for %s: %ld\n", algo,
+		       PTR_ERR(tfm));
+		return;
+	}
+
+	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		pr_err("tcrypt: skcipher: Failed to allocate request for %s\n",
+		       algo);
+		goto out;
+	}
+
+	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					tcrypt_complete, &tresult);
+
+	i = 0;
+	do {
+		b_size = block_sizes;
+
+		do {
+			struct scatterlist sg[TVMEMSIZE];
+
+			if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
+				pr_err("template (%u) too big for "
+				       "tvmem (%lu)\n", *keysize + *b_size,
+				       TVMEMSIZE * PAGE_SIZE);
+				goto out_free_req;
+			}
+
+			pr_info("test %u (%d bit key, %d byte blocks): ", i,
+				*keysize * 8, *b_size);
+
+			memset(tvmem[0], 0xff, PAGE_SIZE);
+
+			/* set key, plain text and IV */
+			key = tvmem[0];
+			for (j = 0; j < tcount; j++) {
+				if (template[j].klen == *keysize) {
+					key = template[j].key;
+					break;
+				}
+			}
+
+			crypto_ablkcipher_clear_flags(tfm, ~0);
+
+			ret = crypto_ablkcipher_setkey(tfm, key, *keysize);
+			if (ret) {
+				pr_err("setkey() failed flags=%x\n",
+					crypto_ablkcipher_get_flags(tfm));
+				goto out_free_req;
+			}
+
+			sg_init_table(sg, TVMEMSIZE);
+			sg_set_buf(sg, tvmem[0] + *keysize,
+				   PAGE_SIZE - *keysize);
+			for (j = 1; j < TVMEMSIZE; j++) {
+				sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
+				memset(tvmem[j], 0xff, PAGE_SIZE);
+			}
+
+			iv_len = crypto_ablkcipher_ivsize(tfm);
+			if (iv_len)
+				memset(&iv, 0xff, iv_len);
+
+			ablkcipher_request_set_crypt(req, sg, sg, *b_size, iv);
+
+			if (sec)
+				ret = test_acipher_jiffies(req, enc,
+							   *b_size, sec);
+			else
+				ret = test_acipher_cycles(req, enc,
+							  *b_size);
+
+			if (ret) {
+				pr_err("%s() failed flags=%x\n", e,
+					crypto_ablkcipher_get_flags(tfm));
+				break;
+			}
+			b_size++;
+			i++;
+		} while (*b_size);
+		keysize++;
+	} while (*keysize);
+
+out_free_req:
+	ablkcipher_request_free(req);
+out:
+	crypto_free_ablkcipher(tfm);
+}
+
 static void test_available(void)
 {
 	char **name = check;
@@ -1243,6 +1444,55 @@ static int do_test(int m)
 	case 499:
 		break;
 
+	case 500:
+		test_acipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
+				   speed_template_16_24_32);
+		test_acipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
+				   speed_template_16_24_32);
+		test_acipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0,
+				   speed_template_16_24_32);
+		test_acipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
+				   speed_template_16_24_32);
+		test_acipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
+				   speed_template_32_40_48);
+		test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
+				   speed_template_32_40_48);
+		test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
+				   speed_template_32_48_64);
+		test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
+				   speed_template_32_48_64);
+		test_acipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
+				   speed_template_16_24_32);
+		test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
+				   speed_template_16_24_32);
+		break;
+
+	case 501:
+		test_acipher_speed("ecb(des3_ede)", ENCRYPT, sec,
+				   des3_speed_template, DES3_SPEED_VECTORS,
+				   speed_template_24);
+		test_acipher_speed("ecb(des3_ede)", DECRYPT, sec,
+				   des3_speed_template, DES3_SPEED_VECTORS,
+				   speed_template_24);
+		test_acipher_speed("cbc(des3_ede)", ENCRYPT, sec,
+				   des3_speed_template, DES3_SPEED_VECTORS,
+				   speed_template_24);
+		test_acipher_speed("cbc(des3_ede)", DECRYPT, sec,
+				   des3_speed_template, DES3_SPEED_VECTORS,
+				   speed_template_24);
+		break;
+
+	case 502:
+		test_acipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0,
+				   speed_template_8);
+		test_acipher_speed("ecb(des)", DECRYPT, sec, NULL, 0,
+				   speed_template_8);
+		test_acipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0,
+				   speed_template_8);
+		test_acipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
+				   speed_template_8);
+		break;
+
 	case 1000:
 		test_available();
 		break;

From 7fb7fe4469d0b870a031a5d33676343979b80625 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 00:03:03 +0300
Subject: [PATCH 03/54] crypto: tcrypt - add serpent speed tests

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/tcrypt.c | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)
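
Note: these serpent speed tests map to tcrypt mode 207 for the synchronous
(test_cipher_speed) path and mode 503 for the async (test_acipher_speed)
path, e.g. "modprobe tcrypt mode=503 sec=1".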

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index dd3a0f8a0912..5526065e8e79 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1292,6 +1292,21 @@ static int do_test(int m)
 				  speed_template_16_32);
 		break;
 
+	case 207:
+		test_cipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
+				  speed_template_16_32);
+		test_cipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0,
+				  speed_template_16_32);
+		test_cipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0,
+				  speed_template_16_32);
+		test_cipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0,
+				  speed_template_16_32);
+		test_cipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0,
+				  speed_template_16_32);
+		test_cipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
+				  speed_template_16_32);
+		break;
+
 	case 300:
 		/* fall through */
 
@@ -1493,6 +1508,21 @@ static int do_test(int m)
 				   speed_template_8);
 		break;
 
+	case 503:
+		test_acipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
+				   speed_template_16_32);
+		test_acipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0,
+				   speed_template_16_32);
+		test_acipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0,
+				   speed_template_16_32);
+		test_acipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0,
+				   speed_template_16_32);
+		test_acipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0,
+				   speed_template_16_32);
+		test_acipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
+				   speed_template_16_32);
+		break;
+
 	case 1000:
 		test_available();
 		break;

From bc83b8299cb4ac2a9f64215a04854e4c934d1510 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 00:03:08 +0300
Subject: [PATCH 04/54] crypto: serpent - export common functions for
 x86_64/i386-sse2 assembler implementations

The serpent SSE2 assembler implementations only provide 4-way/8-way parallel
functions, so they need the generic setkey and one-block encrypt/decrypt
functions.

CC: Dag Arne Osvik <osvik@ii.uib.no>
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/serpent.c         | 41 +++++++++++++++++++++-------------------
 include/crypto/serpent.h | 25 ++++++++++++++++++++++++
 2 files changed, 47 insertions(+), 19 deletions(-)
 create mode 100644 include/crypto/serpent.h
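
Note: the exported one-block helpers are intended for the trailing blocks that
do not fill a full 4-way/8-way batch in the assembler glue code. A minimal
sketch of such a caller follows; only serpent_setkey()/__serpent_encrypt(),
struct serpent_ctx and SERPENT_BLOCK_SIZE come from this patch, the rest is
illustrative:

	#include <crypto/serpent.h>

	/* Illustrative leftover-block handler for an assembler glue module:
	 * anything that does not fill a full parallel batch is encrypted one
	 * 16-byte block at a time with the exported generic routine.
	 */
	static void serpent_encrypt_tail(struct serpent_ctx *ctx, u8 *dst,
					 const u8 *src, unsigned int nblocks)
	{
		unsigned int i;

		for (i = 0; i < nblocks; i++)
			__serpent_encrypt(ctx, dst + i * SERPENT_BLOCK_SIZE,
					  src + i * SERPENT_BLOCK_SIZE);
	}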

diff --git a/crypto/serpent.c b/crypto/serpent.c
index b651a55fa569..867ca93ebb63 100644
--- a/crypto/serpent.c
+++ b/crypto/serpent.c
@@ -21,16 +21,12 @@
 #include <asm/byteorder.h>
 #include <linux/crypto.h>
 #include <linux/types.h>
+#include <crypto/serpent.h>
 
 /* Key is padded to the maximum of 256 bits before round key generation.
  * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
  */
 
-#define SERPENT_MIN_KEY_SIZE		  0
-#define SERPENT_MAX_KEY_SIZE		 32
-#define SERPENT_EXPKEY_WORDS		132
-#define SERPENT_BLOCK_SIZE		 16
-
 #define PHI 0x9e3779b9UL
 
 #define keyiter(a,b,c,d,i,j) \
@@ -210,13 +206,7 @@
 	x1 ^= x4;	x3 ^= x4;	x4 &= x0;	\
 	x4 ^= x2;
 
-struct serpent_ctx {
-	u32 expkey[SERPENT_EXPKEY_WORDS];
-};
-
-
-static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
-			  unsigned int keylen)
+int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
 {
 	struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
 	u32 *k = ctx->expkey;
@@ -359,12 +349,11 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(serpent_setkey);
 
-static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src)
 {
-	struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
-	const u32
-		*k = ctx->expkey;
+	const u32 *k = ctx->expkey;
 	const __le32 *s = (const __le32 *)src;
 	__le32	*d = (__le32 *)dst;
 	u32	r0, r1, r2, r3, r4;
@@ -418,12 +407,18 @@ static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	d[2] = cpu_to_le32(r2);
 	d[3] = cpu_to_le32(r3);
 }
+EXPORT_SYMBOL_GPL(__serpent_encrypt);
 
-static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
-	const u32
-		*k = ((struct serpent_ctx *)ctx)->expkey;
+
+	__serpent_encrypt(ctx, dst, src);
+}
+
+void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src)
+{
+	const u32 *k = ctx->expkey;
 	const __le32 *s = (const __le32 *)src;
 	__le32	*d = (__le32 *)dst;
 	u32	r0, r1, r2, r3, r4;
@@ -472,6 +467,14 @@ static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	d[2] = cpu_to_le32(r1);
 	d[3] = cpu_to_le32(r4);
 }
+EXPORT_SYMBOL_GPL(__serpent_decrypt);
+
+static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+	struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	__serpent_decrypt(ctx, dst, src);
+}
 
 static struct crypto_alg serpent_alg = {
 	.cra_name		=	"serpent",
diff --git a/include/crypto/serpent.h b/include/crypto/serpent.h
new file mode 100644
index 000000000000..40df885f9d1f
--- /dev/null
+++ b/include/crypto/serpent.h
@@ -0,0 +1,25 @@
+/*
+ * Common values for serpent algorithms
+ */
+
+#ifndef _CRYPTO_SERPENT_H
+#define _CRYPTO_SERPENT_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define SERPENT_MIN_KEY_SIZE		  0
+#define SERPENT_MAX_KEY_SIZE		 32
+#define SERPENT_EXPKEY_WORDS		132
+#define SERPENT_BLOCK_SIZE		 16
+
+struct serpent_ctx {
+	u32 expkey[SERPENT_EXPKEY_WORDS];
+};
+
+int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
+
+void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
+void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
+
+#endif

From dd6eec242bb2df73743f07535d2bacd6707c6300 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 00:03:13 +0300
Subject: [PATCH 05/54] crypto: serpent - rename module from serpent to
 serpent_generic

Rename the module from serpent.ko to serpent_generic.ko and add a module
alias, so that an assembler implementation can autoload on 'modprobe serpent'.
Also add cra_driver_name and cra_priority for the serpent cipher.

CC: Dag Arne Osvik <osvik@ii.uib.no>
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/Makefile  | 4 +++-
 crypto/serpent.c | 3 +++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/crypto/Makefile b/crypto/Makefile
index 9e6eee2c05db..0e1f2aa7218e 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -65,7 +65,9 @@ obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o
 obj-$(CONFIG_CRYPTO_BLOWFISH_COMMON) += blowfish_common.o
 obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
 obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
-obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
+
+serpent_generic-y := serpent.o
+obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
 obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia.o
 obj-$(CONFIG_CRYPTO_CAST5) += cast5.o
diff --git a/crypto/serpent.c b/crypto/serpent.c
index 867ca93ebb63..eb6163041b08 100644
--- a/crypto/serpent.c
+++ b/crypto/serpent.c
@@ -478,6 +478,8 @@ static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 
 static struct crypto_alg serpent_alg = {
 	.cra_name		=	"serpent",
+	.cra_driver_name	=	"serpent-generic",
+	.cra_priority		=	100,
 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize		=	SERPENT_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof(struct serpent_ctx),
@@ -588,3 +590,4 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
 MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
 MODULE_ALIAS("tnepres");
+MODULE_ALIAS("serpent");

From b884f8b901b968b90d8d1b82d388583fa1b2605d Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 13:32:14 +0300
Subject: [PATCH 06/54] crypto: lrw - fix memleak

The LRW module leaks the child cipher when init_tfm() fails because the
child's block size is not 16.

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/lrw.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/crypto/lrw.c b/crypto/lrw.c
index 358f80be2bf9..fca3246a953f 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -220,6 +220,7 @@ static int init_tfm(struct crypto_tfm *tfm)
 
 	if (crypto_cipher_blocksize(cipher) != 16) {
 		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+		crypto_free_cipher(cipher);
 		return -EINVAL;
 	}
 

From 4660720df61321f9746353ad3188bf4de2408b67 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 13:32:19 +0300
Subject: [PATCH 07/54] crypto: lrw - use blocksize constant

LRW has a fixed block size of 16 bytes. Define LRW_BLOCK_SIZE and use it in
place of crypto_cipher_blocksize().

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/lrw.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/crypto/lrw.c b/crypto/lrw.c
index fca3246a953f..bee60226f723 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -27,6 +27,8 @@
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
 
+#define LRW_BLOCK_SIZE 16
+
 struct priv {
 	struct crypto_cipher *child;
 	/* optimizes multiplying a random (non incrementing, as at the
@@ -61,7 +63,7 @@ static int setkey(struct crypto_tfm *parent, const u8 *key,
 	struct crypto_cipher *child = ctx->child;
 	int err, i;
 	be128 tmp = { 0 };
-	int bsize = crypto_cipher_blocksize(child);
+	int bsize = LRW_BLOCK_SIZE;
 
 	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
@@ -134,7 +136,7 @@ static int crypt(struct blkcipher_desc *d,
 {
 	int err;
 	unsigned int avail;
-	const int bs = crypto_cipher_blocksize(ctx->child);
+	const int bs = LRW_BLOCK_SIZE;
 	struct sinfo s = {
 		.tfm = crypto_cipher_tfm(ctx->child),
 		.fn = fn
@@ -218,7 +220,7 @@ static int init_tfm(struct crypto_tfm *tfm)
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
-	if (crypto_cipher_blocksize(cipher) != 16) {
+	if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) {
 		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
 		crypto_free_cipher(cipher);
 		return -EINVAL;

From 171c02048f50d7187991f251ddeed2d7e5de104f Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 13:32:24 +0300
Subject: [PATCH 08/54] crypto: lrw - split gf128mul table initialization from
 setkey

Split the gf128mul table initialization from setkey so that it can be used
outside the lrw module.

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/lrw.c | 61 ++++++++++++++++++++++++++++++++++------------------
 1 file changed, 40 insertions(+), 21 deletions(-)

diff --git a/crypto/lrw.c b/crypto/lrw.c
index bee60226f723..91c17fa18374 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -29,8 +29,7 @@
 
 #define LRW_BLOCK_SIZE 16
 
-struct priv {
-	struct crypto_cipher *child;
+struct lrw_table_ctx {
 	/* optimizes multiplying a random (non incrementing, as at the
 	 * start of a new sector) value with key2, we could also have
 	 * used 4k optimization tables or no optimization at all. In the
@@ -45,6 +44,11 @@ struct priv {
 	be128 mulinc[128];
 };
 
+struct priv {
+	struct crypto_cipher *child;
+	struct lrw_table_ctx table;
+};
+
 static inline void setbit128_bbe(void *b, int bit)
 {
 	__set_bit(bit ^ (0x80 -
@@ -56,28 +60,16 @@ static inline void setbit128_bbe(void *b, int bit)
 			), b);
 }
 
-static int setkey(struct crypto_tfm *parent, const u8 *key,
-		  unsigned int keylen)
+static int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
 {
-	struct priv *ctx = crypto_tfm_ctx(parent);
-	struct crypto_cipher *child = ctx->child;
-	int err, i;
 	be128 tmp = { 0 };
-	int bsize = LRW_BLOCK_SIZE;
-
-	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
-				       CRYPTO_TFM_REQ_MASK);
-	if ((err = crypto_cipher_setkey(child, key, keylen - bsize)))
-		return err;
-	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
-				     CRYPTO_TFM_RES_MASK);
+	int i;
 
 	if (ctx->table)
 		gf128mul_free_64k(ctx->table);
 
 	/* initialize multiplication table for Key2 */
-	ctx->table = gf128mul_init_64k_bbe((be128 *)(key + keylen - bsize));
+	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
 	if (!ctx->table)
 		return -ENOMEM;
 
@@ -91,6 +83,32 @@ static int setkey(struct crypto_tfm *parent, const u8 *key,
 	return 0;
 }
 
+static void lrw_free_table(struct lrw_table_ctx *ctx)
+{
+	if (ctx->table)
+		gf128mul_free_64k(ctx->table);
+}
+
+static int setkey(struct crypto_tfm *parent, const u8 *key,
+		  unsigned int keylen)
+{
+	struct priv *ctx = crypto_tfm_ctx(parent);
+	struct crypto_cipher *child = ctx->child;
+	int err, bsize = LRW_BLOCK_SIZE;
+	const u8 *tweak = key + keylen - bsize;
+
+	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
+				       CRYPTO_TFM_REQ_MASK);
+	err = crypto_cipher_setkey(child, key, keylen - bsize);
+	if (err)
+		return err;
+	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
+				     CRYPTO_TFM_RES_MASK);
+
+	return lrw_init_table(&ctx->table, tweak);
+}
+
 struct sinfo {
 	be128 t;
 	struct crypto_tfm *tfm;
@@ -157,7 +175,7 @@ static int crypt(struct blkcipher_desc *d,
 	s.t = *iv;
 
 	/* T <- I*Key2 */
-	gf128mul_64k_bbe(&s.t, ctx->table);
+	gf128mul_64k_bbe(&s.t, ctx->table.table);
 
 	goto first;
 
@@ -165,7 +183,8 @@ static int crypt(struct blkcipher_desc *d,
 		do {
 			/* T <- I*Key2, using the optimization
 			 * discussed in the specification */
-			be128_xor(&s.t, &s.t, &ctx->mulinc[get_index128(iv)]);
+			be128_xor(&s.t, &s.t,
+				  &ctx->table.mulinc[get_index128(iv)]);
 			inc(iv);
 
 first:
@@ -233,8 +252,8 @@ static int init_tfm(struct crypto_tfm *tfm)
 static void exit_tfm(struct crypto_tfm *tfm)
 {
 	struct priv *ctx = crypto_tfm_ctx(tfm);
-	if (ctx->table)
-		gf128mul_free_64k(ctx->table);
+
+	lrw_free_table(&ctx->table);
 	crypto_free_cipher(ctx->child);
 }
 

From 6c2205b8ffec035f4925b8ee84b7758afeee58b5 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Wed, 9 Nov 2011 11:50:31 +0800
Subject: [PATCH 09/54] crypto: lrw - add interface for parallelized cipher
 implementations

Export the gf128mul table initialization routines and add an lrw_crypt()
function that can be used by cipher implementations that benefit from
parallelized cipher operations.

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/lrw.c         | 106 +++++++++++++++++++++++++++++++++++--------
 include/crypto/lrw.h |  43 ++++++++++++++++++
 2 files changed, 129 insertions(+), 20 deletions(-)
 create mode 100644 include/crypto/lrw.h
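
Note: a rough sketch of how a parallel cipher implementation is expected to
drive lrw_crypt(). Only the lrw_* names and the struct lrw_crypt_req fields
come from this patch; the my_* names, the batch size and the context layout
are illustrative. crypt_fn receives a multiple of LRW_BLOCK_SIZE bytes of
tweak-whitened data and must encrypt it in place:

	#include <linux/crypto.h>
	#include <crypto/lrw.h>

	#define MY_LRW_PARALLEL_BLOCKS 8	/* illustrative batch size */

	struct my_lrw_ctx {
		struct lrw_table_ctx lrw_table;	/* set up via lrw_init_table() */
		void *cipher_ctx;		/* underlying cipher state */
	};

	/* ECB-encrypts nbytes (a multiple of LRW_BLOCK_SIZE) in place. */
	static void my_encrypt_blocks(void *ctx, u8 *blks, unsigned int nbytes);

	static int my_lrw_encrypt(struct blkcipher_desc *desc,
				  struct scatterlist *dst,
				  struct scatterlist *src,
				  unsigned int nbytes)
	{
		struct my_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
		be128 buf[MY_LRW_PARALLEL_BLOCKS];	/* per-call tweak buffer */
		struct lrw_crypt_req req = {
			.tbuf = buf,
			.tbuflen = sizeof(buf),
			.table_ctx = &ctx->lrw_table,
			.crypt_ctx = ctx->cipher_ctx,
			.crypt_fn = my_encrypt_blocks,
		};

		return lrw_crypt(desc, dst, src, nbytes, &req);
	}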

diff --git a/crypto/lrw.c b/crypto/lrw.c
index 91c17fa18374..ba42acc4deba 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
  *
- * Based om ecb.c
+ * Based on ecb.c
  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -16,6 +16,7 @@
  * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
  *
  * The test vectors are included in the testing module tcrypt.[ch] */
+
 #include <crypto/algapi.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -26,23 +27,7 @@
 
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
-
-#define LRW_BLOCK_SIZE 16
-
-struct lrw_table_ctx {
-	/* optimizes multiplying a random (non incrementing, as at the
-	 * start of a new sector) value with key2, we could also have
-	 * used 4k optimization tables or no optimization at all. In the
-	 * latter case we would have to store key2 here */
-	struct gf128mul_64k *table;
-	/* stores:
-	 *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
-	 *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
-	 *  key2*{ 0,0,...1,1,1,1,1 }, etc
-	 * needed for optimized multiplication of incrementing values
-	 * with key2 */
-	be128 mulinc[128];
-};
+#include <crypto/lrw.h>
 
 struct priv {
 	struct crypto_cipher *child;
@@ -60,7 +45,7 @@ static inline void setbit128_bbe(void *b, int bit)
 			), b);
 }
 
-static int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
+int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
 {
 	be128 tmp = { 0 };
 	int i;
@@ -82,12 +67,14 @@ static int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(lrw_init_table);
 
-static void lrw_free_table(struct lrw_table_ctx *ctx)
+void lrw_free_table(struct lrw_table_ctx *ctx)
 {
 	if (ctx->table)
 		gf128mul_free_64k(ctx->table);
 }
+EXPORT_SYMBOL_GPL(lrw_free_table);
 
 static int setkey(struct crypto_tfm *parent, const u8 *key,
 		  unsigned int keylen)
@@ -227,6 +214,85 @@ static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		     crypto_cipher_alg(ctx->child)->cia_decrypt);
 }
 
+int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
+	      struct scatterlist *ssrc, unsigned int nbytes,
+	      struct lrw_crypt_req *req)
+{
+	const unsigned int bsize = LRW_BLOCK_SIZE;
+	const unsigned int max_blks = req->tbuflen / bsize;
+	struct lrw_table_ctx *ctx = req->table_ctx;
+	struct blkcipher_walk walk;
+	unsigned int nblocks;
+	be128 *iv, *src, *dst, *t;
+	be128 *t_buf = req->tbuf;
+	int err, i;
+
+	BUG_ON(max_blks < 1);
+
+	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);
+
+	err = blkcipher_walk_virt(desc, &walk);
+	nbytes = walk.nbytes;
+	if (!nbytes)
+		return err;
+
+	nblocks = min(walk.nbytes / bsize, max_blks);
+	src = (be128 *)walk.src.virt.addr;
+	dst = (be128 *)walk.dst.virt.addr;
+
+	/* calculate first value of T */
+	iv = (be128 *)walk.iv;
+	t_buf[0] = *iv;
+
+	/* T <- I*Key2 */
+	gf128mul_64k_bbe(&t_buf[0], ctx->table);
+
+	i = 0;
+	goto first;
+
+	for (;;) {
+		do {
+			for (i = 0; i < nblocks; i++) {
+				/* T <- I*Key2, using the optimization
+				 * discussed in the specification */
+				be128_xor(&t_buf[i], t,
+						&ctx->mulinc[get_index128(iv)]);
+				inc(iv);
+first:
+				t = &t_buf[i];
+
+				/* PP <- T xor P */
+				be128_xor(dst + i, t, src + i);
+			}
+
+			/* CC <- E(Key2,PP) */
+			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
+				      nblocks * bsize);
+
+			/* C <- T xor CC */
+			for (i = 0; i < nblocks; i++)
+				be128_xor(dst + i, dst + i, &t_buf[i]);
+
+			src += nblocks;
+			dst += nblocks;
+			nbytes -= nblocks * bsize;
+			nblocks = min(nbytes / bsize, max_blks);
+		} while (nblocks > 0);
+
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+		nbytes = walk.nbytes;
+		if (!nbytes)
+			break;
+
+		nblocks = min(nbytes / bsize, max_blks);
+		src = (be128 *)walk.src.virt.addr;
+		dst = (be128 *)walk.dst.virt.addr;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(lrw_crypt);
+
 static int init_tfm(struct crypto_tfm *tfm)
 {
 	struct crypto_cipher *cipher;
diff --git a/include/crypto/lrw.h b/include/crypto/lrw.h
new file mode 100644
index 000000000000..25a2c8716375
--- /dev/null
+++ b/include/crypto/lrw.h
@@ -0,0 +1,43 @@
+#ifndef _CRYPTO_LRW_H
+#define _CRYPTO_LRW_H
+
+#include <crypto/b128ops.h>
+
+struct scatterlist;
+struct gf128mul_64k;
+struct blkcipher_desc;
+
+#define LRW_BLOCK_SIZE 16
+
+struct lrw_table_ctx {
+	/* optimizes multiplying a random (non incrementing, as at the
+	 * start of a new sector) value with key2, we could also have
+	 * used 4k optimization tables or no optimization at all. In the
+	 * latter case we would have to store key2 here */
+	struct gf128mul_64k *table;
+	/* stores:
+	 *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
+	 *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
+	 *  key2*{ 0,0,...1,1,1,1,1 }, etc
+	 * needed for optimized multiplication of incrementing values
+	 * with key2 */
+	be128 mulinc[128];
+};
+
+int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak);
+void lrw_free_table(struct lrw_table_ctx *ctx);
+
+struct lrw_crypt_req {
+	be128 *tbuf;
+	unsigned int tbuflen;
+
+	struct lrw_table_ctx *table_ctx;
+	void *crypt_ctx;
+	void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes);
+};
+
+int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+	      struct scatterlist *src, unsigned int nbytes,
+	      struct lrw_crypt_req *req);
+
+#endif  /* _CRYPTO_LRW_H */

From d7bfc0fa31bc237a5d49e9c5638676a5dedff6fc Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 13:32:34 +0300
Subject: [PATCH 10/54] crypto: testmgr - add lrw(serpent) test vectors

Add test vectors for lrw(serpent). These are generated from lrw(aes) test vectors.

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/testmgr.c |  15 ++
 crypto/testmgr.h | 502 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 517 insertions(+)

diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 49436b900081..3cfd8b05554c 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2236,6 +2236,21 @@ static const struct alg_test_desc alg_test_descs[] = {
 				}
 			}
 		}
+	}, {
+		.alg = "lrw(serpent)",
+		.test = alg_test_skcipher,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = serpent_lrw_enc_tv_template,
+					.count = SERPENT_LRW_ENC_TEST_VECTORS
+				},
+				.dec = {
+					.vecs = serpent_lrw_dec_tv_template,
+					.count = SERPENT_LRW_DEC_TEST_VECTORS
+				}
+			}
+		}
 	}, {
 		.alg = "lzo",
 		.test = alg_test_comp,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index ed4aec950faf..1f7c3fd064c0 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -3108,6 +3108,9 @@ static struct cipher_testvec tf_ctr_dec_tv_template[] = {
 #define SERPENT_CTR_ENC_TEST_VECTORS	2
 #define SERPENT_CTR_DEC_TEST_VECTORS	2
 
+#define SERPENT_LRW_ENC_TEST_VECTORS	8
+#define SERPENT_LRW_DEC_TEST_VECTORS	8
+
 static struct cipher_testvec serpent_enc_tv_template[] = {
 	{
 		.input	= "\x00\x01\x02\x03\x04\x05\x06\x07"
@@ -3665,6 +3668,505 @@ static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
 	},
 };
 
+static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
+	/* Generated from AES-LRW test vectors */
+	{
+		.key	= "\x45\x62\xac\x25\xf8\x28\x17\x6d"
+			  "\x4c\x26\x84\x14\xb5\x68\x01\x85"
+			  "\x25\x8e\x2a\x05\xe7\x3e\x9d\x03"
+			  "\xee\x5a\x83\x0c\xcc\x09\x4c\x87",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.ilen	= 16,
+		.result	= "\x6f\xbf\xd4\xa4\x5d\x71\x16\x79"
+			  "\x63\x9c\xa6\x8e\x40\xbe\x0d\x8a",
+		.rlen	= 16,
+	}, {
+		.key	= "\x59\x70\x47\x14\xf5\x57\x47\x8c"
+			  "\xd7\x79\xe8\x0f\x54\x88\x79\x44"
+			  "\x0d\x48\xf0\xb7\xb1\x5a\x53\xea"
+			  "\x1c\xaa\x6b\x29\xc2\xca\xfb\xaf",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x02",
+		.input	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.ilen	= 16,
+		.result	= "\xfd\xb2\x66\x98\x80\x96\x55\xad"
+			  "\x08\x94\x54\x9c\x21\x7c\x69\xe3",
+		.rlen	= 16,
+	}, {
+		.key	= "\xd8\x2a\x91\x34\xb2\x6a\x56\x50"
+			  "\x30\xfe\x69\xe2\x37\x7f\x98\x47"
+			  "\xcd\xf9\x0b\x16\x0c\x64\x8f\xb6"
+			  "\xb0\x0d\x0d\x1b\xae\x85\x87\x1f",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x02\x00\x00\x00\x00",
+		.input	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.ilen	= 16,
+		.result	= "\x14\x5e\x3d\x70\xc0\x6e\x9c\x34"
+			  "\x5b\x5e\xcf\x0f\xe4\x8c\x21\x5c",
+		.rlen	= 16,
+	}, {
+		.key	= "\x0f\x6a\xef\xf8\xd3\xd2\xbb\x15"
+			  "\x25\x83\xf7\x3c\x1f\x01\x28\x74"
+			  "\xca\xc6\xbc\x35\x4d\x4a\x65\x54"
+			  "\x90\xae\x61\xcf\x7b\xae\xbd\xcc"
+			  "\xad\xe4\x94\xc5\x4a\x29\xae\x70",
+		.klen	= 40,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.ilen	= 16,
+		.result	= "\x25\x39\xaa\xa5\xf0\x65\xc8\xdc"
+			  "\x5d\x45\x95\x30\x8f\xff\x2f\x1b",
+		.rlen	= 16,
+	}, {
+		.key	= "\x8a\xd4\xee\x10\x2f\xbd\x81\xff"
+			  "\xf8\x86\xce\xac\x93\xc5\xad\xc6"
+			  "\xa0\x19\x07\xc0\x9d\xf7\xbb\xdd"
+			  "\x52\x13\xb2\xb7\xf0\xff\x11\xd8"
+			  "\xd6\x08\xd0\xcd\x2e\xb1\x17\x6f",
+		.klen	= 40,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x02\x00\x00\x00\x00",
+		.input	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.ilen	= 16,
+		.result	= "\x0c\x20\x20\x63\xd6\x8b\xfc\x8f"
+			  "\xc0\xe2\x17\xbb\xd2\x59\x6f\x26",
+		.rlen	= 16,
+	}, {
+		.key	= "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
+			  "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
+			  "\xfe\xf1\xa9\xf3\x7b\xbc\x8d\x21"
+			  "\xa7\x9c\x21\xf8\xcb\x90\x02\x89"
+			  "\xa8\x45\x34\x8e\xc8\xc5\xb5\xf1"
+			  "\x26\xf5\x0e\x76\xfe\xfd\x1b\x1e",
+		.klen	= 48,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.ilen	= 16,
+		.result	= "\xc1\x35\x2e\x53\xf0\x96\x4d\x9c"
+			  "\x2e\x18\xe6\x99\xcd\xd3\x15\x68",
+		.rlen	= 16,
+	}, {
+		.key	= "\xfb\x76\x15\xb2\x3d\x80\x89\x1d"
+			  "\xd4\x70\x98\x0b\xc7\x95\x84\xc8"
+			  "\xb2\xfb\x64\xce\x60\x97\x87\x8d"
+			  "\x17\xfc\xe4\x5a\x49\xe8\x30\xb7"
+			  "\x6e\x78\x17\xe7\x2d\x5e\x12\xd4"
+			  "\x60\x64\x04\x7a\xf1\x2f\x9e\x0c",
+		.klen	= 48,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x02\x00\x00\x00\x00",
+		.input	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.ilen	= 16,
+		.result	= "\x86\x0a\xc6\xa9\x1a\x9f\xe7\xe6"
+			  "\x64\x3b\x33\xd6\xd5\x84\xd6\xdf",
+		.rlen	= 16,
+	}, {
+		.key	= "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
+			  "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
+			  "\xfe\xf1\xa9\xf3\x7b\xbc\x8d\x21"
+			  "\xa7\x9c\x21\xf8\xcb\x90\x02\x89"
+			  "\xa8\x45\x34\x8e\xc8\xc5\xb5\xf1"
+			  "\x26\xf5\x0e\x76\xfe\xfd\x1b\x1e",
+		.klen	= 48,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x05\x11\xb7\x18\xab\xc6\x2d\xac"
+			  "\x70\x5d\xf6\x22\x94\xcd\xe5\x6c"
+			  "\x17\x6b\xf6\x1c\xf0\xf3\x6e\xf8"
+			  "\x50\x38\x1f\x71\x49\xb6\x57\xd6"
+			  "\x8f\xcb\x8d\x6b\xe3\xa6\x29\x90"
+			  "\xfe\x2a\x62\x82\xae\x6d\x8b\xf6"
+			  "\xad\x1e\x9e\x20\x5f\x38\xbe\x04"
+			  "\xda\x10\x8e\xed\xa2\xa4\x87\xab"
+			  "\xda\x6b\xb4\x0c\x75\xba\xd3\x7c"
+			  "\xc9\xac\x42\x31\x95\x7c\xc9\x04"
+			  "\xeb\xd5\x6e\x32\x69\x8a\xdb\xa6"
+			  "\x15\xd7\x3f\x4f\x2f\x66\x69\x03"
+			  "\x9c\x1f\x54\x0f\xde\x1f\xf3\x65"
+			  "\x4c\x96\x12\xed\x7c\x92\x03\x01"
+			  "\x6f\xbc\x35\x93\xac\xf1\x27\xf1"
+			  "\xb4\x96\x82\x5a\x5f\xb0\xa0\x50"
+			  "\x89\xa4\x8e\x66\x44\x85\xcc\xfd"
+			  "\x33\x14\x70\xe3\x96\xb2\xc3\xd3"
+			  "\xbb\x54\x5a\x1a\xf9\x74\xa2\xc5"
+			  "\x2d\x64\x75\xdd\xb4\x54\xe6\x74"
+			  "\x8c\xd3\x9d\x9e\x86\xab\x51\x53"
+			  "\xb7\x93\x3e\x6f\xd0\x4e\x2c\x40"
+			  "\xf6\xa8\x2e\x3e\x9d\xf4\x66\xa5"
+			  "\x76\x12\x73\x44\x1a\x56\xd7\x72"
+			  "\x88\xcd\x21\x8c\x4c\x0f\xfe\xda"
+			  "\x95\xe0\x3a\xa6\xa5\x84\x46\xcd"
+			  "\xd5\x3e\x9d\x3a\xe2\x67\xe6\x60"
+			  "\x1a\xe2\x70\x85\x58\xc2\x1b\x09"
+			  "\xe1\xd7\x2c\xca\xad\xa8\x8f\xf9"
+			  "\xac\xb3\x0e\xdb\xca\x2e\xe2\xb8"
+			  "\x51\x71\xd9\x3c\x6c\xf1\x56\xf8"
+			  "\xea\x9c\xf1\xfb\x0c\xe6\xb7\x10"
+			  "\x1c\xf8\xa9\x7c\xe8\x53\x35\xc1"
+			  "\x90\x3e\x76\x4a\x74\xa4\x21\x2c"
+			  "\xf6\x2c\x4e\x0f\x94\x3a\x88\x2e"
+			  "\x41\x09\x6a\x33\x7d\xf6\xdd\x3f"
+			  "\x8d\x23\x31\x74\x84\xeb\x88\x6e"
+			  "\xcc\xb9\xbc\x22\x83\x19\x07\x22"
+			  "\xa5\x2d\xdf\xa5\xf3\x80\x85\x78"
+			  "\x84\x39\x6a\x6d\x6a\x99\x4f\xa5"
+			  "\x15\xfe\x46\xb0\xe4\x6c\xa5\x41"
+			  "\x3c\xce\x8f\x42\x60\x71\xa7\x75"
+			  "\x08\x40\x65\x8a\x82\xbf\xf5\x43"
+			  "\x71\x96\xa9\x4d\x44\x8a\x20\xbe"
+			  "\xfa\x4d\xbb\xc0\x7d\x31\x96\x65"
+			  "\xe7\x75\xe5\x3e\xfd\x92\x3b\xc9"
+			  "\x55\xbb\x16\x7e\xf7\xc2\x8c\xa4"
+			  "\x40\x1d\xe5\xef\x0e\xdf\xe4\x9a"
+			  "\x62\x73\x65\xfd\x46\x63\x25\x3d"
+			  "\x2b\xaf\xe5\x64\xfe\xa5\x5c\xcf"
+			  "\x24\xf3\xb4\xac\x64\xba\xdf\x4b"
+			  "\xc6\x96\x7d\x81\x2d\x8d\x97\xf7"
+			  "\xc5\x68\x77\x84\x32\x2b\xcc\x85"
+			  "\x74\x96\xf0\x12\x77\x61\xb9\xeb"
+			  "\x71\xaa\x82\xcb\x1c\xdb\x89\xc8"
+			  "\xc6\xb5\xe3\x5c\x7d\x39\x07\x24"
+			  "\xda\x39\x87\x45\xc0\x2b\xbb\x01"
+			  "\xac\xbc\x2a\x5c\x7f\xfc\xe8\xce"
+			  "\x6d\x9c\x6f\xed\xd3\xc1\xa1\xd6"
+			  "\xc5\x55\xa9\x66\x2f\xe1\xc8\x32"
+			  "\xa6\x5d\xa4\x3a\x98\x73\xe8\x45"
+			  "\xa4\xc7\xa8\xb4\xf6\x13\x03\xf6"
+			  "\xe9\x2e\xc4\x29\x0f\x84\xdb\xc4"
+			  "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
+		.ilen	= 512,
+		.result	= "\xe3\x5a\x38\x0f\x4d\x92\x3a\x74"
+			  "\x15\xb1\x50\x8c\x9a\xd8\x99\x1d"
+			  "\x82\xec\xf1\x5f\x03\x6d\x02\x58"
+			  "\x90\x67\xfc\xdd\x8d\xe1\x38\x08"
+			  "\x7b\xc9\x9b\x4b\x04\x09\x50\x15"
+			  "\xce\xab\xda\x33\x30\x20\x12\xfa"
+			  "\x83\xc4\xa6\x9a\x2e\x7d\x90\xd9"
+			  "\xa6\xa6\x67\x43\xb4\xa7\xa8\x5c"
+			  "\xbb\x6a\x49\x2b\x8b\xf8\xd0\x22"
+			  "\xe5\x9e\xba\xe8\x8c\x67\xb8\x5b"
+			  "\x60\xbc\xf5\xa4\x95\x4e\x66\xe5"
+			  "\x6d\x8e\xa9\xf6\x65\x2e\x04\xf5"
+			  "\xba\xb5\xdb\x88\xc2\xf6\x7a\x4b"
+			  "\x89\x58\x7c\x9a\xae\x26\xe8\xb7"
+			  "\xb7\x28\xcc\xd6\xcc\xa5\x98\x4d"
+			  "\xb9\x91\xcb\xb4\xe4\x8b\x96\x47"
+			  "\x5f\x03\x8b\xdd\x94\xd1\xee\x12"
+			  "\xa7\x83\x80\xf2\xc1\x15\x74\x4f"
+			  "\x49\xf9\xb0\x7e\x6f\xdc\x73\x2f"
+			  "\xe2\xcf\xe0\x1b\x34\xa5\xa0\x52"
+			  "\xfb\x3c\x5d\x85\x91\xe6\x6d\x98"
+			  "\x04\xd6\xdd\x4c\x00\x64\xd9\x54"
+			  "\x5c\x3c\x08\x1d\x4c\x06\x9f\xb8"
+			  "\x1c\x4d\x8d\xdc\xa4\x3c\xb9\x3b"
+			  "\x9e\x85\xce\xc3\xa8\x4a\x0c\xd9"
+			  "\x04\xc3\x6f\x17\x66\xa9\x1f\x59"
+			  "\xd9\xe2\x19\x36\xa3\x88\xb8\x0b"
+			  "\x0f\x4a\x4d\xf8\xc8\x6f\xd5\x43"
+			  "\xeb\xa0\xab\x1f\x61\xc0\x06\xeb"
+			  "\x93\xb7\xb8\x6f\x0d\xbd\x07\x49"
+			  "\xb3\xac\x5d\xcf\x31\xa0\x27\x26"
+			  "\x21\xbe\x94\x2e\x19\xea\xf4\xee"
+			  "\xb5\x13\x89\xf7\x94\x0b\xef\x59"
+			  "\x44\xc5\x78\x8b\x3c\x3b\x71\x20"
+			  "\xf9\x35\x0c\x70\x74\xdc\x5b\xc2"
+			  "\xb4\x11\x0e\x2c\x61\xa1\x52\x46"
+			  "\x18\x11\x16\xc6\x86\x44\xa7\xaf"
+			  "\xd5\x0c\x7d\xa6\x9e\x25\x2d\x1b"
+			  "\x9a\x8f\x0f\xf8\x6a\x61\xa0\xea"
+			  "\x3f\x0e\x90\xd6\x8f\x83\x30\x64"
+			  "\xb5\x51\x2d\x08\x3c\xcd\x99\x36"
+			  "\x96\xd4\xb1\xb5\x48\x30\xca\x48"
+			  "\xf7\x11\xa8\xf5\x97\x8a\x6a\x6d"
+			  "\x12\x33\x2f\xc0\xe8\xda\xec\x8a"
+			  "\xe1\x88\x72\x63\xde\x20\xa3\xe1"
+			  "\x8e\xac\x84\x37\x35\xf5\xf7\x3f"
+			  "\x00\x02\x0e\xe4\xc1\x53\x68\x3f"
+			  "\xaa\xd5\xac\x52\x3d\x20\x2f\x4d"
+			  "\x7c\x83\xd0\xbd\xaa\x97\x35\x36"
+			  "\x98\x88\x59\x5d\xe7\x24\xe3\x90"
+			  "\x9d\x30\x47\xa7\xc3\x60\x35\xf4"
+			  "\xd5\xdb\x0e\x4d\x44\xc1\x81\x8b"
+			  "\xfd\xbd\xc3\x2b\xba\x68\xfe\x8d"
+			  "\x49\x5a\x3c\x8a\xa3\x01\xae\x25"
+			  "\x42\xab\xd2\x87\x1b\x35\xd6\xd2"
+			  "\xd7\x70\x1c\x1f\x72\xd1\xe1\x39"
+			  "\x1c\x58\xa2\xb4\xd0\x78\x55\x72"
+			  "\x76\x59\xea\xd9\xd7\x6e\x63\x8b"
+			  "\xcc\x9b\xa7\x74\x89\xfc\xa3\x68"
+			  "\x86\x28\xd1\xbb\x54\x8d\x66\xad"
+			  "\x2a\x92\xf9\x4e\x04\x3d\xae\xfd"
+			  "\x1b\x2b\x7f\xc3\x2f\x1a\x78\x0a"
+			  "\x5c\xc6\x84\xfe\x7c\xcb\x26\xfd"
+			  "\xd9\x51\x0f\xd7\x94\x2f\xc5\xa7",
+		.rlen	= 512,
+	},
+};
+
+static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
+	/* Generated from AES-LRW test vectors */
+	/* same as enc vectors with input and result reversed */
+	{
+		.key	= "\x45\x62\xac\x25\xf8\x28\x17\x6d"
+			  "\x4c\x26\x84\x14\xb5\x68\x01\x85"
+			  "\x25\x8e\x2a\x05\xe7\x3e\x9d\x03"
+			  "\xee\x5a\x83\x0c\xcc\x09\x4c\x87",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x6f\xbf\xd4\xa4\x5d\x71\x16\x79"
+			  "\x63\x9c\xa6\x8e\x40\xbe\x0d\x8a",
+		.ilen	= 16,
+		.result	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.rlen	= 16,
+	}, {
+		.key	= "\x59\x70\x47\x14\xf5\x57\x47\x8c"
+			  "\xd7\x79\xe8\x0f\x54\x88\x79\x44"
+			  "\x0d\x48\xf0\xb7\xb1\x5a\x53\xea"
+			  "\x1c\xaa\x6b\x29\xc2\xca\xfb\xaf",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x02",
+		.input	= "\xfd\xb2\x66\x98\x80\x96\x55\xad"
+			  "\x08\x94\x54\x9c\x21\x7c\x69\xe3",
+		.ilen	= 16,
+		.result	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.rlen	= 16,
+	}, {
+		.key	= "\xd8\x2a\x91\x34\xb2\x6a\x56\x50"
+			  "\x30\xfe\x69\xe2\x37\x7f\x98\x47"
+			  "\xcd\xf9\x0b\x16\x0c\x64\x8f\xb6"
+			  "\xb0\x0d\x0d\x1b\xae\x85\x87\x1f",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x02\x00\x00\x00\x00",
+		.input	= "\x14\x5e\x3d\x70\xc0\x6e\x9c\x34"
+			  "\x5b\x5e\xcf\x0f\xe4\x8c\x21\x5c",
+		.ilen	= 16,
+		.result	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.rlen	= 16,
+	}, {
+		.key	= "\x0f\x6a\xef\xf8\xd3\xd2\xbb\x15"
+			  "\x25\x83\xf7\x3c\x1f\x01\x28\x74"
+			  "\xca\xc6\xbc\x35\x4d\x4a\x65\x54"
+			  "\x90\xae\x61\xcf\x7b\xae\xbd\xcc"
+			  "\xad\xe4\x94\xc5\x4a\x29\xae\x70",
+		.klen	= 40,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x25\x39\xaa\xa5\xf0\x65\xc8\xdc"
+			  "\x5d\x45\x95\x30\x8f\xff\x2f\x1b",
+		.ilen	= 16,
+		.result	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.rlen	= 16,
+	}, {
+		.key	= "\x8a\xd4\xee\x10\x2f\xbd\x81\xff"
+			  "\xf8\x86\xce\xac\x93\xc5\xad\xc6"
+			  "\xa0\x19\x07\xc0\x9d\xf7\xbb\xdd"
+			  "\x52\x13\xb2\xb7\xf0\xff\x11\xd8"
+			  "\xd6\x08\xd0\xcd\x2e\xb1\x17\x6f",
+		.klen	= 40,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x02\x00\x00\x00\x00",
+		.input	= "\x0c\x20\x20\x63\xd6\x8b\xfc\x8f"
+			  "\xc0\xe2\x17\xbb\xd2\x59\x6f\x26",
+		.ilen	= 16,
+		.result	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.rlen	= 16,
+	}, {
+		.key	= "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
+			  "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
+			  "\xfe\xf1\xa9\xf3\x7b\xbc\x8d\x21"
+			  "\xa7\x9c\x21\xf8\xcb\x90\x02\x89"
+			  "\xa8\x45\x34\x8e\xc8\xc5\xb5\xf1"
+			  "\x26\xf5\x0e\x76\xfe\xfd\x1b\x1e",
+		.klen	= 48,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\xc1\x35\x2e\x53\xf0\x96\x4d\x9c"
+			  "\x2e\x18\xe6\x99\xcd\xd3\x15\x68",
+		.ilen	= 16,
+		.result	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.rlen	= 16,
+	}, {
+		.key	= "\xfb\x76\x15\xb2\x3d\x80\x89\x1d"
+			  "\xd4\x70\x98\x0b\xc7\x95\x84\xc8"
+			  "\xb2\xfb\x64\xce\x60\x97\x87\x8d"
+			  "\x17\xfc\xe4\x5a\x49\xe8\x30\xb7"
+			  "\x6e\x78\x17\xe7\x2d\x5e\x12\xd4"
+			  "\x60\x64\x04\x7a\xf1\x2f\x9e\x0c",
+		.klen	= 48,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x02\x00\x00\x00\x00",
+		.input	= "\x86\x0a\xc6\xa9\x1a\x9f\xe7\xe6"
+			  "\x64\x3b\x33\xd6\xd5\x84\xd6\xdf",
+		.ilen	= 16,
+		.result	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.rlen	= 16,
+	}, {
+		.key	= "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
+			  "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
+			  "\xfe\xf1\xa9\xf3\x7b\xbc\x8d\x21"
+			  "\xa7\x9c\x21\xf8\xcb\x90\x02\x89"
+			  "\xa8\x45\x34\x8e\xc8\xc5\xb5\xf1"
+			  "\x26\xf5\x0e\x76\xfe\xfd\x1b\x1e",
+		.klen	= 48,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\xe3\x5a\x38\x0f\x4d\x92\x3a\x74"
+			  "\x15\xb1\x50\x8c\x9a\xd8\x99\x1d"
+			  "\x82\xec\xf1\x5f\x03\x6d\x02\x58"
+			  "\x90\x67\xfc\xdd\x8d\xe1\x38\x08"
+			  "\x7b\xc9\x9b\x4b\x04\x09\x50\x15"
+			  "\xce\xab\xda\x33\x30\x20\x12\xfa"
+			  "\x83\xc4\xa6\x9a\x2e\x7d\x90\xd9"
+			  "\xa6\xa6\x67\x43\xb4\xa7\xa8\x5c"
+			  "\xbb\x6a\x49\x2b\x8b\xf8\xd0\x22"
+			  "\xe5\x9e\xba\xe8\x8c\x67\xb8\x5b"
+			  "\x60\xbc\xf5\xa4\x95\x4e\x66\xe5"
+			  "\x6d\x8e\xa9\xf6\x65\x2e\x04\xf5"
+			  "\xba\xb5\xdb\x88\xc2\xf6\x7a\x4b"
+			  "\x89\x58\x7c\x9a\xae\x26\xe8\xb7"
+			  "\xb7\x28\xcc\xd6\xcc\xa5\x98\x4d"
+			  "\xb9\x91\xcb\xb4\xe4\x8b\x96\x47"
+			  "\x5f\x03\x8b\xdd\x94\xd1\xee\x12"
+			  "\xa7\x83\x80\xf2\xc1\x15\x74\x4f"
+			  "\x49\xf9\xb0\x7e\x6f\xdc\x73\x2f"
+			  "\xe2\xcf\xe0\x1b\x34\xa5\xa0\x52"
+			  "\xfb\x3c\x5d\x85\x91\xe6\x6d\x98"
+			  "\x04\xd6\xdd\x4c\x00\x64\xd9\x54"
+			  "\x5c\x3c\x08\x1d\x4c\x06\x9f\xb8"
+			  "\x1c\x4d\x8d\xdc\xa4\x3c\xb9\x3b"
+			  "\x9e\x85\xce\xc3\xa8\x4a\x0c\xd9"
+			  "\x04\xc3\x6f\x17\x66\xa9\x1f\x59"
+			  "\xd9\xe2\x19\x36\xa3\x88\xb8\x0b"
+			  "\x0f\x4a\x4d\xf8\xc8\x6f\xd5\x43"
+			  "\xeb\xa0\xab\x1f\x61\xc0\x06\xeb"
+			  "\x93\xb7\xb8\x6f\x0d\xbd\x07\x49"
+			  "\xb3\xac\x5d\xcf\x31\xa0\x27\x26"
+			  "\x21\xbe\x94\x2e\x19\xea\xf4\xee"
+			  "\xb5\x13\x89\xf7\x94\x0b\xef\x59"
+			  "\x44\xc5\x78\x8b\x3c\x3b\x71\x20"
+			  "\xf9\x35\x0c\x70\x74\xdc\x5b\xc2"
+			  "\xb4\x11\x0e\x2c\x61\xa1\x52\x46"
+			  "\x18\x11\x16\xc6\x86\x44\xa7\xaf"
+			  "\xd5\x0c\x7d\xa6\x9e\x25\x2d\x1b"
+			  "\x9a\x8f\x0f\xf8\x6a\x61\xa0\xea"
+			  "\x3f\x0e\x90\xd6\x8f\x83\x30\x64"
+			  "\xb5\x51\x2d\x08\x3c\xcd\x99\x36"
+			  "\x96\xd4\xb1\xb5\x48\x30\xca\x48"
+			  "\xf7\x11\xa8\xf5\x97\x8a\x6a\x6d"
+			  "\x12\x33\x2f\xc0\xe8\xda\xec\x8a"
+			  "\xe1\x88\x72\x63\xde\x20\xa3\xe1"
+			  "\x8e\xac\x84\x37\x35\xf5\xf7\x3f"
+			  "\x00\x02\x0e\xe4\xc1\x53\x68\x3f"
+			  "\xaa\xd5\xac\x52\x3d\x20\x2f\x4d"
+			  "\x7c\x83\xd0\xbd\xaa\x97\x35\x36"
+			  "\x98\x88\x59\x5d\xe7\x24\xe3\x90"
+			  "\x9d\x30\x47\xa7\xc3\x60\x35\xf4"
+			  "\xd5\xdb\x0e\x4d\x44\xc1\x81\x8b"
+			  "\xfd\xbd\xc3\x2b\xba\x68\xfe\x8d"
+			  "\x49\x5a\x3c\x8a\xa3\x01\xae\x25"
+			  "\x42\xab\xd2\x87\x1b\x35\xd6\xd2"
+			  "\xd7\x70\x1c\x1f\x72\xd1\xe1\x39"
+			  "\x1c\x58\xa2\xb4\xd0\x78\x55\x72"
+			  "\x76\x59\xea\xd9\xd7\x6e\x63\x8b"
+			  "\xcc\x9b\xa7\x74\x89\xfc\xa3\x68"
+			  "\x86\x28\xd1\xbb\x54\x8d\x66\xad"
+			  "\x2a\x92\xf9\x4e\x04\x3d\xae\xfd"
+			  "\x1b\x2b\x7f\xc3\x2f\x1a\x78\x0a"
+			  "\x5c\xc6\x84\xfe\x7c\xcb\x26\xfd"
+			  "\xd9\x51\x0f\xd7\x94\x2f\xc5\xa7",
+		.ilen	= 512,
+		.result	= "\x05\x11\xb7\x18\xab\xc6\x2d\xac"
+			  "\x70\x5d\xf6\x22\x94\xcd\xe5\x6c"
+			  "\x17\x6b\xf6\x1c\xf0\xf3\x6e\xf8"
+			  "\x50\x38\x1f\x71\x49\xb6\x57\xd6"
+			  "\x8f\xcb\x8d\x6b\xe3\xa6\x29\x90"
+			  "\xfe\x2a\x62\x82\xae\x6d\x8b\xf6"
+			  "\xad\x1e\x9e\x20\x5f\x38\xbe\x04"
+			  "\xda\x10\x8e\xed\xa2\xa4\x87\xab"
+			  "\xda\x6b\xb4\x0c\x75\xba\xd3\x7c"
+			  "\xc9\xac\x42\x31\x95\x7c\xc9\x04"
+			  "\xeb\xd5\x6e\x32\x69\x8a\xdb\xa6"
+			  "\x15\xd7\x3f\x4f\x2f\x66\x69\x03"
+			  "\x9c\x1f\x54\x0f\xde\x1f\xf3\x65"
+			  "\x4c\x96\x12\xed\x7c\x92\x03\x01"
+			  "\x6f\xbc\x35\x93\xac\xf1\x27\xf1"
+			  "\xb4\x96\x82\x5a\x5f\xb0\xa0\x50"
+			  "\x89\xa4\x8e\x66\x44\x85\xcc\xfd"
+			  "\x33\x14\x70\xe3\x96\xb2\xc3\xd3"
+			  "\xbb\x54\x5a\x1a\xf9\x74\xa2\xc5"
+			  "\x2d\x64\x75\xdd\xb4\x54\xe6\x74"
+			  "\x8c\xd3\x9d\x9e\x86\xab\x51\x53"
+			  "\xb7\x93\x3e\x6f\xd0\x4e\x2c\x40"
+			  "\xf6\xa8\x2e\x3e\x9d\xf4\x66\xa5"
+			  "\x76\x12\x73\x44\x1a\x56\xd7\x72"
+			  "\x88\xcd\x21\x8c\x4c\x0f\xfe\xda"
+			  "\x95\xe0\x3a\xa6\xa5\x84\x46\xcd"
+			  "\xd5\x3e\x9d\x3a\xe2\x67\xe6\x60"
+			  "\x1a\xe2\x70\x85\x58\xc2\x1b\x09"
+			  "\xe1\xd7\x2c\xca\xad\xa8\x8f\xf9"
+			  "\xac\xb3\x0e\xdb\xca\x2e\xe2\xb8"
+			  "\x51\x71\xd9\x3c\x6c\xf1\x56\xf8"
+			  "\xea\x9c\xf1\xfb\x0c\xe6\xb7\x10"
+			  "\x1c\xf8\xa9\x7c\xe8\x53\x35\xc1"
+			  "\x90\x3e\x76\x4a\x74\xa4\x21\x2c"
+			  "\xf6\x2c\x4e\x0f\x94\x3a\x88\x2e"
+			  "\x41\x09\x6a\x33\x7d\xf6\xdd\x3f"
+			  "\x8d\x23\x31\x74\x84\xeb\x88\x6e"
+			  "\xcc\xb9\xbc\x22\x83\x19\x07\x22"
+			  "\xa5\x2d\xdf\xa5\xf3\x80\x85\x78"
+			  "\x84\x39\x6a\x6d\x6a\x99\x4f\xa5"
+			  "\x15\xfe\x46\xb0\xe4\x6c\xa5\x41"
+			  "\x3c\xce\x8f\x42\x60\x71\xa7\x75"
+			  "\x08\x40\x65\x8a\x82\xbf\xf5\x43"
+			  "\x71\x96\xa9\x4d\x44\x8a\x20\xbe"
+			  "\xfa\x4d\xbb\xc0\x7d\x31\x96\x65"
+			  "\xe7\x75\xe5\x3e\xfd\x92\x3b\xc9"
+			  "\x55\xbb\x16\x7e\xf7\xc2\x8c\xa4"
+			  "\x40\x1d\xe5\xef\x0e\xdf\xe4\x9a"
+			  "\x62\x73\x65\xfd\x46\x63\x25\x3d"
+			  "\x2b\xaf\xe5\x64\xfe\xa5\x5c\xcf"
+			  "\x24\xf3\xb4\xac\x64\xba\xdf\x4b"
+			  "\xc6\x96\x7d\x81\x2d\x8d\x97\xf7"
+			  "\xc5\x68\x77\x84\x32\x2b\xcc\x85"
+			  "\x74\x96\xf0\x12\x77\x61\xb9\xeb"
+			  "\x71\xaa\x82\xcb\x1c\xdb\x89\xc8"
+			  "\xc6\xb5\xe3\x5c\x7d\x39\x07\x24"
+			  "\xda\x39\x87\x45\xc0\x2b\xbb\x01"
+			  "\xac\xbc\x2a\x5c\x7f\xfc\xe8\xce"
+			  "\x6d\x9c\x6f\xed\xd3\xc1\xa1\xd6"
+			  "\xc5\x55\xa9\x66\x2f\xe1\xc8\x32"
+			  "\xa6\x5d\xa4\x3a\x98\x73\xe8\x45"
+			  "\xa4\xc7\xa8\xb4\xf6\x13\x03\xf6"
+			  "\xe9\x2e\xc4\x29\x0f\x84\xdb\xc4"
+			  "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
+		.rlen	= 512,
+	},
+};
+
 /* Cast6 test vectors from RFC 2612 */
 #define CAST6_ENC_TEST_VECTORS	3
 #define CAST6_DEC_TEST_VECTORS  3

From 87aae4bfb2912d18f2c92a4484b9edcc8c7b3f21 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 13:32:39 +0300
Subject: [PATCH 11/54] crypto: tcrypt - add lrw(serpent) tests

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/tcrypt.c | 9 +++++++++
 crypto/tcrypt.h | 1 +
 2 files changed, 10 insertions(+)

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 5526065e8e79..9a9e170035d1 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -996,6 +996,7 @@ static int do_test(int m)
 		ret += tcrypt_test("ecb(serpent)");
 		ret += tcrypt_test("cbc(serpent)");
 		ret += tcrypt_test("ctr(serpent)");
+		ret += tcrypt_test("lrw(serpent)");
 		break;
 
 	case 10:
@@ -1305,6 +1306,10 @@ static int do_test(int m)
 				  speed_template_16_32);
 		test_cipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
 				  speed_template_16_32);
+		test_cipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0,
+				  speed_template_32_48);
+		test_cipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
+				  speed_template_32_48);
 		break;
 
 	case 300:
@@ -1521,6 +1526,10 @@ static int do_test(int m)
 				   speed_template_16_32);
 		test_acipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
 				   speed_template_16_32);
+		test_acipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0,
+				   speed_template_32_48);
+		test_acipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
+				   speed_template_32_48);
 		break;
 
 	case 1000:
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 10cb925132c9..3eceaef2754d 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -51,6 +51,7 @@ static u8 speed_template_8_32[] = {8, 32, 0};
 static u8 speed_template_16_32[] = {16, 32, 0};
 static u8 speed_template_16_24_32[] = {16, 24, 32, 0};
 static u8 speed_template_32_40_48[] = {32, 40, 48, 0};
+static u8 speed_template_32_48[] = {32, 48, 0};
 static u8 speed_template_32_48_64[] = {32, 48, 64, 0};
 
 /*
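
Note: the speed templates are zero-terminated lists of key sizes in bytes, so
the new entry benchmarks lrw(serpent) with 32- and 48-byte keys, i.e. 16- and
32-byte Serpent keys plus the 16-byte LRW tweak key. An annotated restatement
of the added line, for illustration only (the hunk above is what actually
lands in tcrypt.h):

  static u8 speed_template_32_48[] = {32, 48, 0}; /* key sizes in bytes, 0-terminated */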

From 0b2a15510699754b7c777a0f17520f1960c0d13a Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 13:32:50 +0300
Subject: [PATCH 12/54] crypto: testmgr - add lrw(twofish) test vectors

Add test vectors for lrw(twofish). These are generated from lrw(aes) test vectors.
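
Each generated entry follows the cipher_testvec layout already used throughout
testmgr.h. A rough annotated skeleton (values elided, array name hypothetical;
the comments reflect how the LRW vectors below appear to use the fields):

  static struct cipher_testvec tf_lrw_example_tv[] = {
          {
                  .key    = "...",  /* Twofish key with the 16-byte LRW tweak key appended */
                  .klen   = 32,     /* 32/40/48 => 16/24/32-byte Twofish key + 16-byte tweak */
                  .iv     = "...",  /* 16-byte LRW block index, passed as the IV */
                  .input  = "...",  /* plaintext for the enc template */
                  .ilen   = 16,
                  .result = "...",  /* expected ciphertext */
                  .rlen   = 16,
          },
  };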

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/testmgr.c |  15 ++
 crypto/testmgr.h | 501 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 516 insertions(+)

diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 3cfd8b05554c..ae33d3075ce7 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2251,6 +2251,21 @@ static const struct alg_test_desc alg_test_descs[] = {
 				}
 			}
 		}
+	}, {
+		.alg = "lrw(twofish)",
+		.test = alg_test_skcipher,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = tf_lrw_enc_tv_template,
+					.count = TF_LRW_ENC_TEST_VECTORS
+				},
+				.dec = {
+					.vecs = tf_lrw_dec_tv_template,
+					.count = TF_LRW_DEC_TEST_VECTORS
+				}
+			}
+		}
 	}, {
 		.alg = "lzo",
 		.test = alg_test_comp,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 1f7c3fd064c0..4b887895c1e4 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -2717,6 +2717,8 @@ static struct cipher_testvec bf_ctr_dec_tv_template[] = {
 #define TF_CBC_DEC_TEST_VECTORS		5
 #define TF_CTR_ENC_TEST_VECTORS		2
 #define TF_CTR_DEC_TEST_VECTORS		2
+#define TF_LRW_ENC_TEST_VECTORS		8
+#define TF_LRW_DEC_TEST_VECTORS		8
 
 static struct cipher_testvec tf_enc_tv_template[] = {
 	{
@@ -3092,6 +3094,505 @@ static struct cipher_testvec tf_ctr_dec_tv_template[] = {
 	},
 };
 
+static struct cipher_testvec tf_lrw_enc_tv_template[] = {
+	/* Generated from AES-LRW test vectors */
+	{
+		.key	= "\x45\x62\xac\x25\xf8\x28\x17\x6d"
+			  "\x4c\x26\x84\x14\xb5\x68\x01\x85"
+			  "\x25\x8e\x2a\x05\xe7\x3e\x9d\x03"
+			  "\xee\x5a\x83\x0c\xcc\x09\x4c\x87",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.ilen	= 16,
+		.result	= "\xa1\x6c\x50\x69\x26\xa4\xef\x7b"
+			  "\x7c\xc6\x91\xeb\x72\xdd\x9b\xee",
+		.rlen	= 16,
+	}, {
+		.key	= "\x59\x70\x47\x14\xf5\x57\x47\x8c"
+			  "\xd7\x79\xe8\x0f\x54\x88\x79\x44"
+			  "\x0d\x48\xf0\xb7\xb1\x5a\x53\xea"
+			  "\x1c\xaa\x6b\x29\xc2\xca\xfb\xaf",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x02",
+		.input	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.ilen	= 16,
+		.result	= "\xab\x72\x0a\xad\x3b\x0c\xf0\xc9"
+			  "\x42\x2f\xf1\xae\xf1\x3c\xb1\xbd",
+		.rlen	= 16,
+	}, {
+		.key	= "\xd8\x2a\x91\x34\xb2\x6a\x56\x50"
+			  "\x30\xfe\x69\xe2\x37\x7f\x98\x47"
+			  "\xcd\xf9\x0b\x16\x0c\x64\x8f\xb6"
+			  "\xb0\x0d\x0d\x1b\xae\x85\x87\x1f",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x02\x00\x00\x00\x00",
+		.input	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.ilen	= 16,
+		.result	= "\x85\xa7\x56\x67\x08\xfa\x42\xe1"
+			  "\x22\xe6\x82\xfc\xd9\xb4\xd7\xd4",
+		.rlen	= 16,
+	}, {
+		.key	= "\x0f\x6a\xef\xf8\xd3\xd2\xbb\x15"
+			  "\x25\x83\xf7\x3c\x1f\x01\x28\x74"
+			  "\xca\xc6\xbc\x35\x4d\x4a\x65\x54"
+			  "\x90\xae\x61\xcf\x7b\xae\xbd\xcc"
+			  "\xad\xe4\x94\xc5\x4a\x29\xae\x70",
+		.klen	= 40,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.ilen	= 16,
+		.result	= "\xd2\xaf\x69\x35\x24\x1d\x0e\x1c"
+			  "\x84\x8b\x05\xe4\xa2\x2f\x16\xf5",
+		.rlen	= 16,
+	}, {
+		.key	= "\x8a\xd4\xee\x10\x2f\xbd\x81\xff"
+			  "\xf8\x86\xce\xac\x93\xc5\xad\xc6"
+			  "\xa0\x19\x07\xc0\x9d\xf7\xbb\xdd"
+			  "\x52\x13\xb2\xb7\xf0\xff\x11\xd8"
+			  "\xd6\x08\xd0\xcd\x2e\xb1\x17\x6f",
+		.klen	= 40,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x02\x00\x00\x00\x00",
+		.input	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.ilen	= 16,
+		.result	= "\x4a\x23\x56\xd7\xff\x90\xd0\x9a"
+			  "\x0d\x7c\x26\xfc\xf0\xf0\xf6\xe4",
+		.rlen	= 16,
+	}, {
+		.key	= "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
+			  "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
+			  "\xfe\xf1\xa9\xf3\x7b\xbc\x8d\x21"
+			  "\xa7\x9c\x21\xf8\xcb\x90\x02\x89"
+			  "\xa8\x45\x34\x8e\xc8\xc5\xb5\xf1"
+			  "\x26\xf5\x0e\x76\xfe\xfd\x1b\x1e",
+		.klen	= 48,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.ilen	= 16,
+		.result	= "\x30\xaf\x26\x05\x9d\x5d\x0a\x58"
+			  "\xe2\xe7\xce\x8a\xb2\x56\x6d\x76",
+		.rlen	= 16,
+	}, {
+		.key	= "\xfb\x76\x15\xb2\x3d\x80\x89\x1d"
+			  "\xd4\x70\x98\x0b\xc7\x95\x84\xc8"
+			  "\xb2\xfb\x64\xce\x60\x97\x87\x8d"
+			  "\x17\xfc\xe4\x5a\x49\xe8\x30\xb7"
+			  "\x6e\x78\x17\xe7\x2d\x5e\x12\xd4"
+			  "\x60\x64\x04\x7a\xf1\x2f\x9e\x0c",
+		.klen	= 48,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x02\x00\x00\x00\x00",
+		.input	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.ilen	= 16,
+		.result	= "\xdf\xcf\xdc\xd2\xe1\xcf\x86\x75"
+			  "\x17\x66\x5e\x0c\x14\xa1\x3d\x40",
+		.rlen	= 16,
+	}, {
+		.key	= "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
+			  "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
+			  "\xfe\xf1\xa9\xf3\x7b\xbc\x8d\x21"
+			  "\xa7\x9c\x21\xf8\xcb\x90\x02\x89"
+			  "\xa8\x45\x34\x8e\xc8\xc5\xb5\xf1"
+			  "\x26\xf5\x0e\x76\xfe\xfd\x1b\x1e",
+		.klen	= 48,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x05\x11\xb7\x18\xab\xc6\x2d\xac"
+			  "\x70\x5d\xf6\x22\x94\xcd\xe5\x6c"
+			  "\x17\x6b\xf6\x1c\xf0\xf3\x6e\xf8"
+			  "\x50\x38\x1f\x71\x49\xb6\x57\xd6"
+			  "\x8f\xcb\x8d\x6b\xe3\xa6\x29\x90"
+			  "\xfe\x2a\x62\x82\xae\x6d\x8b\xf6"
+			  "\xad\x1e\x9e\x20\x5f\x38\xbe\x04"
+			  "\xda\x10\x8e\xed\xa2\xa4\x87\xab"
+			  "\xda\x6b\xb4\x0c\x75\xba\xd3\x7c"
+			  "\xc9\xac\x42\x31\x95\x7c\xc9\x04"
+			  "\xeb\xd5\x6e\x32\x69\x8a\xdb\xa6"
+			  "\x15\xd7\x3f\x4f\x2f\x66\x69\x03"
+			  "\x9c\x1f\x54\x0f\xde\x1f\xf3\x65"
+			  "\x4c\x96\x12\xed\x7c\x92\x03\x01"
+			  "\x6f\xbc\x35\x93\xac\xf1\x27\xf1"
+			  "\xb4\x96\x82\x5a\x5f\xb0\xa0\x50"
+			  "\x89\xa4\x8e\x66\x44\x85\xcc\xfd"
+			  "\x33\x14\x70\xe3\x96\xb2\xc3\xd3"
+			  "\xbb\x54\x5a\x1a\xf9\x74\xa2\xc5"
+			  "\x2d\x64\x75\xdd\xb4\x54\xe6\x74"
+			  "\x8c\xd3\x9d\x9e\x86\xab\x51\x53"
+			  "\xb7\x93\x3e\x6f\xd0\x4e\x2c\x40"
+			  "\xf6\xa8\x2e\x3e\x9d\xf4\x66\xa5"
+			  "\x76\x12\x73\x44\x1a\x56\xd7\x72"
+			  "\x88\xcd\x21\x8c\x4c\x0f\xfe\xda"
+			  "\x95\xe0\x3a\xa6\xa5\x84\x46\xcd"
+			  "\xd5\x3e\x9d\x3a\xe2\x67\xe6\x60"
+			  "\x1a\xe2\x70\x85\x58\xc2\x1b\x09"
+			  "\xe1\xd7\x2c\xca\xad\xa8\x8f\xf9"
+			  "\xac\xb3\x0e\xdb\xca\x2e\xe2\xb8"
+			  "\x51\x71\xd9\x3c\x6c\xf1\x56\xf8"
+			  "\xea\x9c\xf1\xfb\x0c\xe6\xb7\x10"
+			  "\x1c\xf8\xa9\x7c\xe8\x53\x35\xc1"
+			  "\x90\x3e\x76\x4a\x74\xa4\x21\x2c"
+			  "\xf6\x2c\x4e\x0f\x94\x3a\x88\x2e"
+			  "\x41\x09\x6a\x33\x7d\xf6\xdd\x3f"
+			  "\x8d\x23\x31\x74\x84\xeb\x88\x6e"
+			  "\xcc\xb9\xbc\x22\x83\x19\x07\x22"
+			  "\xa5\x2d\xdf\xa5\xf3\x80\x85\x78"
+			  "\x84\x39\x6a\x6d\x6a\x99\x4f\xa5"
+			  "\x15\xfe\x46\xb0\xe4\x6c\xa5\x41"
+			  "\x3c\xce\x8f\x42\x60\x71\xa7\x75"
+			  "\x08\x40\x65\x8a\x82\xbf\xf5\x43"
+			  "\x71\x96\xa9\x4d\x44\x8a\x20\xbe"
+			  "\xfa\x4d\xbb\xc0\x7d\x31\x96\x65"
+			  "\xe7\x75\xe5\x3e\xfd\x92\x3b\xc9"
+			  "\x55\xbb\x16\x7e\xf7\xc2\x8c\xa4"
+			  "\x40\x1d\xe5\xef\x0e\xdf\xe4\x9a"
+			  "\x62\x73\x65\xfd\x46\x63\x25\x3d"
+			  "\x2b\xaf\xe5\x64\xfe\xa5\x5c\xcf"
+			  "\x24\xf3\xb4\xac\x64\xba\xdf\x4b"
+			  "\xc6\x96\x7d\x81\x2d\x8d\x97\xf7"
+			  "\xc5\x68\x77\x84\x32\x2b\xcc\x85"
+			  "\x74\x96\xf0\x12\x77\x61\xb9\xeb"
+			  "\x71\xaa\x82\xcb\x1c\xdb\x89\xc8"
+			  "\xc6\xb5\xe3\x5c\x7d\x39\x07\x24"
+			  "\xda\x39\x87\x45\xc0\x2b\xbb\x01"
+			  "\xac\xbc\x2a\x5c\x7f\xfc\xe8\xce"
+			  "\x6d\x9c\x6f\xed\xd3\xc1\xa1\xd6"
+			  "\xc5\x55\xa9\x66\x2f\xe1\xc8\x32"
+			  "\xa6\x5d\xa4\x3a\x98\x73\xe8\x45"
+			  "\xa4\xc7\xa8\xb4\xf6\x13\x03\xf6"
+			  "\xe9\x2e\xc4\x29\x0f\x84\xdb\xc4"
+			  "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
+		.ilen	= 512,
+		.result	= "\x30\x38\xeb\xaf\x12\x43\x1a\x89"
+			  "\x62\xa2\x36\xe5\xcf\x77\x1e\xd9"
+			  "\x08\xc3\x0d\xdd\x95\xab\x19\x96"
+			  "\x27\x52\x41\xc3\xca\xfb\xf6\xee"
+			  "\x40\x2d\xdf\xdd\x00\x0c\xb9\x0a"
+			  "\x3a\xf0\xc0\xd1\xda\x63\x9e\x45"
+			  "\x42\xe9\x29\xc0\xb4\x07\xb4\x31"
+			  "\x66\x77\x72\xb5\xb6\xb3\x57\x46"
+			  "\x34\x9a\xfe\x03\xaf\x6b\x36\x07"
+			  "\x63\x8e\xc2\x5d\xa6\x0f\xb6\x7d"
+			  "\xfb\x6d\x82\x51\xb6\x98\xd0\x71"
+			  "\xe7\x10\x7a\xdf\xb2\xbd\xf1\x1d"
+			  "\x72\x2b\x54\x13\xe3\x6d\x79\x37"
+			  "\xa9\x39\x2c\xdf\x21\xab\x87\xd5"
+			  "\xee\xef\x9a\x12\x50\x39\x2e\x1b"
+			  "\x7d\xe6\x6a\x27\x48\xb9\xe7\xac"
+			  "\xaa\xcd\x79\x5f\xf2\xf3\xa0\x08"
+			  "\x6f\x2c\xf4\x0e\xd1\xb8\x89\x25"
+			  "\x31\x9d\xef\xb1\x1d\x27\x55\x04"
+			  "\xc9\x8c\xb7\x68\xdc\xb6\x67\x8a"
+			  "\xdb\xcf\x22\xf2\x3b\x6f\xce\xbb"
+			  "\x26\xbe\x4f\x27\x04\x42\xd1\x44"
+			  "\x4c\x08\xa3\x95\x4c\x7f\x1a\xaf"
+			  "\x1d\x28\x14\xfd\xb1\x1a\x34\x18"
+			  "\xf5\x1e\x28\x69\x95\x6a\x5a\xba"
+			  "\x8e\xb2\x58\x1d\x28\x17\x13\x3d"
+			  "\x38\x7d\x14\x8d\xab\x5d\xf9\xe8"
+			  "\x3c\x0f\x2b\x0d\x2b\x08\xb4\x4b"
+			  "\x6b\x0d\xc8\xa7\x84\xc2\x3a\x1a"
+			  "\xb7\xbd\xda\x92\x29\xb8\x5b\x5a"
+			  "\x63\xa5\x99\x82\x09\x72\x8f\xc6"
+			  "\xa4\x62\x24\x69\x8c\x2d\x26\x00"
+			  "\x99\x83\x91\xd6\xc6\xcf\x57\x67"
+			  "\x38\xea\xf2\xfc\x29\xe0\x73\x39"
+			  "\xf9\x13\x94\x6d\xe2\x58\x28\x75"
+			  "\x3e\xae\x71\x90\x07\x70\x1c\x38"
+			  "\x5b\x4c\x1e\xb5\xa5\x3b\x20\xef"
+			  "\xb1\x4c\x3e\x1a\x72\x62\xbb\x22"
+			  "\x82\x09\xe3\x18\x3f\x4f\x48\xfc"
+			  "\xdd\xac\xfc\xb6\x09\xdb\xd2\x7b"
+			  "\xd6\xb7\x7e\x41\x2f\x14\xf5\x0e"
+			  "\xc3\xac\x4a\xed\xe7\x82\xef\x31"
+			  "\x1f\x1a\x51\x1e\x29\x60\xc8\x98"
+			  "\x93\x51\x1d\x3d\x62\x59\x83\x82"
+			  "\x0c\xf1\xd7\x8d\xac\x33\x44\x81"
+			  "\x3c\x59\xb7\xd4\x5b\x65\x82\xc4"
+			  "\xec\xdc\x24\xfd\x0e\x1a\x79\x94"
+			  "\x34\xb0\x62\xfa\x98\x49\x26\x1f"
+			  "\xf4\x9e\x40\x44\x5b\x1f\xf8\xbe"
+			  "\x36\xff\xc6\xc6\x9d\xf2\xd6\xcc"
+			  "\x63\x93\x29\xb9\x0b\x6d\xd7\x6c"
+			  "\xdb\xf6\x21\x80\xf7\x5a\x37\x15"
+			  "\x0c\xe3\x36\xc8\x74\x75\x20\x91"
+			  "\xdf\x52\x2d\x0c\xe7\x45\xff\x46"
+			  "\xb3\xf4\xec\xc2\xbd\xd3\x37\xb6"
+			  "\x26\xa2\x5d\x7d\x61\xbf\x10\x46"
+			  "\x57\x8d\x05\x96\x70\x0b\xd6\x41"
+			  "\x5c\xe9\xd3\x54\x81\x39\x3a\xdd"
+			  "\x5f\x92\x81\x6e\x35\x03\xd4\x72"
+			  "\x3d\x5a\xe7\xb9\x3b\x0c\x84\x23"
+			  "\x45\x5d\xec\x72\xc1\x52\xef\x2e"
+			  "\x81\x00\xd3\xfe\x4c\x3c\x05\x61"
+			  "\x80\x18\xc4\x6c\x03\xd3\xb7\xba"
+			  "\x11\xd7\xb8\x6e\xea\xe1\x80\x30",
+		.rlen	= 512,
+	},
+};
+
+static struct cipher_testvec tf_lrw_dec_tv_template[] = {
+	/* Generated from AES-LRW test vectors */
+	/* same as enc vectors with input and result reversed */
+	{
+		.key	= "\x45\x62\xac\x25\xf8\x28\x17\x6d"
+			  "\x4c\x26\x84\x14\xb5\x68\x01\x85"
+			  "\x25\x8e\x2a\x05\xe7\x3e\x9d\x03"
+			  "\xee\x5a\x83\x0c\xcc\x09\x4c\x87",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\xa1\x6c\x50\x69\x26\xa4\xef\x7b"
+			  "\x7c\xc6\x91\xeb\x72\xdd\x9b\xee",
+		.ilen	= 16,
+		.result	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.rlen	= 16,
+	}, {
+		.key	= "\x59\x70\x47\x14\xf5\x57\x47\x8c"
+			  "\xd7\x79\xe8\x0f\x54\x88\x79\x44"
+			  "\x0d\x48\xf0\xb7\xb1\x5a\x53\xea"
+			  "\x1c\xaa\x6b\x29\xc2\xca\xfb\xaf",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x02",
+		.input	= "\xab\x72\x0a\xad\x3b\x0c\xf0\xc9"
+			  "\x42\x2f\xf1\xae\xf1\x3c\xb1\xbd",
+		.ilen	= 16,
+		.result	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.rlen	= 16,
+	}, {
+		.key	= "\xd8\x2a\x91\x34\xb2\x6a\x56\x50"
+			  "\x30\xfe\x69\xe2\x37\x7f\x98\x47"
+			  "\xcd\xf9\x0b\x16\x0c\x64\x8f\xb6"
+			  "\xb0\x0d\x0d\x1b\xae\x85\x87\x1f",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x02\x00\x00\x00\x00",
+		.input	= "\x85\xa7\x56\x67\x08\xfa\x42\xe1"
+			  "\x22\xe6\x82\xfc\xd9\xb4\xd7\xd4",
+		.ilen	= 16,
+		.result	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.rlen	= 16,
+	}, {
+		.key	= "\x0f\x6a\xef\xf8\xd3\xd2\xbb\x15"
+			  "\x25\x83\xf7\x3c\x1f\x01\x28\x74"
+			  "\xca\xc6\xbc\x35\x4d\x4a\x65\x54"
+			  "\x90\xae\x61\xcf\x7b\xae\xbd\xcc"
+			  "\xad\xe4\x94\xc5\x4a\x29\xae\x70",
+		.klen	= 40,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\xd2\xaf\x69\x35\x24\x1d\x0e\x1c"
+			  "\x84\x8b\x05\xe4\xa2\x2f\x16\xf5",
+		.ilen	= 16,
+		.result	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.rlen	= 16,
+	}, {
+		.key	= "\x8a\xd4\xee\x10\x2f\xbd\x81\xff"
+			  "\xf8\x86\xce\xac\x93\xc5\xad\xc6"
+			  "\xa0\x19\x07\xc0\x9d\xf7\xbb\xdd"
+			  "\x52\x13\xb2\xb7\xf0\xff\x11\xd8"
+			  "\xd6\x08\xd0\xcd\x2e\xb1\x17\x6f",
+		.klen	= 40,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x02\x00\x00\x00\x00",
+		.input	= "\x4a\x23\x56\xd7\xff\x90\xd0\x9a"
+			  "\x0d\x7c\x26\xfc\xf0\xf0\xf6\xe4",
+		.ilen	= 16,
+		.result	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.rlen	= 16,
+	}, {
+		.key	= "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
+			  "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
+			  "\xfe\xf1\xa9\xf3\x7b\xbc\x8d\x21"
+			  "\xa7\x9c\x21\xf8\xcb\x90\x02\x89"
+			  "\xa8\x45\x34\x8e\xc8\xc5\xb5\xf1"
+			  "\x26\xf5\x0e\x76\xfe\xfd\x1b\x1e",
+		.klen	= 48,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x30\xaf\x26\x05\x9d\x5d\x0a\x58"
+			  "\xe2\xe7\xce\x8a\xb2\x56\x6d\x76",
+		.ilen	= 16,
+		.result	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.rlen	= 16,
+	}, {
+		.key	= "\xfb\x76\x15\xb2\x3d\x80\x89\x1d"
+			  "\xd4\x70\x98\x0b\xc7\x95\x84\xc8"
+			  "\xb2\xfb\x64\xce\x60\x97\x87\x8d"
+			  "\x17\xfc\xe4\x5a\x49\xe8\x30\xb7"
+			  "\x6e\x78\x17\xe7\x2d\x5e\x12\xd4"
+			  "\x60\x64\x04\x7a\xf1\x2f\x9e\x0c",
+		.klen	= 48,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x02\x00\x00\x00\x00",
+		.input	= "\xdf\xcf\xdc\xd2\xe1\xcf\x86\x75"
+			  "\x17\x66\x5e\x0c\x14\xa1\x3d\x40",
+		.ilen	= 16,
+		.result	= "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x41\x42\x43\x44\x45\x46",
+		.rlen	= 16,
+	}, {
+		.key	= "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
+			  "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
+			  "\xfe\xf1\xa9\xf3\x7b\xbc\x8d\x21"
+			  "\xa7\x9c\x21\xf8\xcb\x90\x02\x89"
+			  "\xa8\x45\x34\x8e\xc8\xc5\xb5\xf1"
+			  "\x26\xf5\x0e\x76\xfe\xfd\x1b\x1e",
+		.klen	= 48,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x30\x38\xeb\xaf\x12\x43\x1a\x89"
+			  "\x62\xa2\x36\xe5\xcf\x77\x1e\xd9"
+			  "\x08\xc3\x0d\xdd\x95\xab\x19\x96"
+			  "\x27\x52\x41\xc3\xca\xfb\xf6\xee"
+			  "\x40\x2d\xdf\xdd\x00\x0c\xb9\x0a"
+			  "\x3a\xf0\xc0\xd1\xda\x63\x9e\x45"
+			  "\x42\xe9\x29\xc0\xb4\x07\xb4\x31"
+			  "\x66\x77\x72\xb5\xb6\xb3\x57\x46"
+			  "\x34\x9a\xfe\x03\xaf\x6b\x36\x07"
+			  "\x63\x8e\xc2\x5d\xa6\x0f\xb6\x7d"
+			  "\xfb\x6d\x82\x51\xb6\x98\xd0\x71"
+			  "\xe7\x10\x7a\xdf\xb2\xbd\xf1\x1d"
+			  "\x72\x2b\x54\x13\xe3\x6d\x79\x37"
+			  "\xa9\x39\x2c\xdf\x21\xab\x87\xd5"
+			  "\xee\xef\x9a\x12\x50\x39\x2e\x1b"
+			  "\x7d\xe6\x6a\x27\x48\xb9\xe7\xac"
+			  "\xaa\xcd\x79\x5f\xf2\xf3\xa0\x08"
+			  "\x6f\x2c\xf4\x0e\xd1\xb8\x89\x25"
+			  "\x31\x9d\xef\xb1\x1d\x27\x55\x04"
+			  "\xc9\x8c\xb7\x68\xdc\xb6\x67\x8a"
+			  "\xdb\xcf\x22\xf2\x3b\x6f\xce\xbb"
+			  "\x26\xbe\x4f\x27\x04\x42\xd1\x44"
+			  "\x4c\x08\xa3\x95\x4c\x7f\x1a\xaf"
+			  "\x1d\x28\x14\xfd\xb1\x1a\x34\x18"
+			  "\xf5\x1e\x28\x69\x95\x6a\x5a\xba"
+			  "\x8e\xb2\x58\x1d\x28\x17\x13\x3d"
+			  "\x38\x7d\x14\x8d\xab\x5d\xf9\xe8"
+			  "\x3c\x0f\x2b\x0d\x2b\x08\xb4\x4b"
+			  "\x6b\x0d\xc8\xa7\x84\xc2\x3a\x1a"
+			  "\xb7\xbd\xda\x92\x29\xb8\x5b\x5a"
+			  "\x63\xa5\x99\x82\x09\x72\x8f\xc6"
+			  "\xa4\x62\x24\x69\x8c\x2d\x26\x00"
+			  "\x99\x83\x91\xd6\xc6\xcf\x57\x67"
+			  "\x38\xea\xf2\xfc\x29\xe0\x73\x39"
+			  "\xf9\x13\x94\x6d\xe2\x58\x28\x75"
+			  "\x3e\xae\x71\x90\x07\x70\x1c\x38"
+			  "\x5b\x4c\x1e\xb5\xa5\x3b\x20\xef"
+			  "\xb1\x4c\x3e\x1a\x72\x62\xbb\x22"
+			  "\x82\x09\xe3\x18\x3f\x4f\x48\xfc"
+			  "\xdd\xac\xfc\xb6\x09\xdb\xd2\x7b"
+			  "\xd6\xb7\x7e\x41\x2f\x14\xf5\x0e"
+			  "\xc3\xac\x4a\xed\xe7\x82\xef\x31"
+			  "\x1f\x1a\x51\x1e\x29\x60\xc8\x98"
+			  "\x93\x51\x1d\x3d\x62\x59\x83\x82"
+			  "\x0c\xf1\xd7\x8d\xac\x33\x44\x81"
+			  "\x3c\x59\xb7\xd4\x5b\x65\x82\xc4"
+			  "\xec\xdc\x24\xfd\x0e\x1a\x79\x94"
+			  "\x34\xb0\x62\xfa\x98\x49\x26\x1f"
+			  "\xf4\x9e\x40\x44\x5b\x1f\xf8\xbe"
+			  "\x36\xff\xc6\xc6\x9d\xf2\xd6\xcc"
+			  "\x63\x93\x29\xb9\x0b\x6d\xd7\x6c"
+			  "\xdb\xf6\x21\x80\xf7\x5a\x37\x15"
+			  "\x0c\xe3\x36\xc8\x74\x75\x20\x91"
+			  "\xdf\x52\x2d\x0c\xe7\x45\xff\x46"
+			  "\xb3\xf4\xec\xc2\xbd\xd3\x37\xb6"
+			  "\x26\xa2\x5d\x7d\x61\xbf\x10\x46"
+			  "\x57\x8d\x05\x96\x70\x0b\xd6\x41"
+			  "\x5c\xe9\xd3\x54\x81\x39\x3a\xdd"
+			  "\x5f\x92\x81\x6e\x35\x03\xd4\x72"
+			  "\x3d\x5a\xe7\xb9\x3b\x0c\x84\x23"
+			  "\x45\x5d\xec\x72\xc1\x52\xef\x2e"
+			  "\x81\x00\xd3\xfe\x4c\x3c\x05\x61"
+			  "\x80\x18\xc4\x6c\x03\xd3\xb7\xba"
+			  "\x11\xd7\xb8\x6e\xea\xe1\x80\x30",
+		.ilen	= 512,
+		.result	= "\x05\x11\xb7\x18\xab\xc6\x2d\xac"
+			  "\x70\x5d\xf6\x22\x94\xcd\xe5\x6c"
+			  "\x17\x6b\xf6\x1c\xf0\xf3\x6e\xf8"
+			  "\x50\x38\x1f\x71\x49\xb6\x57\xd6"
+			  "\x8f\xcb\x8d\x6b\xe3\xa6\x29\x90"
+			  "\xfe\x2a\x62\x82\xae\x6d\x8b\xf6"
+			  "\xad\x1e\x9e\x20\x5f\x38\xbe\x04"
+			  "\xda\x10\x8e\xed\xa2\xa4\x87\xab"
+			  "\xda\x6b\xb4\x0c\x75\xba\xd3\x7c"
+			  "\xc9\xac\x42\x31\x95\x7c\xc9\x04"
+			  "\xeb\xd5\x6e\x32\x69\x8a\xdb\xa6"
+			  "\x15\xd7\x3f\x4f\x2f\x66\x69\x03"
+			  "\x9c\x1f\x54\x0f\xde\x1f\xf3\x65"
+			  "\x4c\x96\x12\xed\x7c\x92\x03\x01"
+			  "\x6f\xbc\x35\x93\xac\xf1\x27\xf1"
+			  "\xb4\x96\x82\x5a\x5f\xb0\xa0\x50"
+			  "\x89\xa4\x8e\x66\x44\x85\xcc\xfd"
+			  "\x33\x14\x70\xe3\x96\xb2\xc3\xd3"
+			  "\xbb\x54\x5a\x1a\xf9\x74\xa2\xc5"
+			  "\x2d\x64\x75\xdd\xb4\x54\xe6\x74"
+			  "\x8c\xd3\x9d\x9e\x86\xab\x51\x53"
+			  "\xb7\x93\x3e\x6f\xd0\x4e\x2c\x40"
+			  "\xf6\xa8\x2e\x3e\x9d\xf4\x66\xa5"
+			  "\x76\x12\x73\x44\x1a\x56\xd7\x72"
+			  "\x88\xcd\x21\x8c\x4c\x0f\xfe\xda"
+			  "\x95\xe0\x3a\xa6\xa5\x84\x46\xcd"
+			  "\xd5\x3e\x9d\x3a\xe2\x67\xe6\x60"
+			  "\x1a\xe2\x70\x85\x58\xc2\x1b\x09"
+			  "\xe1\xd7\x2c\xca\xad\xa8\x8f\xf9"
+			  "\xac\xb3\x0e\xdb\xca\x2e\xe2\xb8"
+			  "\x51\x71\xd9\x3c\x6c\xf1\x56\xf8"
+			  "\xea\x9c\xf1\xfb\x0c\xe6\xb7\x10"
+			  "\x1c\xf8\xa9\x7c\xe8\x53\x35\xc1"
+			  "\x90\x3e\x76\x4a\x74\xa4\x21\x2c"
+			  "\xf6\x2c\x4e\x0f\x94\x3a\x88\x2e"
+			  "\x41\x09\x6a\x33\x7d\xf6\xdd\x3f"
+			  "\x8d\x23\x31\x74\x84\xeb\x88\x6e"
+			  "\xcc\xb9\xbc\x22\x83\x19\x07\x22"
+			  "\xa5\x2d\xdf\xa5\xf3\x80\x85\x78"
+			  "\x84\x39\x6a\x6d\x6a\x99\x4f\xa5"
+			  "\x15\xfe\x46\xb0\xe4\x6c\xa5\x41"
+			  "\x3c\xce\x8f\x42\x60\x71\xa7\x75"
+			  "\x08\x40\x65\x8a\x82\xbf\xf5\x43"
+			  "\x71\x96\xa9\x4d\x44\x8a\x20\xbe"
+			  "\xfa\x4d\xbb\xc0\x7d\x31\x96\x65"
+			  "\xe7\x75\xe5\x3e\xfd\x92\x3b\xc9"
+			  "\x55\xbb\x16\x7e\xf7\xc2\x8c\xa4"
+			  "\x40\x1d\xe5\xef\x0e\xdf\xe4\x9a"
+			  "\x62\x73\x65\xfd\x46\x63\x25\x3d"
+			  "\x2b\xaf\xe5\x64\xfe\xa5\x5c\xcf"
+			  "\x24\xf3\xb4\xac\x64\xba\xdf\x4b"
+			  "\xc6\x96\x7d\x81\x2d\x8d\x97\xf7"
+			  "\xc5\x68\x77\x84\x32\x2b\xcc\x85"
+			  "\x74\x96\xf0\x12\x77\x61\xb9\xeb"
+			  "\x71\xaa\x82\xcb\x1c\xdb\x89\xc8"
+			  "\xc6\xb5\xe3\x5c\x7d\x39\x07\x24"
+			  "\xda\x39\x87\x45\xc0\x2b\xbb\x01"
+			  "\xac\xbc\x2a\x5c\x7f\xfc\xe8\xce"
+			  "\x6d\x9c\x6f\xed\xd3\xc1\xa1\xd6"
+			  "\xc5\x55\xa9\x66\x2f\xe1\xc8\x32"
+			  "\xa6\x5d\xa4\x3a\x98\x73\xe8\x45"
+			  "\xa4\xc7\xa8\xb4\xf6\x13\x03\xf6"
+			  "\xe9\x2e\xc4\x29\x0f\x84\xdb\xc4"
+			  "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
+		.rlen	= 512,
+	},
+};
+
 /*
  * Serpent test vectors.  These are backwards because Serpent writes
  * octet sequences in right-to-left mode.

From bee3a90ef5366b58250e4369dac3268ced3351aa Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 13:32:56 +0300
Subject: [PATCH 13/54] crypto: tcrypt - add lrw(twofish) tests

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/tcrypt.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 9a9e170035d1..01203833ccc2 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -990,6 +990,7 @@ static int do_test(int m)
 		ret += tcrypt_test("ecb(twofish)");
 		ret += tcrypt_test("cbc(twofish)");
 		ret += tcrypt_test("ctr(twofish)");
+		ret += tcrypt_test("lrw(twofish)");
 		break;
 
 	case 9:
@@ -1249,6 +1250,10 @@ static int do_test(int m)
 				speed_template_16_24_32);
 		test_cipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0,
 				speed_template_16_24_32);
+		test_cipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0,
+				speed_template_32_40_48);
+		test_cipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0,
+				speed_template_32_40_48);
 		break;
 
 	case 203:

From 81559f9ad3d88c033e4ec3b6468012dbfda3b31d Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 13:33:02 +0300
Subject: [PATCH 14/54] crypto: twofish-x86_64-3way - add lrw support

This patch adds LRW support for twofish-x86_64-3way by using lrw_crypt(). The
patch has been tested with tcrypt and automated filesystem tests.

Tcrypt benchmark results (twofish-3way/twofish-asm speed ratios):

Intel Celeron T1600 (fam:6, model:15, step:13):

size	lrw-enc	lrw-dec
16B	0.99x	1.00x
64B	1.17x	1.17x
256B	1.26x	1.27x
1024B	1.30x	1.31x
8192B	1.31x	1.32x

AMD Phenom II 1055T (fam:16, model:10):

size	lrw-enc	lrw-dec
16B	1.06x	1.01x
64B	1.08x	1.14x
256B	1.19x	1.20x
1024B	1.21x	1.22x
8192B	1.23x	1.24x
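
For callers, the combined key is the raw Twofish key with the 16-byte LRW
tweak key appended, which is why the algorithm below registers keysizes of
TF_MIN/MAX_KEY_SIZE + TF_BLOCK_SIZE. A minimal caller-side sketch, assuming
the blkcipher API of this kernel generation (the helper and buffer names are
illustrative, not part of the patch):

  #include <linux/crypto.h>
  #include <linux/string.h>
  #include <crypto/twofish.h>

  static int example_lrw_setkey(struct crypto_blkcipher *tfm,
                                const u8 *tf_key, unsigned int tf_keylen,
                                const u8 *tweak_key)
  {
          u8 lrw_key[TF_MAX_KEY_SIZE + TF_BLOCK_SIZE];

          memcpy(lrw_key, tf_key, tf_keylen);                     /* K1: Twofish key */
          memcpy(lrw_key + tf_keylen, tweak_key, TF_BLOCK_SIZE);  /* K2: LRW tweak key */

          return crypto_blkcipher_setkey(tfm, lrw_key, tf_keylen + TF_BLOCK_SIZE);
  }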

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 arch/x86/crypto/twofish_glue_3way.c | 135 ++++++++++++++++++++++++++++
 crypto/twofish_common.c             |  13 +--
 include/crypto/twofish.h            |   2 +
 3 files changed, 145 insertions(+), 5 deletions(-)

diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index 5ede9c444c3e..fa9151df3637 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -32,6 +32,11 @@
 #include <crypto/algapi.h>
 #include <crypto/twofish.h>
 #include <crypto/b128ops.h>
+#include <crypto/lrw.h>
+
+#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
+#define HAS_LRW
+#endif
 
 /* regular block cipher functions from twofish_x86_64 module */
 asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
@@ -432,6 +437,124 @@ static struct crypto_alg blk_ctr_alg = {
 	},
 };
 
+#ifdef HAS_LRW
+
+static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+{
+	const unsigned int bsize = TF_BLOCK_SIZE;
+	struct twofish_ctx *ctx = priv;
+	int i;
+
+	if (nbytes == 3 * bsize) {
+		twofish_enc_blk_3way(ctx, srcdst, srcdst);
+		return;
+	}
+
+	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+		twofish_enc_blk(ctx, srcdst, srcdst);
+}
+
+static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+{
+	const unsigned int bsize = TF_BLOCK_SIZE;
+	struct twofish_ctx *ctx = priv;
+	int i;
+
+	if (nbytes == 3 * bsize) {
+		twofish_dec_blk_3way(ctx, srcdst, srcdst);
+		return;
+	}
+
+	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+		twofish_dec_blk(ctx, srcdst, srcdst);
+}
+
+struct twofish_lrw_ctx {
+	struct lrw_table_ctx lrw_table;
+	struct twofish_ctx twofish_ctx;
+};
+
+static int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
+			      unsigned int keylen)
+{
+	struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
+	int err;
+
+	err = __twofish_setkey(&ctx->twofish_ctx, key, keylen - TF_BLOCK_SIZE,
+			       &tfm->crt_flags);
+	if (err)
+		return err;
+
+	return lrw_init_table(&ctx->lrw_table, key + keylen - TF_BLOCK_SIZE);
+}
+
+static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	be128 buf[3];
+	struct lrw_crypt_req req = {
+		.tbuf = buf,
+		.tbuflen = sizeof(buf),
+
+		.table_ctx = &ctx->lrw_table,
+		.crypt_ctx = &ctx->twofish_ctx,
+		.crypt_fn = encrypt_callback,
+	};
+
+	return lrw_crypt(desc, dst, src, nbytes, &req);
+}
+
+static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	be128 buf[3];
+	struct lrw_crypt_req req = {
+		.tbuf = buf,
+		.tbuflen = sizeof(buf),
+
+		.table_ctx = &ctx->lrw_table,
+		.crypt_ctx = &ctx->twofish_ctx,
+		.crypt_fn = decrypt_callback,
+	};
+
+	return lrw_crypt(desc, dst, src, nbytes, &req);
+}
+
+static void lrw_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	lrw_free_table(&ctx->lrw_table);
+}
+
+static struct crypto_alg blk_lrw_alg = {
+	.cra_name		= "lrw(twofish)",
+	.cra_driver_name	= "lrw-twofish-3way",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= TF_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct twofish_lrw_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(blk_lrw_alg.cra_list),
+	.cra_exit		= lrw_exit_tfm,
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize	= TF_MIN_KEY_SIZE + TF_BLOCK_SIZE,
+			.max_keysize	= TF_MAX_KEY_SIZE + TF_BLOCK_SIZE,
+			.ivsize		= TF_BLOCK_SIZE,
+			.setkey		= lrw_twofish_setkey,
+			.encrypt	= lrw_encrypt,
+			.decrypt	= lrw_decrypt,
+		},
+	},
+};
+
+#endif
+
 int __init init(void)
 {
 	int err;
@@ -445,9 +568,18 @@ int __init init(void)
 	err = crypto_register_alg(&blk_ctr_alg);
 	if (err)
 		goto ctr_err;
+#ifdef HAS_LRW
+	err = crypto_register_alg(&blk_lrw_alg);
+	if (err)
+		goto blk_lrw_err;
+#endif
 
 	return 0;
 
+#ifdef HAS_LRW
+blk_lrw_err:
+	crypto_unregister_alg(&blk_ctr_alg);
+#endif
 ctr_err:
 	crypto_unregister_alg(&blk_cbc_alg);
 cbc_err:
@@ -458,6 +590,9 @@ ecb_err:
 
 void __exit fini(void)
 {
+#ifdef HAS_LRW
+	crypto_unregister_alg(&blk_lrw_alg);
+#endif
 	crypto_unregister_alg(&blk_ctr_alg);
 	crypto_unregister_alg(&blk_cbc_alg);
 	crypto_unregister_alg(&blk_ecb_alg);
diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c
index 0af216c75d7e..5f62c4f9f6e0 100644
--- a/crypto/twofish_common.c
+++ b/crypto/twofish_common.c
@@ -580,12 +580,9 @@ static const u8 calc_sb_tbl[512] = {
    ctx->a[(j) + 1] = rol32(y, 9)
 
 /* Perform the key setup. */
-int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
+int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key,
+		     unsigned int key_len, u32 *flags)
 {
-
-	struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
-	u32 *flags = &tfm->crt_flags;
-
 	int i, j, k;
 
 	/* Temporaries for CALC_K. */
@@ -701,7 +698,13 @@ int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(__twofish_setkey);
 
+int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
+{
+	return __twofish_setkey(crypto_tfm_ctx(tfm), key, key_len,
+				&tfm->crt_flags);
+}
 EXPORT_SYMBOL_GPL(twofish_setkey);
 
 MODULE_LICENSE("GPL");
diff --git a/include/crypto/twofish.h b/include/crypto/twofish.h
index c408522595c6..095c901a8af3 100644
--- a/include/crypto/twofish.h
+++ b/include/crypto/twofish.h
@@ -17,6 +17,8 @@ struct twofish_ctx {
 	u32 s[4][256], w[8], k[32];
 };
 
+int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key,
+		     unsigned int key_len, u32 *flags);
 int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len);
 
 #endif

From f9d2691fc9a00f39b587f965c33cca012a5597bc Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 13:33:07 +0300
Subject: [PATCH 15/54] crypto: xts - use blocksize constant

XTS has a fixed block size of 16 bytes. Define XTS_BLOCK_SIZE and use it in
place of crypto_cipher_blocksize().

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/xts.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/crypto/xts.c b/crypto/xts.c
index 851705446c82..96f3f88d576e 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -24,6 +24,8 @@
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
 
+#define XTS_BLOCK_SIZE 16
+
 struct priv {
 	struct crypto_cipher *child;
 	struct crypto_cipher *tweak;
@@ -96,7 +98,7 @@ static int crypt(struct blkcipher_desc *d,
 {
 	int err;
 	unsigned int avail;
-	const int bs = crypto_cipher_blocksize(ctx->child);
+	const int bs = XTS_BLOCK_SIZE;
 	struct sinfo s = {
 		.tfm = crypto_cipher_tfm(ctx->child),
 		.fn = fn
@@ -177,7 +179,7 @@ static int init_tfm(struct crypto_tfm *tfm)
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
-	if (crypto_cipher_blocksize(cipher) != 16) {
+	if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) {
 		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
 		crypto_free_cipher(cipher);
 		return -EINVAL;
@@ -192,7 +194,7 @@ static int init_tfm(struct crypto_tfm *tfm)
 	}
 
 	/* this check isn't really needed, leave it here just in case */
-	if (crypto_cipher_blocksize(cipher) != 16) {
+	if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) {
 		crypto_free_cipher(cipher);
 		crypto_free_cipher(ctx->child);
 		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;

From ce0045561e1edb92e4a509eb433ff52d3afaa258 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Wed, 9 Nov 2011 11:56:06 +0800
Subject: [PATCH 16/54] crypto: xts: add interface for parallelized cipher
 implementations

Add an xts_crypt() function that can be used by cipher implementations that
benefit from parallelized cipher operations.
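
A hedged sketch of how a parallelized cipher implementation might hook into
the new interface; only xts_crypt(), struct xts_crypt_req and XTS_TWEAK_CAST()
come from this patch, while the context layout and callback names are
illustrative:

  #include <crypto/algapi.h>
  #include <crypto/b128ops.h>
  #include <crypto/xts.h>

  /* hypothetical: one key schedule for the tweak cipher, one for the data */
  struct example_xts_ctx {
          struct example_cipher_ctx tweak_ctx;
          struct example_cipher_ctx crypt_ctx;
  };

  static int example_xts_encrypt(struct blkcipher_desc *desc,
                                 struct scatterlist *dst,
                                 struct scatterlist *src, unsigned int nbytes)
  {
          struct example_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
          be128 buf[8];           /* scratch space for up to 8 tweak blocks */
          struct xts_crypt_req req = {
                  .tbuf = buf,
                  .tbuflen = sizeof(buf),

                  .tweak_ctx = &ctx->tweak_ctx,
                  .tweak_fn = XTS_TWEAK_CAST(example_encrypt_one_block),
                  .crypt_ctx = &ctx->crypt_ctx,
                  .crypt_fn = example_encrypt_blocks, /* void (*)(void *, u8 *, unsigned int) */
          };

          return xts_crypt(desc, dst, src, nbytes, &req);
  }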

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/xts.c         | 75 ++++++++++++++++++++++++++++++++++++++++++--
 include/crypto/xts.h | 27 ++++++++++++++++
 2 files changed, 100 insertions(+), 2 deletions(-)
 create mode 100644 include/crypto/xts.h

diff --git a/crypto/xts.c b/crypto/xts.c
index 96f3f88d576e..ca1608f44cb5 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -21,11 +21,10 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 
+#include <crypto/xts.h>
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
 
-#define XTS_BLOCK_SIZE 16
-
 struct priv {
 	struct crypto_cipher *child;
 	struct crypto_cipher *tweak;
@@ -167,6 +166,78 @@ static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		     crypto_cipher_alg(ctx->child)->cia_decrypt);
 }
 
+int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
+	      struct scatterlist *ssrc, unsigned int nbytes,
+	      struct xts_crypt_req *req)
+{
+	const unsigned int bsize = XTS_BLOCK_SIZE;
+	const unsigned int max_blks = req->tbuflen / bsize;
+	struct blkcipher_walk walk;
+	unsigned int nblocks;
+	be128 *src, *dst, *t;
+	be128 *t_buf = req->tbuf;
+	int err, i;
+
+	BUG_ON(max_blks < 1);
+
+	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);
+
+	err = blkcipher_walk_virt(desc, &walk);
+	nbytes = walk.nbytes;
+	if (!nbytes)
+		return err;
+
+	nblocks = min(nbytes / bsize, max_blks);
+	src = (be128 *)walk.src.virt.addr;
+	dst = (be128 *)walk.dst.virt.addr;
+
+	/* calculate first value of T */
+	req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);
+
+	i = 0;
+	goto first;
+
+	for (;;) {
+		do {
+			for (i = 0; i < nblocks; i++) {
+				gf128mul_x_ble(&t_buf[i], t);
+first:
+				t = &t_buf[i];
+
+				/* PP <- T xor P */
+				be128_xor(dst + i, t, src + i);
+			}
+
+			/* CC <- E(Key2,PP) */
+			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
+				      nblocks * bsize);
+
+			/* C <- T xor CC */
+			for (i = 0; i < nblocks; i++)
+				be128_xor(dst + i, dst + i, &t_buf[i]);
+
+			src += nblocks;
+			dst += nblocks;
+			nbytes -= nblocks * bsize;
+			nblocks = min(nbytes / bsize, max_blks);
+		} while (nblocks > 0);
+
+		*(be128 *)walk.iv = *t;
+
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+		nbytes = walk.nbytes;
+		if (!nbytes)
+			break;
+
+		nblocks = min(nbytes / bsize, max_blks);
+		src = (be128 *)walk.src.virt.addr;
+		dst = (be128 *)walk.dst.virt.addr;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(xts_crypt);
+
 static int init_tfm(struct crypto_tfm *tfm)
 {
 	struct crypto_cipher *cipher;
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
new file mode 100644
index 000000000000..72c09eb56437
--- /dev/null
+++ b/include/crypto/xts.h
@@ -0,0 +1,27 @@
+#ifndef _CRYPTO_XTS_H
+#define _CRYPTO_XTS_H
+
+#include <crypto/b128ops.h>
+
+struct scatterlist;
+struct blkcipher_desc;
+
+#define XTS_BLOCK_SIZE 16
+
+struct xts_crypt_req {
+	be128 *tbuf;
+	unsigned int tbuflen;
+
+	void *tweak_ctx;
+	void (*tweak_fn)(void *ctx, u8* dst, const u8* src);
+	void *crypt_ctx;
+	void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes);
+};
+
+#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x))
+
+int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+	      struct scatterlist *src, unsigned int nbytes,
+	      struct xts_crypt_req *req);
+
+#endif  /* _CRYPTO_XTS_H */

From 18be20b9445731c57ef2fa0c7c9e71ac1b4a7872 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 13:33:17 +0300
Subject: [PATCH 17/54] crypto: testmgr - add xts(serpent) test vectors

Add test vectors for xts(serpent). These are generated from xts(aes) test vectors.

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/testmgr.c |  15 ++
 crypto/testmgr.h | 682 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 697 insertions(+)

diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index ae33d3075ce7..5e349a6bc98a 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2573,6 +2573,21 @@ static const struct alg_test_desc alg_test_descs[] = {
 				}
 			}
 		}
+	}, {
+		.alg = "xts(serpent)",
+		.test = alg_test_skcipher,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = serpent_xts_enc_tv_template,
+					.count = SERPENT_XTS_ENC_TEST_VECTORS
+				},
+				.dec = {
+					.vecs = serpent_xts_dec_tv_template,
+					.count = SERPENT_XTS_DEC_TEST_VECTORS
+				}
+			}
+		}
 	}, {
 		.alg = "zlib",
 		.test = alg_test_pcomp,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 4b887895c1e4..9158a92aa18b 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -3612,6 +3612,9 @@ static struct cipher_testvec tf_lrw_dec_tv_template[] = {
 #define SERPENT_LRW_ENC_TEST_VECTORS	8
 #define SERPENT_LRW_DEC_TEST_VECTORS	8
 
+#define SERPENT_XTS_ENC_TEST_VECTORS	5
+#define SERPENT_XTS_DEC_TEST_VECTORS	5
+
 static struct cipher_testvec serpent_enc_tv_template[] = {
 	{
 		.input	= "\x00\x01\x02\x03\x04\x05\x06\x07"
@@ -4668,6 +4671,685 @@ static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
 	},
 };
 
+static struct cipher_testvec serpent_xts_enc_tv_template[] = {
+	/* Generated from AES-XTS test vectors */
+	{
+		.key	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.ilen	= 32,
+		.result	= "\xe1\x08\xb8\x1d\x2c\xf5\x33\x64"
+			  "\xc8\x12\x04\xc7\xb3\x70\xe8\xc4"
+			  "\x6a\x31\xc5\xf3\x00\xca\xb9\x16"
+			  "\xde\xe2\x77\x66\xf7\xfe\x62\x08",
+		.rlen	= 32,
+	}, {
+		.key	= "\x11\x11\x11\x11\x11\x11\x11\x11"
+			  "\x11\x11\x11\x11\x11\x11\x11\x11"
+			  "\x22\x22\x22\x22\x22\x22\x22\x22"
+			  "\x22\x22\x22\x22\x22\x22\x22\x22",
+		.klen	= 32,
+		.iv	= "\x33\x33\x33\x33\x33\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44",
+		.ilen	= 32,
+		.result	= "\x1a\x0a\x09\x5f\xcd\x07\x07\x98"
+			  "\x41\x86\x12\xaf\xb3\xd7\x68\x13"
+			  "\xed\x81\xcd\x06\x87\x43\x1a\xbb"
+			  "\x13\x3d\xd6\x1e\x2b\xe1\x77\xbe",
+		.rlen	= 32,
+	}, {
+		.key	= "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
+			  "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
+			  "\x22\x22\x22\x22\x22\x22\x22\x22"
+			  "\x22\x22\x22\x22\x22\x22\x22\x22",
+		.klen	= 32,
+		.iv	= "\x33\x33\x33\x33\x33\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44",
+		.ilen	= 32,
+		.result	= "\xf9\x9b\x28\xb8\x5c\xaf\x8c\x61"
+			  "\xb6\x1c\x81\x8f\x2c\x87\x60\x89"
+			  "\x0d\x8d\x7a\xe8\x60\x48\xcc\x86"
+			  "\xc1\x68\x45\xaa\x00\xe9\x24\xc5",
+		.rlen	= 32,
+	}, {
+		.key	= "\x27\x18\x28\x18\x28\x45\x90\x45"
+			  "\x23\x53\x60\x28\x74\x71\x35\x26"
+			  "\x31\x41\x59\x26\x53\x58\x97\x93"
+			  "\x23\x84\x62\x64\x33\x83\x27\x95",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+			  "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.ilen	= 512,
+		.result	= "\xfe\x47\x4a\xc8\x60\x7e\xb4\x8b"
+			  "\x0d\x10\xf4\xb0\x0d\xba\xf8\x53"
+			  "\x65\x6e\x38\x4b\xdb\xaa\xb1\x9e"
+			  "\x28\xca\xb0\x22\xb3\x85\x75\xf4"
+			  "\x00\x5c\x75\x14\x06\xd6\x25\x82"
+			  "\xe6\xcb\x08\xf7\x29\x90\x23\x8e"
+			  "\xa4\x68\x57\xe4\xf0\xd8\x32\xf3"
+			  "\x80\x51\x67\xb5\x0b\x85\x69\xe8"
+			  "\x19\xfe\xc4\xc7\x3e\xea\x90\xd3"
+			  "\x8f\xa3\xf2\x0a\xac\x17\x4b\xa0"
+			  "\x63\x5a\x16\x0f\xf0\xce\x66\x1f"
+			  "\x2c\x21\x07\xf1\xa4\x03\xa3\x44"
+			  "\x41\x61\x87\x5d\x6b\xb3\xef\xd4"
+			  "\xfc\xaa\x32\x7e\x55\x58\x04\x41"
+			  "\xc9\x07\x33\xc6\xa2\x68\xd6\x5a"
+			  "\x55\x79\x4b\x6f\xcf\x89\xb9\x19"
+			  "\xe5\x54\x13\x15\xb2\x1a\xfa\x15"
+			  "\xc2\xf0\x06\x59\xfa\xa0\x25\x05"
+			  "\x58\xfa\x43\x91\x16\x85\x40\xbb"
+			  "\x0d\x34\x4d\xc5\x1e\x20\xd5\x08"
+			  "\xcd\x22\x22\x41\x11\x9f\x6c\x7c"
+			  "\x8d\x57\xc9\xba\x57\xe8\x2c\xf7"
+			  "\xa0\x42\xa8\xde\xfc\xa3\xca\x98"
+			  "\x4b\x43\xb1\xce\x4b\xbf\x01\x67"
+			  "\x6e\x29\x60\xbd\x10\x14\x84\x82"
+			  "\x83\x82\x0c\x63\x73\x92\x02\x7c"
+			  "\x55\x37\x20\x80\x17\x51\xc8\xbc"
+			  "\x46\x02\xcb\x38\x07\x6d\xe2\x85"
+			  "\xaa\x29\xaf\x24\x58\x0d\xf0\x75"
+			  "\x08\x0a\xa5\x34\x25\x16\xf3\x74"
+			  "\xa7\x0b\x97\xbe\xc1\xa9\xdc\x29"
+			  "\x1a\x0a\x56\xc1\x1a\x91\x97\x8c"
+			  "\x0b\xc7\x16\xed\x5a\x22\xa6\x2e"
+			  "\x8c\x2b\x4f\x54\x76\x47\x53\x8e"
+			  "\xe8\x00\xec\x92\xb9\x55\xe6\xa2"
+			  "\xf3\xe2\x4f\x6a\x66\x60\xd0\x87"
+			  "\xe6\xd1\xcc\xe3\x6a\xc5\x2d\x21"
+			  "\xcc\x9d\x6a\xb6\x75\xaa\xe2\x19"
+			  "\x21\x9f\xa1\x5e\x4c\xfd\x72\xf9"
+			  "\x94\x4e\x63\xc7\xae\xfc\xed\x47"
+			  "\xe2\xfe\x7a\x63\x77\xfe\x97\x82"
+			  "\xb1\x10\x6e\x36\x1d\xe1\xc4\x80"
+			  "\xec\x69\x41\xec\xa7\x8a\xe0\x2f"
+			  "\xe3\x49\x26\xa2\x41\xb2\x08\x0f"
+			  "\x28\xb4\xa7\x39\xa1\x99\x2d\x1e"
+			  "\x43\x42\x35\xd0\xcf\xec\x77\x67"
+			  "\xb2\x3b\x9e\x1c\x35\xde\x4f\x5e"
+			  "\x73\x3f\x5d\x6f\x07\x4b\x2e\x50"
+			  "\xab\x6c\x6b\xff\xea\x00\x67\xaa"
+			  "\x0e\x82\x32\xdd\x3d\xb5\xe5\x76"
+			  "\x2b\x77\x3f\xbe\x12\x75\xfb\x92"
+			  "\xc6\x89\x67\x4d\xca\xf7\xd4\x50"
+			  "\xc0\x74\x47\xcc\xd9\x0a\xd4\xc6"
+			  "\x3b\x17\x2e\xe3\x35\xbb\x53\xb5"
+			  "\x86\xad\x51\xcc\xd5\x96\xb8\xdc"
+			  "\x03\x57\xe6\x98\x52\x2f\x61\x62"
+			  "\xc4\x5c\x9c\x36\x71\x07\xfb\x94"
+			  "\xe3\x02\xc4\x2b\x08\x75\xc7\x35"
+			  "\xfb\x2e\x88\x7b\xbb\x67\x00\xe1"
+			  "\xc9\xdd\x99\xb2\x13\x53\x1a\x4e"
+			  "\x76\x87\x19\x04\x1a\x2f\x38\x3e"
+			  "\xef\x91\x64\x1d\x18\x07\x4e\x31"
+			  "\x88\x21\x7c\xb0\xa5\x12\x4c\x3c"
+			  "\xb0\x20\xbd\xda\xdf\xf9\x7c\xdd",
+		.rlen	= 512,
+	}, {
+		.key	= "\x27\x18\x28\x18\x28\x45\x90\x45"
+			  "\x23\x53\x60\x28\x74\x71\x35\x26"
+			  "\x62\x49\x77\x57\x24\x70\x93\x69"
+			  "\x99\x59\x57\x49\x66\x96\x76\x27"
+			  "\x31\x41\x59\x26\x53\x58\x97\x93"
+			  "\x23\x84\x62\x64\x33\x83\x27\x95"
+			  "\x02\x88\x41\x97\x16\x93\x99\x37"
+			  "\x51\x05\x82\x09\x74\x94\x45\x92",
+		.klen	= 64,
+		.iv	= "\xff\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+			  "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.ilen	= 512,
+		.result	= "\x2b\xc9\xb4\x6b\x10\x94\xa9\x32"
+			  "\xaa\xb0\x20\xc6\x44\x3d\x74\x1f"
+			  "\x75\x01\xa7\xf6\xf5\xf7\x62\x1b"
+			  "\x80\x1b\x82\xcb\x01\x59\x91\x7f"
+			  "\x80\x3a\x98\xf0\xd2\xca\xc4\xc3"
+			  "\x34\xfd\xe6\x11\xf9\x33\x45\x12"
+			  "\x48\xc5\x8c\x25\xf1\xc5\xc5\x23"
+			  "\xd3\x44\xb4\x73\xd5\x04\xc0\xb7"
+			  "\xca\x2f\xf5\xcd\xc5\xb4\xdd\xb0"
+			  "\xf4\x60\xe8\xfb\xc6\x9c\xc5\x78"
+			  "\xcd\xec\x7d\xdc\x19\x9c\x72\x64"
+			  "\x63\x0b\x38\x2e\x76\xdd\x2d\x36"
+			  "\x49\xb0\x1d\xea\x78\x9e\x00\xca"
+			  "\x20\xcc\x1b\x1e\x98\x74\xab\xed"
+			  "\x79\xf7\xd0\x6c\xd8\x93\x80\x29"
+			  "\xac\xa5\x5e\x34\xa9\xab\xa0\x55"
+			  "\x9a\xea\xaa\x95\x4d\x7b\xfe\x46"
+			  "\x26\x8a\xfd\x88\xa2\xa8\xa6\xae"
+			  "\x25\x42\x17\xbf\x76\x8f\x1c\x3d"
+			  "\xec\x9a\xda\x64\x96\xb5\x61\xff"
+			  "\x99\xeb\x12\x96\x85\x82\x9d\xd5"
+			  "\x81\x85\x14\xa8\x59\xac\x8c\x94"
+			  "\xbb\x3b\x85\x2b\xdf\xb3\x0c\xba"
+			  "\x82\xc6\x4d\xca\x86\xea\x53\x28"
+			  "\x4c\xe0\x4e\x31\xe3\x73\x2f\x79"
+			  "\x9d\x42\xe1\x03\xe3\x8b\xc4\xff"
+			  "\x05\xca\x81\x7b\xda\xa2\xde\x63"
+			  "\x3a\x10\xbe\xc2\xac\x32\xc4\x05"
+			  "\x47\x7e\xef\x67\xe2\x5f\x5b\xae"
+			  "\xed\xf1\x70\x34\x16\x9a\x07\x7b"
+			  "\xf2\x25\x2b\xb0\xf8\x3c\x15\x9a"
+			  "\xa6\x59\x55\x5f\xc1\xf4\x1e\xcd"
+			  "\x93\x1f\x06\xba\xd4\x9a\x22\x69"
+			  "\xfa\x8e\x95\x0d\xf3\x23\x59\x2c"
+			  "\xfe\x00\xba\xf0\x0e\xbc\x6d\xd6"
+			  "\x62\xf0\x7a\x0e\x83\x3e\xdb\x32"
+			  "\xfd\x43\x7d\xda\x42\x51\x87\x43"
+			  "\x9d\xf9\xef\xf4\x30\x97\xf8\x09"
+			  "\x88\xfc\x3f\x93\x70\xc1\x4a\xec"
+			  "\x27\x5f\x11\xac\x71\xc7\x48\x46"
+			  "\x2f\xf9\xdf\x8d\x9f\xf7\x2e\x56"
+			  "\x0d\x4e\xb0\x32\x76\xce\x86\x81"
+			  "\xcd\xdf\xe4\x00\xbf\xfd\x5f\x24"
+			  "\xaf\xf7\x9a\xde\xff\x18\xac\x14"
+			  "\x90\xc5\x01\x39\x34\x0f\x24\xf3"
+			  "\x13\x2f\x5e\x4f\x30\x9a\x36\x40"
+			  "\xec\xea\xbc\xcd\x9e\x0e\x5b\x23"
+			  "\x50\x88\x97\x40\x69\xb1\x37\xf5"
+			  "\xc3\x15\xf9\x3f\xb7\x79\x64\xe8"
+			  "\x7b\x10\x20\xb9\x2b\x46\x83\x5b"
+			  "\xd8\x39\xfc\xe4\xfa\x88\x52\xf2"
+			  "\x72\xb0\x97\x4e\x89\xb3\x48\x00"
+			  "\xc1\x16\x73\x50\x77\xba\xa6\x65"
+			  "\x20\x2d\xb0\x02\x27\x89\xda\x99"
+			  "\x45\xfb\xe9\xd3\x1d\x39\x2f\xd6"
+			  "\x2a\xda\x09\x12\x11\xaf\xe6\x57"
+			  "\x01\x04\x8a\xff\x86\x8b\xac\xf8"
+			  "\xee\xe4\x1c\x98\x5b\xcf\x6b\x76"
+			  "\xa3\x0e\x33\x74\x40\x18\x39\x72"
+			  "\x66\x50\x31\xfd\x70\xdf\xe8\x51"
+			  "\x96\x21\x36\xb2\x9b\xfa\x85\xd1"
+			  "\x30\x05\xc8\x92\x98\x80\xff\x7a"
+			  "\xaf\x43\x0b\xc5\x20\x41\x92\x20"
+			  "\xd4\xa0\x91\x98\x11\x5f\x4d\xb1",
+		.rlen	= 512,
+	},
+};
+
+static struct cipher_testvec serpent_xts_dec_tv_template[] = {
+	/* Generated from AES-XTS test vectors */
+	/* same as enc vectors with input and result reversed */
+	{
+		.key	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\xe1\x08\xb8\x1d\x2c\xf5\x33\x64"
+			  "\xc8\x12\x04\xc7\xb3\x70\xe8\xc4"
+			  "\x6a\x31\xc5\xf3\x00\xca\xb9\x16"
+			  "\xde\xe2\x77\x66\xf7\xfe\x62\x08",
+		.ilen	= 32,
+		.result	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.rlen	= 32,
+	}, {
+		.key	= "\x11\x11\x11\x11\x11\x11\x11\x11"
+			  "\x11\x11\x11\x11\x11\x11\x11\x11"
+			  "\x22\x22\x22\x22\x22\x22\x22\x22"
+			  "\x22\x22\x22\x22\x22\x22\x22\x22",
+		.klen	= 32,
+		.iv	= "\x33\x33\x33\x33\x33\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x1a\x0a\x09\x5f\xcd\x07\x07\x98"
+			  "\x41\x86\x12\xaf\xb3\xd7\x68\x13"
+			  "\xed\x81\xcd\x06\x87\x43\x1a\xbb"
+			  "\x13\x3d\xd6\x1e\x2b\xe1\x77\xbe",
+		.ilen	= 32,
+		.result	= "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44",
+		.rlen	= 32,
+	}, {
+		.key	= "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
+			  "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
+			  "\x22\x22\x22\x22\x22\x22\x22\x22"
+			  "\x22\x22\x22\x22\x22\x22\x22\x22",
+		.klen	= 32,
+		.iv	= "\x33\x33\x33\x33\x33\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\xf9\x9b\x28\xb8\x5c\xaf\x8c\x61"
+			  "\xb6\x1c\x81\x8f\x2c\x87\x60\x89"
+			  "\x0d\x8d\x7a\xe8\x60\x48\xcc\x86"
+			  "\xc1\x68\x45\xaa\x00\xe9\x24\xc5",
+		.ilen	= 32,
+		.result	= "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44",
+		.rlen	= 32,
+	}, {
+		.key	= "\x27\x18\x28\x18\x28\x45\x90\x45"
+			  "\x23\x53\x60\x28\x74\x71\x35\x26"
+			  "\x31\x41\x59\x26\x53\x58\x97\x93"
+			  "\x23\x84\x62\x64\x33\x83\x27\x95",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\xfe\x47\x4a\xc8\x60\x7e\xb4\x8b"
+			  "\x0d\x10\xf4\xb0\x0d\xba\xf8\x53"
+			  "\x65\x6e\x38\x4b\xdb\xaa\xb1\x9e"
+			  "\x28\xca\xb0\x22\xb3\x85\x75\xf4"
+			  "\x00\x5c\x75\x14\x06\xd6\x25\x82"
+			  "\xe6\xcb\x08\xf7\x29\x90\x23\x8e"
+			  "\xa4\x68\x57\xe4\xf0\xd8\x32\xf3"
+			  "\x80\x51\x67\xb5\x0b\x85\x69\xe8"
+			  "\x19\xfe\xc4\xc7\x3e\xea\x90\xd3"
+			  "\x8f\xa3\xf2\x0a\xac\x17\x4b\xa0"
+			  "\x63\x5a\x16\x0f\xf0\xce\x66\x1f"
+			  "\x2c\x21\x07\xf1\xa4\x03\xa3\x44"
+			  "\x41\x61\x87\x5d\x6b\xb3\xef\xd4"
+			  "\xfc\xaa\x32\x7e\x55\x58\x04\x41"
+			  "\xc9\x07\x33\xc6\xa2\x68\xd6\x5a"
+			  "\x55\x79\x4b\x6f\xcf\x89\xb9\x19"
+			  "\xe5\x54\x13\x15\xb2\x1a\xfa\x15"
+			  "\xc2\xf0\x06\x59\xfa\xa0\x25\x05"
+			  "\x58\xfa\x43\x91\x16\x85\x40\xbb"
+			  "\x0d\x34\x4d\xc5\x1e\x20\xd5\x08"
+			  "\xcd\x22\x22\x41\x11\x9f\x6c\x7c"
+			  "\x8d\x57\xc9\xba\x57\xe8\x2c\xf7"
+			  "\xa0\x42\xa8\xde\xfc\xa3\xca\x98"
+			  "\x4b\x43\xb1\xce\x4b\xbf\x01\x67"
+			  "\x6e\x29\x60\xbd\x10\x14\x84\x82"
+			  "\x83\x82\x0c\x63\x73\x92\x02\x7c"
+			  "\x55\x37\x20\x80\x17\x51\xc8\xbc"
+			  "\x46\x02\xcb\x38\x07\x6d\xe2\x85"
+			  "\xaa\x29\xaf\x24\x58\x0d\xf0\x75"
+			  "\x08\x0a\xa5\x34\x25\x16\xf3\x74"
+			  "\xa7\x0b\x97\xbe\xc1\xa9\xdc\x29"
+			  "\x1a\x0a\x56\xc1\x1a\x91\x97\x8c"
+			  "\x0b\xc7\x16\xed\x5a\x22\xa6\x2e"
+			  "\x8c\x2b\x4f\x54\x76\x47\x53\x8e"
+			  "\xe8\x00\xec\x92\xb9\x55\xe6\xa2"
+			  "\xf3\xe2\x4f\x6a\x66\x60\xd0\x87"
+			  "\xe6\xd1\xcc\xe3\x6a\xc5\x2d\x21"
+			  "\xcc\x9d\x6a\xb6\x75\xaa\xe2\x19"
+			  "\x21\x9f\xa1\x5e\x4c\xfd\x72\xf9"
+			  "\x94\x4e\x63\xc7\xae\xfc\xed\x47"
+			  "\xe2\xfe\x7a\x63\x77\xfe\x97\x82"
+			  "\xb1\x10\x6e\x36\x1d\xe1\xc4\x80"
+			  "\xec\x69\x41\xec\xa7\x8a\xe0\x2f"
+			  "\xe3\x49\x26\xa2\x41\xb2\x08\x0f"
+			  "\x28\xb4\xa7\x39\xa1\x99\x2d\x1e"
+			  "\x43\x42\x35\xd0\xcf\xec\x77\x67"
+			  "\xb2\x3b\x9e\x1c\x35\xde\x4f\x5e"
+			  "\x73\x3f\x5d\x6f\x07\x4b\x2e\x50"
+			  "\xab\x6c\x6b\xff\xea\x00\x67\xaa"
+			  "\x0e\x82\x32\xdd\x3d\xb5\xe5\x76"
+			  "\x2b\x77\x3f\xbe\x12\x75\xfb\x92"
+			  "\xc6\x89\x67\x4d\xca\xf7\xd4\x50"
+			  "\xc0\x74\x47\xcc\xd9\x0a\xd4\xc6"
+			  "\x3b\x17\x2e\xe3\x35\xbb\x53\xb5"
+			  "\x86\xad\x51\xcc\xd5\x96\xb8\xdc"
+			  "\x03\x57\xe6\x98\x52\x2f\x61\x62"
+			  "\xc4\x5c\x9c\x36\x71\x07\xfb\x94"
+			  "\xe3\x02\xc4\x2b\x08\x75\xc7\x35"
+			  "\xfb\x2e\x88\x7b\xbb\x67\x00\xe1"
+			  "\xc9\xdd\x99\xb2\x13\x53\x1a\x4e"
+			  "\x76\x87\x19\x04\x1a\x2f\x38\x3e"
+			  "\xef\x91\x64\x1d\x18\x07\x4e\x31"
+			  "\x88\x21\x7c\xb0\xa5\x12\x4c\x3c"
+			  "\xb0\x20\xbd\xda\xdf\xf9\x7c\xdd",
+		.ilen	= 512,
+		.result	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+			  "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.rlen	= 512,
+	}, {
+		.key	= "\x27\x18\x28\x18\x28\x45\x90\x45"
+			  "\x23\x53\x60\x28\x74\x71\x35\x26"
+			  "\x62\x49\x77\x57\x24\x70\x93\x69"
+			  "\x99\x59\x57\x49\x66\x96\x76\x27"
+			  "\x31\x41\x59\x26\x53\x58\x97\x93"
+			  "\x23\x84\x62\x64\x33\x83\x27\x95"
+			  "\x02\x88\x41\x97\x16\x93\x99\x37"
+			  "\x51\x05\x82\x09\x74\x94\x45\x92",
+		.klen	= 64,
+		.iv	= "\xff\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x2b\xc9\xb4\x6b\x10\x94\xa9\x32"
+			  "\xaa\xb0\x20\xc6\x44\x3d\x74\x1f"
+			  "\x75\x01\xa7\xf6\xf5\xf7\x62\x1b"
+			  "\x80\x1b\x82\xcb\x01\x59\x91\x7f"
+			  "\x80\x3a\x98\xf0\xd2\xca\xc4\xc3"
+			  "\x34\xfd\xe6\x11\xf9\x33\x45\x12"
+			  "\x48\xc5\x8c\x25\xf1\xc5\xc5\x23"
+			  "\xd3\x44\xb4\x73\xd5\x04\xc0\xb7"
+			  "\xca\x2f\xf5\xcd\xc5\xb4\xdd\xb0"
+			  "\xf4\x60\xe8\xfb\xc6\x9c\xc5\x78"
+			  "\xcd\xec\x7d\xdc\x19\x9c\x72\x64"
+			  "\x63\x0b\x38\x2e\x76\xdd\x2d\x36"
+			  "\x49\xb0\x1d\xea\x78\x9e\x00\xca"
+			  "\x20\xcc\x1b\x1e\x98\x74\xab\xed"
+			  "\x79\xf7\xd0\x6c\xd8\x93\x80\x29"
+			  "\xac\xa5\x5e\x34\xa9\xab\xa0\x55"
+			  "\x9a\xea\xaa\x95\x4d\x7b\xfe\x46"
+			  "\x26\x8a\xfd\x88\xa2\xa8\xa6\xae"
+			  "\x25\x42\x17\xbf\x76\x8f\x1c\x3d"
+			  "\xec\x9a\xda\x64\x96\xb5\x61\xff"
+			  "\x99\xeb\x12\x96\x85\x82\x9d\xd5"
+			  "\x81\x85\x14\xa8\x59\xac\x8c\x94"
+			  "\xbb\x3b\x85\x2b\xdf\xb3\x0c\xba"
+			  "\x82\xc6\x4d\xca\x86\xea\x53\x28"
+			  "\x4c\xe0\x4e\x31\xe3\x73\x2f\x79"
+			  "\x9d\x42\xe1\x03\xe3\x8b\xc4\xff"
+			  "\x05\xca\x81\x7b\xda\xa2\xde\x63"
+			  "\x3a\x10\xbe\xc2\xac\x32\xc4\x05"
+			  "\x47\x7e\xef\x67\xe2\x5f\x5b\xae"
+			  "\xed\xf1\x70\x34\x16\x9a\x07\x7b"
+			  "\xf2\x25\x2b\xb0\xf8\x3c\x15\x9a"
+			  "\xa6\x59\x55\x5f\xc1\xf4\x1e\xcd"
+			  "\x93\x1f\x06\xba\xd4\x9a\x22\x69"
+			  "\xfa\x8e\x95\x0d\xf3\x23\x59\x2c"
+			  "\xfe\x00\xba\xf0\x0e\xbc\x6d\xd6"
+			  "\x62\xf0\x7a\x0e\x83\x3e\xdb\x32"
+			  "\xfd\x43\x7d\xda\x42\x51\x87\x43"
+			  "\x9d\xf9\xef\xf4\x30\x97\xf8\x09"
+			  "\x88\xfc\x3f\x93\x70\xc1\x4a\xec"
+			  "\x27\x5f\x11\xac\x71\xc7\x48\x46"
+			  "\x2f\xf9\xdf\x8d\x9f\xf7\x2e\x56"
+			  "\x0d\x4e\xb0\x32\x76\xce\x86\x81"
+			  "\xcd\xdf\xe4\x00\xbf\xfd\x5f\x24"
+			  "\xaf\xf7\x9a\xde\xff\x18\xac\x14"
+			  "\x90\xc5\x01\x39\x34\x0f\x24\xf3"
+			  "\x13\x2f\x5e\x4f\x30\x9a\x36\x40"
+			  "\xec\xea\xbc\xcd\x9e\x0e\x5b\x23"
+			  "\x50\x88\x97\x40\x69\xb1\x37\xf5"
+			  "\xc3\x15\xf9\x3f\xb7\x79\x64\xe8"
+			  "\x7b\x10\x20\xb9\x2b\x46\x83\x5b"
+			  "\xd8\x39\xfc\xe4\xfa\x88\x52\xf2"
+			  "\x72\xb0\x97\x4e\x89\xb3\x48\x00"
+			  "\xc1\x16\x73\x50\x77\xba\xa6\x65"
+			  "\x20\x2d\xb0\x02\x27\x89\xda\x99"
+			  "\x45\xfb\xe9\xd3\x1d\x39\x2f\xd6"
+			  "\x2a\xda\x09\x12\x11\xaf\xe6\x57"
+			  "\x01\x04\x8a\xff\x86\x8b\xac\xf8"
+			  "\xee\xe4\x1c\x98\x5b\xcf\x6b\x76"
+			  "\xa3\x0e\x33\x74\x40\x18\x39\x72"
+			  "\x66\x50\x31\xfd\x70\xdf\xe8\x51"
+			  "\x96\x21\x36\xb2\x9b\xfa\x85\xd1"
+			  "\x30\x05\xc8\x92\x98\x80\xff\x7a"
+			  "\xaf\x43\x0b\xc5\x20\x41\x92\x20"
+			  "\xd4\xa0\x91\x98\x11\x5f\x4d\xb1",
+		.ilen	= 512,
+		.result	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+			  "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.rlen	= 512,
+	},
+};
+
 /* Cast6 test vectors from RFC 2612 */
 #define CAST6_ENC_TEST_VECTORS	3
 #define CAST6_DEC_TEST_VECTORS  3

From 5209c07ac3601cfdbe2edff016e80ad93cee8dbc Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 13:33:22 +0300
Subject: [PATCH 18/54] crypto: tcrypt - add xts(serpent) tests

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/tcrypt.c | 9 +++++++++
 crypto/tcrypt.h | 1 +
 2 files changed, 10 insertions(+)

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 01203833ccc2..a66459594bed 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -998,6 +998,7 @@ static int do_test(int m)
 		ret += tcrypt_test("cbc(serpent)");
 		ret += tcrypt_test("ctr(serpent)");
 		ret += tcrypt_test("lrw(serpent)");
+		ret += tcrypt_test("xts(serpent)");
 		break;
 
 	case 10:
@@ -1315,6 +1316,10 @@ static int do_test(int m)
 				  speed_template_32_48);
 		test_cipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
 				  speed_template_32_48);
+		test_cipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0,
+				  speed_template_32_64);
+		test_cipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0,
+				  speed_template_32_64);
 		break;
 
 	case 300:
@@ -1535,6 +1540,10 @@ static int do_test(int m)
 				   speed_template_32_48);
 		test_acipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
 				   speed_template_32_48);
+		test_acipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0,
+				   speed_template_32_64);
+		test_acipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0,
+				   speed_template_32_64);
 		break;
 
 	case 1000:
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 3eceaef2754d..5be1fc8c1ab3 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -53,6 +53,7 @@ static u8 speed_template_16_24_32[] = {16, 24, 32, 0};
 static u8 speed_template_32_40_48[] = {32, 40, 48, 0};
 static u8 speed_template_32_48[] = {32, 48, 0};
 static u8 speed_template_32_48_64[] = {32, 48, 64, 0};
+static u8 speed_template_32_64[] = {32, 64, 0};
 
 /*
  * Digest speed tests

From aed265b9fef4a6389e81b98b5c5eb5cd80ef5ead Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 13:33:33 +0300
Subject: [PATCH 19/54] crypto: testmgr - add xts(twofish) test vectors

Add test vectors for xts(twofish). These are generated from xts(aes) test vectors.

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/testmgr.c |  15 ++
 crypto/testmgr.h | 681 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 696 insertions(+)

diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 5e349a6bc98a..01553a6754b7 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2588,6 +2588,21 @@ static const struct alg_test_desc alg_test_descs[] = {
 				}
 			}
 		}
+	}, {
+		.alg = "xts(twofish)",
+		.test = alg_test_skcipher,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = tf_xts_enc_tv_template,
+					.count = TF_XTS_ENC_TEST_VECTORS
+				},
+				.dec = {
+					.vecs = tf_xts_dec_tv_template,
+					.count = TF_XTS_DEC_TEST_VECTORS
+				}
+			}
+		}
 	}, {
 		.alg = "zlib",
 		.test = alg_test_pcomp,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 9158a92aa18b..43e84d32b341 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -2719,6 +2719,8 @@ static struct cipher_testvec bf_ctr_dec_tv_template[] = {
 #define TF_CTR_DEC_TEST_VECTORS		2
 #define TF_LRW_ENC_TEST_VECTORS		8
 #define TF_LRW_DEC_TEST_VECTORS		8
+#define TF_XTS_ENC_TEST_VECTORS		5
+#define TF_XTS_DEC_TEST_VECTORS		5
 
 static struct cipher_testvec tf_enc_tv_template[] = {
 	{
@@ -3593,6 +3595,685 @@ static struct cipher_testvec tf_lrw_dec_tv_template[] = {
 	},
 };
 
+static struct cipher_testvec tf_xts_enc_tv_template[] = {
+	/* Generated from AES-XTS test vectors */
+{
+		.key	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.ilen	= 32,
+		.result	= "\x4b\xc9\x44\x4a\x11\xa3\xef\xac"
+			  "\x30\x74\xe4\x44\x52\x77\x97\x43"
+			  "\xa7\x60\xb2\x45\x2e\xf9\x00\x90"
+			  "\x9f\xaa\xfd\x89\x6e\x9d\x4a\xe0",
+		.rlen	= 32,
+	}, {
+		.key	= "\x11\x11\x11\x11\x11\x11\x11\x11"
+			  "\x11\x11\x11\x11\x11\x11\x11\x11"
+			  "\x22\x22\x22\x22\x22\x22\x22\x22"
+			  "\x22\x22\x22\x22\x22\x22\x22\x22",
+		.klen	= 32,
+		.iv	= "\x33\x33\x33\x33\x33\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44",
+		.ilen	= 32,
+		.result	= "\x57\x0e\x8f\xe5\x2a\x35\x61\x4f"
+			  "\x32\xd3\xbd\x36\x05\x15\x44\x2c"
+			  "\x58\x06\xf7\xf8\x00\xa8\xb6\xd5"
+			  "\xc6\x28\x92\xdb\xd8\x34\xa2\xe9",
+		.rlen	= 32,
+	}, {
+		.key	= "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
+			  "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
+			  "\x22\x22\x22\x22\x22\x22\x22\x22"
+			  "\x22\x22\x22\x22\x22\x22\x22\x22",
+		.klen	= 32,
+		.iv	= "\x33\x33\x33\x33\x33\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44",
+		.ilen	= 32,
+		.result	= "\x96\x45\x8f\x8d\x7a\x75\xb1\xde"
+			  "\x40\x0c\x89\x56\xf6\x4d\xa7\x07"
+			  "\x38\xbb\x5b\xe9\xcd\x84\xae\xb2"
+			  "\x7b\x6a\x62\xf4\x8c\xb5\x37\xea",
+		.rlen	= 32,
+	}, {
+		.key	= "\x27\x18\x28\x18\x28\x45\x90\x45"
+			  "\x23\x53\x60\x28\x74\x71\x35\x26"
+			  "\x31\x41\x59\x26\x53\x58\x97\x93"
+			  "\x23\x84\x62\x64\x33\x83\x27\x95",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+			  "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.ilen	= 512,
+		.result	= "\xa9\x78\xae\x1e\xea\xa2\x44\x4c"
+			  "\xa2\x7a\x64\x1f\xaf\x46\xc1\xe0"
+			  "\x6c\xb2\xf3\x92\x9a\xd6\x7d\x58"
+			  "\xb8\x2d\xb9\x5d\x58\x07\x66\x50"
+			  "\xea\x35\x35\x8c\xb2\x46\x61\x06"
+			  "\x5d\x65\xfc\x57\x8f\x69\x74\xab"
+			  "\x8a\x06\x69\xb5\x6c\xda\x66\xc7"
+			  "\x52\x90\xbb\x8e\x6d\x8b\xb5\xa2"
+			  "\x78\x1d\xc2\xa9\xc2\x73\x00\xc3"
+			  "\x32\x36\x7c\x97\x6b\x4e\x8a\x50"
+			  "\xe4\x91\x83\x96\x8f\xf4\x94\x1a"
+			  "\xa6\x27\xe1\x33\xcb\x91\xc6\x5f"
+			  "\x94\x75\xbc\xd7\x3e\x3e\x6f\x9e"
+			  "\xa9\x31\x80\x5e\xe5\xdb\xc8\x53"
+			  "\x01\x73\x68\x32\x25\x19\xfa\xfb"
+			  "\xe4\xcf\xb9\x3e\xa2\xa0\x8f\x31"
+			  "\xbf\x54\x06\x93\xa8\xb1\x0f\xb6"
+			  "\x7c\x3c\xde\x6f\x0f\xfb\x0c\x11"
+			  "\x39\x80\x39\x09\x97\x65\xf2\x83"
+			  "\xae\xe6\xa1\x6f\x47\xb8\x49\xde"
+			  "\x99\x36\x20\x7d\x97\x3b\xec\xfa"
+			  "\xb4\x33\x6e\x7a\xc7\x46\x84\x49"
+			  "\x91\xcd\xe1\x57\x0d\xed\x40\x08"
+			  "\x13\xf1\x4e\x3e\xa4\xa4\x5c\xe6"
+			  "\xd2\x0c\x20\x8f\x3e\xdf\x3f\x47"
+			  "\x9a\x2f\xde\x6d\x66\xc9\x99\x4a"
+			  "\x2d\x9e\x9d\x4b\x1a\x27\xa2\x12"
+			  "\x99\xf0\xf8\xb1\xb6\xf6\x57\xc3"
+			  "\xca\x1c\xa3\x8e\xed\x39\x28\xb5"
+			  "\x10\x1b\x4b\x08\x42\x00\x4a\xd3"
+			  "\xad\x5a\xc6\x8e\xc8\xbb\x95\xc4"
+			  "\x4b\xaa\xfe\xd5\x42\xa8\xa3\x6d"
+			  "\x3c\xf3\x34\x91\x2d\xb4\xdd\x20"
+			  "\x0c\x90\x6d\xa3\x9b\x66\x9d\x24"
+			  "\x02\xa6\xa9\x3f\x3f\x58\x5d\x47"
+			  "\x24\x65\x63\x7e\xbd\x8c\xe6\x52"
+			  "\x7d\xef\x33\x53\x63\xec\xaa\x0b"
+			  "\x64\x15\xa9\xa6\x1f\x10\x00\x38"
+			  "\x35\xa8\xe7\xbe\x23\x70\x22\xe0"
+			  "\xd3\xb9\xe6\xfd\xe6\xaa\x03\x50"
+			  "\xf3\x3c\x27\x36\x8b\xcc\xfe\x9c"
+			  "\x9c\xa3\xb3\xe7\x68\x9b\xa2\x71"
+			  "\xe0\x07\xd9\x1f\x68\x1f\xac\x5e"
+			  "\x7a\x74\x85\xa9\x6a\x90\xab\x2c"
+			  "\x38\x51\xbc\x1f\x43\x4a\x56\x1c"
+			  "\xf8\x47\x03\x4e\x67\xa8\x1f\x99"
+			  "\x04\x39\x73\x32\xb2\x86\x79\xe7"
+			  "\x14\x28\x70\xb8\xe2\x7d\x69\x85"
+			  "\xb6\x0f\xc5\xd0\xd0\x01\x5c\xe6"
+			  "\x09\x0f\x75\xf7\xb6\x81\xd2\x11"
+			  "\x20\x9c\xa1\xee\x11\x44\x79\xd0"
+			  "\xb2\x34\x77\xda\x10\x9a\x6f\x6f"
+			  "\xef\x7c\xd9\xdc\x35\xb7\x61\xdd"
+			  "\xf1\xa4\xc6\x1c\xbf\x05\x22\xac"
+			  "\xfe\x2f\x85\x00\x44\xdf\x33\x16"
+			  "\x35\xb6\xa3\xd3\x70\xdf\x69\x35"
+			  "\x6a\xc7\xb4\x99\x45\x27\xc8\x8e"
+			  "\x5a\x14\x30\xd0\x55\x3e\x4f\x64"
+			  "\x0d\x38\xe3\xdf\x8b\xa8\x93\x26"
+			  "\x75\xae\xf6\xb5\x23\x0b\x17\x31"
+			  "\xbf\x27\xb8\xb5\x94\x31\xa7\x8f"
+			  "\x43\xc4\x46\x24\x22\x4f\x8f\x7e"
+			  "\xe5\xf4\x6d\x1e\x0e\x18\x7a\xbb"
+			  "\xa6\x8f\xfb\x49\x49\xd8\x7e\x5a",
+		.rlen	= 512,
+	}, {
+		.key	= "\x27\x18\x28\x18\x28\x45\x90\x45"
+			  "\x23\x53\x60\x28\x74\x71\x35\x26"
+			  "\x62\x49\x77\x57\x24\x70\x93\x69"
+			  "\x99\x59\x57\x49\x66\x96\x76\x27"
+			  "\x31\x41\x59\x26\x53\x58\x97\x93"
+			  "\x23\x84\x62\x64\x33\x83\x27\x95"
+			  "\x02\x88\x41\x97\x16\x93\x99\x37"
+			  "\x51\x05\x82\x09\x74\x94\x45\x92",
+		.klen	= 64,
+		.iv	= "\xff\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+			  "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.ilen	= 512,
+		.result	= "\xd7\x4b\x93\x7d\x13\xa2\xa2\xe1"
+			  "\x35\x39\x71\x88\x76\x1e\xc9\xea"
+			  "\x86\xad\xf3\x14\x48\x3d\x5e\xe9"
+			  "\xe9\x2d\xb2\x56\x59\x35\x9d\xec"
+			  "\x84\xfa\x7e\x9d\x6d\x33\x36\x8f"
+			  "\xce\xf4\xa9\x21\x0b\x5f\x96\xec"
+			  "\xcb\xf9\x57\x68\x33\x88\x39\xbf"
+			  "\x2f\xbb\x59\x03\xbd\x66\x8b\x11"
+			  "\x11\x65\x51\x2e\xb8\x67\x05\xd1"
+			  "\x27\x11\x5c\xd4\xcc\x97\xc2\xb3"
+			  "\xa9\x55\xaf\x07\x56\xd1\xdc\xf5"
+			  "\x85\xdc\x46\xe6\xf0\x24\xeb\x93"
+			  "\x4d\xf0\x9b\xf5\x73\x1c\xda\x03"
+			  "\x22\xc8\x3a\x4f\xb4\x19\x91\x09"
+			  "\x54\x0b\xf6\xfe\x17\x3d\x1a\x53"
+			  "\x72\x60\x79\xcb\x0e\x32\x8a\x77"
+			  "\xd5\xed\xdb\x33\xd7\x62\x16\x69"
+			  "\x63\xe0\xab\xb5\xf6\x9c\x5f\x3d"
+			  "\x69\x35\x61\x86\xf8\x86\xb9\x89"
+			  "\x6e\x59\x35\xac\xf6\x6b\x33\xa0"
+			  "\xea\xef\x96\x62\xd8\xa9\xcf\x56"
+			  "\xbf\xdb\x8a\xfd\xa1\x82\x77\x73"
+			  "\x3d\x94\x4a\x49\x42\x6d\x08\x60"
+			  "\xa1\xea\xab\xb6\x88\x13\x94\xb8"
+			  "\x51\x98\xdb\x35\x85\xdf\xf6\xb9"
+			  "\x8f\xcd\xdf\x80\xd3\x40\x2d\x72"
+			  "\xb8\xb2\x6c\x02\x43\x35\x22\x2a"
+			  "\x31\xed\xcd\x16\x19\xdf\x62\x0f"
+			  "\x29\xcf\x87\x04\xec\x02\x4f\xe4"
+			  "\xa2\xed\x73\xc6\x69\xd3\x7e\x89"
+			  "\x0b\x76\x10\x7c\xd6\xf9\x6a\x25"
+			  "\xed\xcc\x60\x5d\x61\x20\xc1\x97"
+			  "\x56\x91\x57\x28\xbe\x71\x0d\xcd"
+			  "\xde\xc4\x9e\x55\x91\xbe\xd1\x28"
+			  "\x9b\x90\xeb\x73\xf3\x68\x51\xc6"
+			  "\xdf\x82\xcc\xd8\x1f\xce\x5b\x27"
+			  "\xc0\x60\x5e\x33\xd6\xa7\x20\xea"
+			  "\xb2\x54\xc7\x5d\x6a\x3b\x67\x47"
+			  "\xcf\xa0\xe3\xab\x86\xaf\xc1\x42"
+			  "\xe6\xb0\x23\x4a\xaf\x53\xdf\xa0"
+			  "\xad\x12\x32\x31\x03\xf7\x21\xbe"
+			  "\x2d\xd5\x82\x42\xb6\x4a\x3d\xcd"
+			  "\xd8\x81\x77\xa9\x49\x98\x6c\x09"
+			  "\xc5\xa3\x61\x12\x62\x85\x6b\xcd"
+			  "\xb3\xf4\x20\x0c\x41\xc4\x05\x37"
+			  "\x46\x5f\xeb\x71\x8b\xf1\xaf\x6e"
+			  "\xba\xf3\x50\x2e\xfe\xa8\x37\xeb"
+			  "\xe8\x8c\x4f\xa4\x0c\xf1\x31\xc8"
+			  "\x6e\x71\x4f\xa5\xd7\x97\x73\xe0"
+			  "\x93\x4a\x2f\xda\x7b\xe0\x20\x54"
+			  "\x1f\x8d\x85\x79\x0b\x7b\x5e\x75"
+			  "\xb9\x07\x67\xcc\xc8\xe7\x21\x15"
+			  "\xa7\xc8\x98\xff\x4b\x80\x1c\x12"
+			  "\xa8\x54\xe1\x38\x52\xe6\x74\x81"
+			  "\x97\x47\xa1\x41\x0e\xc0\x50\xe3"
+			  "\x55\x0e\xc3\xa7\x70\x77\xce\x07"
+			  "\xed\x8c\x88\xe6\xa1\x5b\x14\xec"
+			  "\xe6\xde\x06\x6d\x74\xc5\xd9\xfa"
+			  "\xe5\x2f\x5a\xff\xc8\x05\xee\x27"
+			  "\x35\x61\xbf\x0b\x19\x78\x9b\xd2"
+			  "\x04\xc7\x05\xb1\x79\xb4\xff\x5f"
+			  "\xf3\xea\x67\x52\x78\xc2\xce\x70"
+			  "\xa4\x05\x0b\xb2\xb3\xa8\x30\x97"
+			  "\x37\x30\xe1\x91\x8d\xb3\x2a\xff",
+		.rlen	= 512,
+	},
+};
+
+static struct cipher_testvec tf_xts_dec_tv_template[] = {
+	/* Generated from AES-XTS test vectors */
+	/* same as enc vectors with input and result reversed */
+	{
+		.key	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x4b\xc9\x44\x4a\x11\xa3\xef\xac"
+			  "\x30\x74\xe4\x44\x52\x77\x97\x43"
+			  "\xa7\x60\xb2\x45\x2e\xf9\x00\x90"
+			  "\x9f\xaa\xfd\x89\x6e\x9d\x4a\xe0",
+		.ilen	= 32,
+		.result	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.rlen	= 32,
+	}, {
+		.key	= "\x11\x11\x11\x11\x11\x11\x11\x11"
+			  "\x11\x11\x11\x11\x11\x11\x11\x11"
+			  "\x22\x22\x22\x22\x22\x22\x22\x22"
+			  "\x22\x22\x22\x22\x22\x22\x22\x22",
+		.klen	= 32,
+		.iv	= "\x33\x33\x33\x33\x33\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x57\x0e\x8f\xe5\x2a\x35\x61\x4f"
+			  "\x32\xd3\xbd\x36\x05\x15\x44\x2c"
+			  "\x58\x06\xf7\xf8\x00\xa8\xb6\xd5"
+			  "\xc6\x28\x92\xdb\xd8\x34\xa2\xe9",
+		.ilen	= 32,
+		.result	= "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44",
+		.rlen	= 32,
+	}, {
+		.key	= "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
+			  "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
+			  "\x22\x22\x22\x22\x22\x22\x22\x22"
+			  "\x22\x22\x22\x22\x22\x22\x22\x22",
+		.klen	= 32,
+		.iv	= "\x33\x33\x33\x33\x33\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x96\x45\x8f\x8d\x7a\x75\xb1\xde"
+			  "\x40\x0c\x89\x56\xf6\x4d\xa7\x07"
+			  "\x38\xbb\x5b\xe9\xcd\x84\xae\xb2"
+			  "\x7b\x6a\x62\xf4\x8c\xb5\x37\xea",
+		.ilen	= 32,
+		.result	= "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44"
+			  "\x44\x44\x44\x44\x44\x44\x44\x44",
+		.rlen	= 32,
+	}, {
+		.key	= "\x27\x18\x28\x18\x28\x45\x90\x45"
+			  "\x23\x53\x60\x28\x74\x71\x35\x26"
+			  "\x31\x41\x59\x26\x53\x58\x97\x93"
+			  "\x23\x84\x62\x64\x33\x83\x27\x95",
+		.klen	= 32,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\xa9\x78\xae\x1e\xea\xa2\x44\x4c"
+			  "\xa2\x7a\x64\x1f\xaf\x46\xc1\xe0"
+			  "\x6c\xb2\xf3\x92\x9a\xd6\x7d\x58"
+			  "\xb8\x2d\xb9\x5d\x58\x07\x66\x50"
+			  "\xea\x35\x35\x8c\xb2\x46\x61\x06"
+			  "\x5d\x65\xfc\x57\x8f\x69\x74\xab"
+			  "\x8a\x06\x69\xb5\x6c\xda\x66\xc7"
+			  "\x52\x90\xbb\x8e\x6d\x8b\xb5\xa2"
+			  "\x78\x1d\xc2\xa9\xc2\x73\x00\xc3"
+			  "\x32\x36\x7c\x97\x6b\x4e\x8a\x50"
+			  "\xe4\x91\x83\x96\x8f\xf4\x94\x1a"
+			  "\xa6\x27\xe1\x33\xcb\x91\xc6\x5f"
+			  "\x94\x75\xbc\xd7\x3e\x3e\x6f\x9e"
+			  "\xa9\x31\x80\x5e\xe5\xdb\xc8\x53"
+			  "\x01\x73\x68\x32\x25\x19\xfa\xfb"
+			  "\xe4\xcf\xb9\x3e\xa2\xa0\x8f\x31"
+			  "\xbf\x54\x06\x93\xa8\xb1\x0f\xb6"
+			  "\x7c\x3c\xde\x6f\x0f\xfb\x0c\x11"
+			  "\x39\x80\x39\x09\x97\x65\xf2\x83"
+			  "\xae\xe6\xa1\x6f\x47\xb8\x49\xde"
+			  "\x99\x36\x20\x7d\x97\x3b\xec\xfa"
+			  "\xb4\x33\x6e\x7a\xc7\x46\x84\x49"
+			  "\x91\xcd\xe1\x57\x0d\xed\x40\x08"
+			  "\x13\xf1\x4e\x3e\xa4\xa4\x5c\xe6"
+			  "\xd2\x0c\x20\x8f\x3e\xdf\x3f\x47"
+			  "\x9a\x2f\xde\x6d\x66\xc9\x99\x4a"
+			  "\x2d\x9e\x9d\x4b\x1a\x27\xa2\x12"
+			  "\x99\xf0\xf8\xb1\xb6\xf6\x57\xc3"
+			  "\xca\x1c\xa3\x8e\xed\x39\x28\xb5"
+			  "\x10\x1b\x4b\x08\x42\x00\x4a\xd3"
+			  "\xad\x5a\xc6\x8e\xc8\xbb\x95\xc4"
+			  "\x4b\xaa\xfe\xd5\x42\xa8\xa3\x6d"
+			  "\x3c\xf3\x34\x91\x2d\xb4\xdd\x20"
+			  "\x0c\x90\x6d\xa3\x9b\x66\x9d\x24"
+			  "\x02\xa6\xa9\x3f\x3f\x58\x5d\x47"
+			  "\x24\x65\x63\x7e\xbd\x8c\xe6\x52"
+			  "\x7d\xef\x33\x53\x63\xec\xaa\x0b"
+			  "\x64\x15\xa9\xa6\x1f\x10\x00\x38"
+			  "\x35\xa8\xe7\xbe\x23\x70\x22\xe0"
+			  "\xd3\xb9\xe6\xfd\xe6\xaa\x03\x50"
+			  "\xf3\x3c\x27\x36\x8b\xcc\xfe\x9c"
+			  "\x9c\xa3\xb3\xe7\x68\x9b\xa2\x71"
+			  "\xe0\x07\xd9\x1f\x68\x1f\xac\x5e"
+			  "\x7a\x74\x85\xa9\x6a\x90\xab\x2c"
+			  "\x38\x51\xbc\x1f\x43\x4a\x56\x1c"
+			  "\xf8\x47\x03\x4e\x67\xa8\x1f\x99"
+			  "\x04\x39\x73\x32\xb2\x86\x79\xe7"
+			  "\x14\x28\x70\xb8\xe2\x7d\x69\x85"
+			  "\xb6\x0f\xc5\xd0\xd0\x01\x5c\xe6"
+			  "\x09\x0f\x75\xf7\xb6\x81\xd2\x11"
+			  "\x20\x9c\xa1\xee\x11\x44\x79\xd0"
+			  "\xb2\x34\x77\xda\x10\x9a\x6f\x6f"
+			  "\xef\x7c\xd9\xdc\x35\xb7\x61\xdd"
+			  "\xf1\xa4\xc6\x1c\xbf\x05\x22\xac"
+			  "\xfe\x2f\x85\x00\x44\xdf\x33\x16"
+			  "\x35\xb6\xa3\xd3\x70\xdf\x69\x35"
+			  "\x6a\xc7\xb4\x99\x45\x27\xc8\x8e"
+			  "\x5a\x14\x30\xd0\x55\x3e\x4f\x64"
+			  "\x0d\x38\xe3\xdf\x8b\xa8\x93\x26"
+			  "\x75\xae\xf6\xb5\x23\x0b\x17\x31"
+			  "\xbf\x27\xb8\xb5\x94\x31\xa7\x8f"
+			  "\x43\xc4\x46\x24\x22\x4f\x8f\x7e"
+			  "\xe5\xf4\x6d\x1e\x0e\x18\x7a\xbb"
+			  "\xa6\x8f\xfb\x49\x49\xd8\x7e\x5a",
+		.ilen	= 512,
+		.result	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+			  "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.rlen	= 512,
+	}, {
+		.key	= "\x27\x18\x28\x18\x28\x45\x90\x45"
+			  "\x23\x53\x60\x28\x74\x71\x35\x26"
+			  "\x62\x49\x77\x57\x24\x70\x93\x69"
+			  "\x99\x59\x57\x49\x66\x96\x76\x27"
+			  "\x31\x41\x59\x26\x53\x58\x97\x93"
+			  "\x23\x84\x62\x64\x33\x83\x27\x95"
+			  "\x02\x88\x41\x97\x16\x93\x99\x37"
+			  "\x51\x05\x82\x09\x74\x94\x45\x92",
+		.klen	= 64,
+		.iv	= "\xff\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\xd7\x4b\x93\x7d\x13\xa2\xa2\xe1"
+			  "\x35\x39\x71\x88\x76\x1e\xc9\xea"
+			  "\x86\xad\xf3\x14\x48\x3d\x5e\xe9"
+			  "\xe9\x2d\xb2\x56\x59\x35\x9d\xec"
+			  "\x84\xfa\x7e\x9d\x6d\x33\x36\x8f"
+			  "\xce\xf4\xa9\x21\x0b\x5f\x96\xec"
+			  "\xcb\xf9\x57\x68\x33\x88\x39\xbf"
+			  "\x2f\xbb\x59\x03\xbd\x66\x8b\x11"
+			  "\x11\x65\x51\x2e\xb8\x67\x05\xd1"
+			  "\x27\x11\x5c\xd4\xcc\x97\xc2\xb3"
+			  "\xa9\x55\xaf\x07\x56\xd1\xdc\xf5"
+			  "\x85\xdc\x46\xe6\xf0\x24\xeb\x93"
+			  "\x4d\xf0\x9b\xf5\x73\x1c\xda\x03"
+			  "\x22\xc8\x3a\x4f\xb4\x19\x91\x09"
+			  "\x54\x0b\xf6\xfe\x17\x3d\x1a\x53"
+			  "\x72\x60\x79\xcb\x0e\x32\x8a\x77"
+			  "\xd5\xed\xdb\x33\xd7\x62\x16\x69"
+			  "\x63\xe0\xab\xb5\xf6\x9c\x5f\x3d"
+			  "\x69\x35\x61\x86\xf8\x86\xb9\x89"
+			  "\x6e\x59\x35\xac\xf6\x6b\x33\xa0"
+			  "\xea\xef\x96\x62\xd8\xa9\xcf\x56"
+			  "\xbf\xdb\x8a\xfd\xa1\x82\x77\x73"
+			  "\x3d\x94\x4a\x49\x42\x6d\x08\x60"
+			  "\xa1\xea\xab\xb6\x88\x13\x94\xb8"
+			  "\x51\x98\xdb\x35\x85\xdf\xf6\xb9"
+			  "\x8f\xcd\xdf\x80\xd3\x40\x2d\x72"
+			  "\xb8\xb2\x6c\x02\x43\x35\x22\x2a"
+			  "\x31\xed\xcd\x16\x19\xdf\x62\x0f"
+			  "\x29\xcf\x87\x04\xec\x02\x4f\xe4"
+			  "\xa2\xed\x73\xc6\x69\xd3\x7e\x89"
+			  "\x0b\x76\x10\x7c\xd6\xf9\x6a\x25"
+			  "\xed\xcc\x60\x5d\x61\x20\xc1\x97"
+			  "\x56\x91\x57\x28\xbe\x71\x0d\xcd"
+			  "\xde\xc4\x9e\x55\x91\xbe\xd1\x28"
+			  "\x9b\x90\xeb\x73\xf3\x68\x51\xc6"
+			  "\xdf\x82\xcc\xd8\x1f\xce\x5b\x27"
+			  "\xc0\x60\x5e\x33\xd6\xa7\x20\xea"
+			  "\xb2\x54\xc7\x5d\x6a\x3b\x67\x47"
+			  "\xcf\xa0\xe3\xab\x86\xaf\xc1\x42"
+			  "\xe6\xb0\x23\x4a\xaf\x53\xdf\xa0"
+			  "\xad\x12\x32\x31\x03\xf7\x21\xbe"
+			  "\x2d\xd5\x82\x42\xb6\x4a\x3d\xcd"
+			  "\xd8\x81\x77\xa9\x49\x98\x6c\x09"
+			  "\xc5\xa3\x61\x12\x62\x85\x6b\xcd"
+			  "\xb3\xf4\x20\x0c\x41\xc4\x05\x37"
+			  "\x46\x5f\xeb\x71\x8b\xf1\xaf\x6e"
+			  "\xba\xf3\x50\x2e\xfe\xa8\x37\xeb"
+			  "\xe8\x8c\x4f\xa4\x0c\xf1\x31\xc8"
+			  "\x6e\x71\x4f\xa5\xd7\x97\x73\xe0"
+			  "\x93\x4a\x2f\xda\x7b\xe0\x20\x54"
+			  "\x1f\x8d\x85\x79\x0b\x7b\x5e\x75"
+			  "\xb9\x07\x67\xcc\xc8\xe7\x21\x15"
+			  "\xa7\xc8\x98\xff\x4b\x80\x1c\x12"
+			  "\xa8\x54\xe1\x38\x52\xe6\x74\x81"
+			  "\x97\x47\xa1\x41\x0e\xc0\x50\xe3"
+			  "\x55\x0e\xc3\xa7\x70\x77\xce\x07"
+			  "\xed\x8c\x88\xe6\xa1\x5b\x14\xec"
+			  "\xe6\xde\x06\x6d\x74\xc5\xd9\xfa"
+			  "\xe5\x2f\x5a\xff\xc8\x05\xee\x27"
+			  "\x35\x61\xbf\x0b\x19\x78\x9b\xd2"
+			  "\x04\xc7\x05\xb1\x79\xb4\xff\x5f"
+			  "\xf3\xea\x67\x52\x78\xc2\xce\x70"
+			  "\xa4\x05\x0b\xb2\xb3\xa8\x30\x97"
+			  "\x37\x30\xe1\x91\x8d\xb3\x2a\xff",
+		.ilen	= 512,
+		.result	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
+			  "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x10\x11\x12\x13\x14\x15\x16\x17"
+			  "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+			  "\x20\x21\x22\x23\x24\x25\x26\x27"
+			  "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+			  "\x30\x31\x32\x33\x34\x35\x36\x37"
+			  "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+			  "\x40\x41\x42\x43\x44\x45\x46\x47"
+			  "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+			  "\x50\x51\x52\x53\x54\x55\x56\x57"
+			  "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+			  "\x60\x61\x62\x63\x64\x65\x66\x67"
+			  "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+			  "\x70\x71\x72\x73\x74\x75\x76\x77"
+			  "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+			  "\x80\x81\x82\x83\x84\x85\x86\x87"
+			  "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+			  "\x90\x91\x92\x93\x94\x95\x96\x97"
+			  "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+			  "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+			  "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+			  "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+			  "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+			  "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+			  "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+			  "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
+			  "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+			  "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
+			  "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+		.rlen	= 512,
+	},
+};
+
 /*
  * Serpent test vectors.  These are backwards because Serpent writes
  * octet sequences in right-to-left mode.

From 131f754161bc01fcf7fbbb08c754ed0e5a62b524 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 13:33:38 +0300
Subject: [PATCH 20/54] crypto: tcrypt - add xts(twofish) tests

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/tcrypt.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index a66459594bed..7736a9f05aba 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -991,6 +991,7 @@ static int do_test(int m)
 		ret += tcrypt_test("cbc(twofish)");
 		ret += tcrypt_test("ctr(twofish)");
 		ret += tcrypt_test("lrw(twofish)");
+		ret += tcrypt_test("xts(twofish)");
 		break;
 
 	case 9:
@@ -1255,6 +1256,10 @@ static int do_test(int m)
 				speed_template_32_40_48);
 		test_cipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0,
 				speed_template_32_40_48);
+		test_cipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0,
+				speed_template_32_48_64);
+		test_cipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0,
+				speed_template_32_48_64);
 		break;
 
 	case 203:

From bae6d3038b7faff187f4207448a40b9912cf787d Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 18 Oct 2011 13:33:43 +0300
Subject: [PATCH 21/54] crypto: twofish-x86_64-3way - add xts support

This patch adds XTS support to twofish-x86_64-3way by using the xts_crypt()
helper. The patch has been tested with tcrypt and automated filesystem tests.

Tcrypt benchmark results (twofish-3way/twofish-asm speed ratios):

Intel Celeron T1600 (fam:6, model:15, step:13):

size    xts-enc xts-dec
16B     0.98x   1.00x
64B     1.14x   1.15x
256B    1.23x   1.25x
1024B   1.26x   1.29x
8192B   1.28x   1.30x

AMD Phenom II 1055T (fam:16, model:10):

size    xts-enc xts-dec
16B     1.03x   1.03x
64B     1.13x   1.16x
256B    1.20x   1.20x
1024B   1.22x   1.22x
8192B   1.22x   1.21x

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 arch/x86/crypto/twofish_glue_3way.c | 123 +++++++++++++++++++++++++++-
 1 file changed, 119 insertions(+), 4 deletions(-)

diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index fa9151df3637..954f59eeb7b4 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -33,11 +33,16 @@
 #include <crypto/twofish.h>
 #include <crypto/b128ops.h>
 #include <crypto/lrw.h>
+#include <crypto/xts.h>
 
 #if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
 #define HAS_LRW
 #endif
 
+#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
+#define HAS_XTS
+#endif
+
 /* regular block cipher functions from twofish_x86_64 module */
 asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
 				const u8 *src);
@@ -437,7 +442,7 @@ static struct crypto_alg blk_ctr_alg = {
 	},
 };
 
-#ifdef HAS_LRW
+#if defined(HAS_LRW) || defined(HAS_XTS)
 
 static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
 {
@@ -469,6 +474,10 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
 		twofish_dec_blk(ctx, srcdst, srcdst);
 }
 
+#endif
+
+#ifdef HAS_LRW
+
 struct twofish_lrw_ctx {
 	struct lrw_table_ctx lrw_table;
 	struct twofish_ctx twofish_ctx;
@@ -555,6 +564,99 @@ static struct crypto_alg blk_lrw_alg = {
 
 #endif
 
+#ifdef HAS_XTS
+
+struct twofish_xts_ctx {
+	struct twofish_ctx tweak_ctx;
+	struct twofish_ctx crypt_ctx;
+};
+
+static int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
+			      unsigned int keylen)
+{
+	struct twofish_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	u32 *flags = &tfm->crt_flags;
+	int err;
+
+	/* key consists of keys of equal size concatenated, therefore
+	 * the length must be even
+	 */
+	if (keylen % 2) {
+		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+
+	/* first half of xts-key is for crypt */
+	err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags);
+	if (err)
+		return err;
+
+	/* second half of xts-key is for tweak */
+	return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2,
+				flags);
+}
+
+static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	be128 buf[3];
+	struct xts_crypt_req req = {
+		.tbuf = buf,
+		.tbuflen = sizeof(buf),
+
+		.tweak_ctx = &ctx->tweak_ctx,
+		.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
+		.crypt_ctx = &ctx->crypt_ctx,
+		.crypt_fn = encrypt_callback,
+	};
+
+	return xts_crypt(desc, dst, src, nbytes, &req);
+}
+
+static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	be128 buf[3];
+	struct xts_crypt_req req = {
+		.tbuf = buf,
+		.tbuflen = sizeof(buf),
+
+		.tweak_ctx = &ctx->tweak_ctx,
+		.tweak_fn = XTS_TWEAK_CAST(twofish_enc_blk),
+		.crypt_ctx = &ctx->crypt_ctx,
+		.crypt_fn = decrypt_callback,
+	};
+
+	return xts_crypt(desc, dst, src, nbytes, &req);
+}
+
+static struct crypto_alg blk_xts_alg = {
+	.cra_name		= "xts(twofish)",
+	.cra_driver_name	= "xts-twofish-3way",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= TF_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct twofish_xts_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(blk_xts_alg.cra_list),
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize	= TF_MIN_KEY_SIZE * 2,
+			.max_keysize	= TF_MAX_KEY_SIZE * 2,
+			.ivsize		= TF_BLOCK_SIZE,
+			.setkey		= xts_twofish_setkey,
+			.encrypt	= xts_encrypt,
+			.decrypt	= xts_decrypt,
+		},
+	},
+};
+
+#endif
+
 int __init init(void)
 {
 	int err;
@@ -573,13 +675,23 @@ int __init init(void)
 	if (err)
 		goto blk_lrw_err;
 #endif
+#ifdef HAS_XTS
+	err = crypto_register_alg(&blk_xts_alg);
+	if (err)
+		goto blk_xts_err;
+#endif
 
 	return 0;
 
-#ifdef HAS_LRW
-blk_lrw_err:
-	crypto_unregister_alg(&blk_ctr_alg);
+#ifdef HAS_XTS
+	crypto_unregister_alg(&blk_xts_alg);
+blk_xts_err:
 #endif
+#ifdef HAS_LRW
+	crypto_unregister_alg(&blk_lrw_alg);
+blk_lrw_err:
+#endif
+	crypto_unregister_alg(&blk_ctr_alg);
 ctr_err:
 	crypto_unregister_alg(&blk_cbc_alg);
 cbc_err:
@@ -590,6 +702,9 @@ ecb_err:
 
 void __exit fini(void)
 {
+#ifdef HAS_XTS
+	crypto_unregister_alg(&blk_xts_alg);
+#endif
 #ifdef HAS_LRW
 	crypto_unregister_alg(&blk_lrw_alg);
 #endif

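As a usage aside (not part of the patch series; the function name and error
handling below are illustrative only), a kernel-side consumer of this era could
drive the newly registered "xts(twofish)" algorithm through the legacy
blkcipher API roughly as sketched here, with the key being two equal-size
Twofish keys concatenated, exactly as xts_twofish_setkey() enforces:

#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: encrypt 'len' bytes of 'buf' in place. */
static int xts_twofish_demo(const u8 *key, unsigned int keylen,
			    const u8 *iv, u8 *buf, unsigned int len)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_blkcipher("xts(twofish)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* key = crypt key || tweak key, so keylen is 32, 48 or 64 bytes */
	err = crypto_blkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	desc.tfm = tfm;
	desc.flags = 0;

	/* the IV carries the 16-byte XTS tweak (e.g. the sector number) */
	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));

	sg_init_one(&sg, buf, len);
	/* len must be a multiple of TF_BLOCK_SIZE (16) */
	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
out:
	crypto_free_blkcipher(tfm);
	return err;
}
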
From 505172e11f5a0d9916e20e40d3b0a6f87d3a59b6 Mon Sep 17 00:00:00 2001
From: Jarod Wilson <jarod@redhat.com>
Date: Wed, 9 Nov 2011 12:04:06 +0800
Subject: [PATCH 22/54] crypto: ansi_cprng - enforce key != seed in fips mode

Apparently, NIST is tightening up its requirements for FIPS validation
with respect to RNGs. It's always been required that in fips mode, the
ansi cprng not be fed key and seed material that is identical, but
they're now interpreting FIPS 140-2, section AS07.09 as requiring that
the implementation itself enforce the requirement. Easy fix: we
just do a memcmp of key and seed in fips_cprng_reset and call it a day.

v2: Per Neil's advice, ensure slen is sufficiently long before we
compare key and seed to avoid looking at potentially unallocated mem.

CC: Stephan Mueller <smueller@atsec.com>
CC: Steve Grubb <sgrubb@redhat.com>
Signed-off-by: Jarod Wilson <jarod@redhat.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/ansi_cprng.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index ffa0245e2abc..6ddd99e6114b 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -414,10 +414,18 @@ static int fips_cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
 static int fips_cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
 {
 	u8 rdata[DEFAULT_BLK_SZ];
+	u8 *key = seed + DEFAULT_BLK_SZ;
 	int rc;
 
 	struct prng_context *prng = crypto_rng_ctx(tfm);
 
+	if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
+		return -EINVAL;
+
+	/* fips strictly requires seed != key */
+	if (!memcmp(seed, key, DEFAULT_PRNG_KSZ))
+		return -EINVAL;
+
 	rc = cprng_reset(tfm, seed, slen);
 
 	if (!rc)

From ce3fd840f588d85a8c1be651cf90fa1ba1f029e9 Mon Sep 17 00:00:00 2001
From: Steffen Klassert <steffen.klassert@secunet.com>
Date: Tue, 8 Nov 2011 10:09:17 +0100
Subject: [PATCH 23/54] crypto: Unlink and free instances when deleted

We leak the crypto instance when we unregister it with
crypto_del_alg(). Therefore introduce crypto_unregister_instance() to
unlink the crypto instance from its template's instance list and to
free the instance's resources properly.
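
A hedged aside (the wrapper below is illustrative, not from the patch):
the point of the new helper is that template instances need a teardown
path that mirrors crypto_register_instance().

#include <crypto/algapi.h>

/* Illustrative only: teardown now mirrors crypto_register_instance(). */
static int example_drop_instance(struct crypto_instance *inst)
{
	/*
	 * Before this patch, the netlink delete path ended in
	 * crypto_unregister_alg(), which left inst on its template's
	 * list and never called tmpl->free(inst), so inst leaked.
	 */
	return crypto_unregister_instance(&inst->alg);
}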

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/algapi.c         | 29 +++++++++++++++++++++++++++++
 crypto/crypto_user.c    |  2 +-
 include/crypto/algapi.h |  1 +
 3 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/crypto/algapi.c b/crypto/algapi.c
index 54dd4e33b5d6..9d4a9fe913f8 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -518,6 +518,35 @@ err:
 }
 EXPORT_SYMBOL_GPL(crypto_register_instance);
 
+int crypto_unregister_instance(struct crypto_alg *alg)
+{
+	int err;
+	struct crypto_instance *inst = (void *)alg;
+	struct crypto_template *tmpl = inst->tmpl;
+	LIST_HEAD(users);
+
+	if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
+		return -EINVAL;
+
+	BUG_ON(atomic_read(&alg->cra_refcnt) != 1);
+
+	down_write(&crypto_alg_sem);
+
+	hlist_del_init(&inst->list);
+	err = crypto_remove_alg(alg, &users);
+
+	up_write(&crypto_alg_sem);
+
+	if (err)
+		return err;
+
+	tmpl->free(inst);
+	crypto_remove_final(&users);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_instance);
+
 int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
 		      struct crypto_instance *inst, u32 mask)
 {
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 2abca780312d..edeebe1a84bb 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -301,7 +301,7 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
 	if (atomic_read(&alg->cra_refcnt) != 1)
 		return -EBUSY;
 
-	return crypto_unregister_alg(alg);
+	return crypto_unregister_instance(alg);
 }
 
 static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index ecc721def10c..418d270e1806 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -134,6 +134,7 @@ struct crypto_template *crypto_lookup_template(const char *name);
 
 int crypto_register_instance(struct crypto_template *tmpl,
 			     struct crypto_instance *inst);
+int crypto_unregister_instance(struct crypto_alg *alg);
 
 int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
 		      struct crypto_instance *inst, u32 mask);

From d19978f58745e586d9385d306d557e7c785abe23 Mon Sep 17 00:00:00 2001
From: "Valdis.Kletnieks@vt.edu" <Valdis.Kletnieks@vt.edu>
Date: Wed, 9 Nov 2011 01:29:20 -0500
Subject: [PATCH 24/54] crypto: fix typo in crypto/Kconfig

Fix a typo in the Kconfig file help text.

Signed-off-by: Valdis Kletnieks <valdis.kletnieks@vt.edu>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crypto/Kconfig b/crypto/Kconfig
index 527a857d10b6..733208fe0a2d 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -105,7 +105,7 @@ config CRYPTO_USER
 	depends on NET
 	select CRYPTO_MANAGER
 	help
-	  Userapace configuration for cryptographic instantiations such as
+	  Userspace configuration for cryptographic instantiations such as
 	  cbc(aes).
 
 config CRYPTO_MANAGER_DISABLE_TESTS

From 937c30d7f560210b0163035edd42b2aef78fed9e Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Wed, 9 Nov 2011 16:26:25 +0200
Subject: [PATCH 25/54] crypto: serpent - add 8-way parallel x86_64/SSE2
 assembler implementation

This patch adds an x86_64/SSE2 assembler implementation of the Serpent
cipher. The assembler functions crypt data in eight-block chunks (two
4-block SSE2 operations run in parallel to improve performance on
out-of-order CPUs). The glue code is based on the AES-NI implementation,
so requests from irq context are redirected to cryptd.
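
The cryptd redirection mentioned above boils down to the following
dispatch, condensed here from the glue code added later in this patch
(serpent_sse2_glue.c); it relies on the definitions in that file:

static int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		/* no FPU/SSE2 from this context: defer to the cryptd worker */
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);

		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_encrypt(cryptd_req);
	}

	/* FPU usable: run the synchronous SSE2 path directly */
	return __ablk_encrypt(req);
}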

v2:
 - add missing include of linux/module.h
   (apparently crypto.h used to include module.h, which changed for 3.2 by
    commit 7c926402a7e8c9b279968fd94efec8700ba3859e)

Patch has been tested with tcrypt and automated filesystem tests.

Tcrypt benchmark results (serpent-sse2/serpent_generic speed ratios):

AMD Phenom II 1055T (fam:16, model:10):

size    ecb-enc ecb-dec cbc-enc cbc-dec ctr-enc ctr-dec
16B     1.03x   1.01x   1.03x   1.05x   1.00x   0.99x
64B     1.00x   1.01x   1.02x   1.04x   1.02x   1.01x
256B    2.34x   2.41x   0.99x   2.43x   2.39x   2.40x
1024B   2.51x   2.57x   1.00x   2.59x   2.56x   2.56x
8192B   2.50x   2.54x   1.00x   2.55x   2.57x   2.57x

Intel Celeron T1600 (fam:6, model:15, step:13):

size    ecb-enc ecb-dec cbc-enc cbc-dec ctr-enc ctr-dec
16B     0.97x   0.97x   1.01x   1.01x   1.01x   1.02x
64B     1.00x   1.00x   1.00x   1.02x   1.01x   1.01x
256B    3.41x   3.35x   1.00x   3.39x   3.42x   3.44x
1024B   3.75x   3.72x   0.99x   3.74x   3.75x   3.75x
8192B   3.70x   3.68x   0.99x   3.68x   3.69x   3.69x

Full output:
 http://koti.mbnet.fi/axh/kernel/crypto/phenom-ii-1055t/serpent-generic.txt
 http://koti.mbnet.fi/axh/kernel/crypto/phenom-ii-1055t/serpent-sse2.txt
 http://koti.mbnet.fi/axh/kernel/crypto/celeron-t1600/serpent-generic.txt
 http://koti.mbnet.fi/axh/kernel/crypto/celeron-t1600/serpent-sse2.txt

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 arch/x86/crypto/Makefile                     |   2 +
 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S | 761 +++++++++++++++++++
 arch/x86/crypto/serpent_sse2_glue.c          | 719 ++++++++++++++++++
 arch/x86/include/asm/serpent.h               |  32 +
 crypto/Kconfig                               |  17 +
 crypto/testmgr.c                             |  60 ++
 6 files changed, 1591 insertions(+)
 create mode 100644 arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
 create mode 100644 arch/x86/crypto/serpent_sse2_glue.c
 create mode 100644 arch/x86/include/asm/serpent.h

diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 3537d4b91f74..12ebdbd80ccb 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o
 obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
+obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o
 obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
 obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
 
@@ -26,6 +27,7 @@ blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
 twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
 twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o
 salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
+serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o
 
 aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
 
diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
new file mode 100644
index 000000000000..7f24a1540821
--- /dev/null
+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
@@ -0,0 +1,761 @@
+/*
+ * Serpent Cipher 8-way parallel algorithm (x86_64/SSE2)
+ *
+ * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *
+ * Based on crypto/serpent.c by
+ *  Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no>
+ *                2003 Herbert Valerio Riedel <hvr@gnu.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
+ * USA
+ *
+ */
+
+.file "serpent-sse2-x86_64-asm_64.S"
+.text
+
+#define CTX %rdi
+
+/**********************************************************************
+  8-way SSE2 serpent
+ **********************************************************************/
+#define RA1 %xmm0
+#define RB1 %xmm1
+#define RC1 %xmm2
+#define RD1 %xmm3
+#define RE1 %xmm4
+
+#define RA2 %xmm5
+#define RB2 %xmm6
+#define RC2 %xmm7
+#define RD2 %xmm8
+#define RE2 %xmm9
+
+#define RNOT %xmm10
+
+#define RK0 %xmm11
+#define RK1 %xmm12
+#define RK2 %xmm13
+#define RK3 %xmm14
+
+#define S0_1(x0, x1, x2, x3, x4) \
+	movdqa x3,		x4; \
+	por x0,			x3; \
+	pxor x4,		x0; \
+	pxor x2,		x4; \
+	pxor RNOT,		x4; \
+	pxor x1,		x3; \
+	pand x0,		x1; \
+	pxor x4,		x1; \
+	pxor x0,		x2;
+#define S0_2(x0, x1, x2, x3, x4) \
+	pxor x3,		x0; \
+	por x0,			x4; \
+	pxor x2,		x0; \
+	pand x1,		x2; \
+	pxor x2,		x3; \
+	pxor RNOT,		x1; \
+	pxor x4,		x2; \
+	pxor x2,		x1;
+
+#define S1_1(x0, x1, x2, x3, x4) \
+	movdqa x1,		x4; \
+	pxor x0,		x1; \
+	pxor x3,		x0; \
+	pxor RNOT,		x3; \
+	pand x1,		x4; \
+	por x1,			x0; \
+	pxor x2,		x3; \
+	pxor x3,		x0; \
+	pxor x3,		x1;
+#define S1_2(x0, x1, x2, x3, x4) \
+	pxor x4,		x3; \
+	por x4,			x1; \
+	pxor x2,		x4; \
+	pand x0,		x2; \
+	pxor x1,		x2; \
+	por x0,			x1; \
+	pxor RNOT,		x0; \
+	pxor x2,		x0; \
+	pxor x1,		x4;
+
+#define S2_1(x0, x1, x2, x3, x4) \
+	pxor RNOT,		x3; \
+	pxor x0,		x1; \
+	movdqa x0,		x4; \
+	pand x2,		x0; \
+	pxor x3,		x0; \
+	por x4,			x3; \
+	pxor x1,		x2; \
+	pxor x1,		x3; \
+	pand x0,		x1;
+#define S2_2(x0, x1, x2, x3, x4) \
+	pxor x2,		x0; \
+	pand x3,		x2; \
+	por x1,			x3; \
+	pxor RNOT,		x0; \
+	pxor x0,		x3; \
+	pxor x0,		x4; \
+	pxor x2,		x0; \
+	por x2,			x1;
+
+#define S3_1(x0, x1, x2, x3, x4) \
+	movdqa x1,		x4; \
+	pxor x3,		x1; \
+	por x0,			x3; \
+	pand x0,		x4; \
+	pxor x2,		x0; \
+	pxor x1,		x2; \
+	pand x3,		x1; \
+	pxor x3,		x2; \
+	por x4,			x0; \
+	pxor x3,		x4;
+#define S3_2(x0, x1, x2, x3, x4) \
+	pxor x0,		x1; \
+	pand x3,		x0; \
+	pand x4,		x3; \
+	pxor x2,		x3; \
+	por x1,			x4; \
+	pand x1,		x2; \
+	pxor x3,		x4; \
+	pxor x3,		x0; \
+	pxor x2,		x3;
+
+#define S4_1(x0, x1, x2, x3, x4) \
+	movdqa x3,		x4; \
+	pand x0,		x3; \
+	pxor x4,		x0; \
+	pxor x2,		x3; \
+	por x4,			x2; \
+	pxor x1,		x0; \
+	pxor x3,		x4; \
+	por x0,			x2; \
+	pxor x1,		x2;
+#define S4_2(x0, x1, x2, x3, x4) \
+	pand x0,		x1; \
+	pxor x4,		x1; \
+	pand x2,		x4; \
+	pxor x3,		x2; \
+	pxor x0,		x4; \
+	por x1,			x3; \
+	pxor RNOT,		x1; \
+	pxor x0,		x3;
+
+#define S5_1(x0, x1, x2, x3, x4) \
+	movdqa x1,		x4; \
+	por x0,			x1; \
+	pxor x1,		x2; \
+	pxor RNOT,		x3; \
+	pxor x0,		x4; \
+	pxor x2,		x0; \
+	pand x4,		x1; \
+	por x3,			x4; \
+	pxor x0,		x4;
+#define S5_2(x0, x1, x2, x3, x4) \
+	pand x3,		x0; \
+	pxor x3,		x1; \
+	pxor x2,		x3; \
+	pxor x1,		x0; \
+	pand x4,		x2; \
+	pxor x2,		x1; \
+	pand x0,		x2; \
+	pxor x2,		x3;
+
+#define S6_1(x0, x1, x2, x3, x4) \
+	movdqa x1,		x4; \
+	pxor x0,		x3; \
+	pxor x2,		x1; \
+	pxor x0,		x2; \
+	pand x3,		x0; \
+	por x3,			x1; \
+	pxor RNOT,		x4; \
+	pxor x1,		x0; \
+	pxor x2,		x1;
+#define S6_2(x0, x1, x2, x3, x4) \
+	pxor x4,		x3; \
+	pxor x0,		x4; \
+	pand x0,		x2; \
+	pxor x1,		x4; \
+	pxor x3,		x2; \
+	pand x1,		x3; \
+	pxor x0,		x3; \
+	pxor x2,		x1;
+
+#define S7_1(x0, x1, x2, x3, x4) \
+	pxor RNOT,		x1; \
+	movdqa x1,		x4; \
+	pxor RNOT,		x0; \
+	pand x2,		x1; \
+	pxor x3,		x1; \
+	por x4,			x3; \
+	pxor x2,		x4; \
+	pxor x3,		x2; \
+	pxor x0,		x3; \
+	por x1,			x0;
+#define S7_2(x0, x1, x2, x3, x4) \
+	pand x0,		x2; \
+	pxor x4,		x0; \
+	pxor x3,		x4; \
+	pand x0,		x3; \
+	pxor x1,		x4; \
+	pxor x4,		x2; \
+	pxor x1,		x3; \
+	por x0,			x4; \
+	pxor x1,		x4;
+
+#define SI0_1(x0, x1, x2, x3, x4) \
+	movdqa x3,		x4; \
+	pxor x0,		x1; \
+	por x1,			x3; \
+	pxor x1,		x4; \
+	pxor RNOT,		x0; \
+	pxor x3,		x2; \
+	pxor x0,		x3; \
+	pand x1,		x0; \
+	pxor x2,		x0;
+#define SI0_2(x0, x1, x2, x3, x4) \
+	pand x3,		x2; \
+	pxor x4,		x3; \
+	pxor x3,		x2; \
+	pxor x3,		x1; \
+	pand x0,		x3; \
+	pxor x0,		x1; \
+	pxor x2,		x0; \
+	pxor x3,		x4;
+
+#define SI1_1(x0, x1, x2, x3, x4) \
+	pxor x3,		x1; \
+	movdqa x0,		x4; \
+	pxor x2,		x0; \
+	pxor RNOT,		x2; \
+	por x1,			x4; \
+	pxor x3,		x4; \
+	pand x1,		x3; \
+	pxor x2,		x1; \
+	pand x4,		x2;
+#define SI1_2(x0, x1, x2, x3, x4) \
+	pxor x1,		x4; \
+	por x3,			x1; \
+	pxor x0,		x3; \
+	pxor x0,		x2; \
+	por x4,			x0; \
+	pxor x4,		x2; \
+	pxor x0,		x1; \
+	pxor x1,		x4;
+
+#define SI2_1(x0, x1, x2, x3, x4) \
+	pxor x1,		x2; \
+	movdqa x3,		x4; \
+	pxor RNOT,		x3; \
+	por x2,			x3; \
+	pxor x4,		x2; \
+	pxor x0,		x4; \
+	pxor x1,		x3; \
+	por x2,			x1; \
+	pxor x0,		x2;
+#define SI2_2(x0, x1, x2, x3, x4) \
+	pxor x4,		x1; \
+	por x3,			x4; \
+	pxor x3,		x2; \
+	pxor x2,		x4; \
+	pand x1,		x2; \
+	pxor x3,		x2; \
+	pxor x4,		x3; \
+	pxor x0,		x4;
+
+#define SI3_1(x0, x1, x2, x3, x4) \
+	pxor x1,		x2; \
+	movdqa x1,		x4; \
+	pand x2,		x1; \
+	pxor x0,		x1; \
+	por x4,			x0; \
+	pxor x3,		x4; \
+	pxor x3,		x0; \
+	por x1,			x3; \
+	pxor x2,		x1;
+#define SI3_2(x0, x1, x2, x3, x4) \
+	pxor x3,		x1; \
+	pxor x2,		x0; \
+	pxor x3,		x2; \
+	pand x1,		x3; \
+	pxor x0,		x1; \
+	pand x2,		x0; \
+	pxor x3,		x4; \
+	pxor x0,		x3; \
+	pxor x1,		x0;
+
+#define SI4_1(x0, x1, x2, x3, x4) \
+	pxor x3,		x2; \
+	movdqa x0,		x4; \
+	pand x1,		x0; \
+	pxor x2,		x0; \
+	por x3,			x2; \
+	pxor RNOT,		x4; \
+	pxor x0,		x1; \
+	pxor x2,		x0; \
+	pand x4,		x2;
+#define SI4_2(x0, x1, x2, x3, x4) \
+	pxor x0,		x2; \
+	por x4,			x0; \
+	pxor x3,		x0; \
+	pand x2,		x3; \
+	pxor x3,		x4; \
+	pxor x1,		x3; \
+	pand x0,		x1; \
+	pxor x1,		x4; \
+	pxor x3,		x0;
+
+#define SI5_1(x0, x1, x2, x3, x4) \
+	movdqa x1,		x4; \
+	por x2,			x1; \
+	pxor x4,		x2; \
+	pxor x3,		x1; \
+	pand x4,		x3; \
+	pxor x3,		x2; \
+	por x0,			x3; \
+	pxor RNOT,		x0; \
+	pxor x2,		x3; \
+	por x0,			x2;
+#define SI5_2(x0, x1, x2, x3, x4) \
+	pxor x1,		x4; \
+	pxor x4,		x2; \
+	pand x0,		x4; \
+	pxor x1,		x0; \
+	pxor x3,		x1; \
+	pand x2,		x0; \
+	pxor x3,		x2; \
+	pxor x2,		x0; \
+	pxor x4,		x2; \
+	pxor x3,		x4;
+
+#define SI6_1(x0, x1, x2, x3, x4) \
+	pxor x2,		x0; \
+	movdqa x0,		x4; \
+	pand x3,		x0; \
+	pxor x3,		x2; \
+	pxor x2,		x0; \
+	pxor x1,		x3; \
+	por x4,			x2; \
+	pxor x3,		x2; \
+	pand x0,		x3;
+#define SI6_2(x0, x1, x2, x3, x4) \
+	pxor RNOT,		x0; \
+	pxor x1,		x3; \
+	pand x2,		x1; \
+	pxor x0,		x4; \
+	pxor x4,		x3; \
+	pxor x2,		x4; \
+	pxor x1,		x0; \
+	pxor x0,		x2;
+
+#define SI7_1(x0, x1, x2, x3, x4) \
+	movdqa x3,		x4; \
+	pand x0,		x3; \
+	pxor x2,		x0; \
+	por x4,			x2; \
+	pxor x1,		x4; \
+	pxor RNOT,		x0; \
+	por x3,			x1; \
+	pxor x0,		x4; \
+	pand x2,		x0; \
+	pxor x1,		x0;
+#define SI7_2(x0, x1, x2, x3, x4) \
+	pand x2,		x1; \
+	pxor x2,		x3; \
+	pxor x3,		x4; \
+	pand x3,		x2; \
+	por x0,			x3; \
+	pxor x4,		x1; \
+	pxor x4,		x3; \
+	pand x0,		x4; \
+	pxor x2,		x4;
+
+#define get_key(i, j, t) \
+	movd (4*(i)+(j))*4(CTX), t; \
+	pshufd $0, t, t;
+
+#define K2(x0, x1, x2, x3, x4, i) \
+	get_key(i, 0, RK0); \
+	get_key(i, 1, RK1); \
+	get_key(i, 2, RK2); \
+	get_key(i, 3, RK3); \
+	pxor RK0,		x0 ## 1; \
+	pxor RK1,		x1 ## 1; \
+	pxor RK2,		x2 ## 1; \
+	pxor RK3,		x3 ## 1; \
+		pxor RK0,		x0 ## 2; \
+		pxor RK1,		x1 ## 2; \
+		pxor RK2,		x2 ## 2; \
+		pxor RK3,		x3 ## 2;
+
+#define LK2(x0, x1, x2, x3, x4, i) \
+	movdqa x0 ## 1,		x4 ## 1; \
+	pslld $13,		x0 ## 1; \
+	psrld $(32 - 13),	x4 ## 1; \
+	por x4 ## 1,		x0 ## 1; \
+	pxor x0 ## 1,		x1 ## 1; \
+	movdqa x2 ## 1,		x4 ## 1; \
+	pslld $3,		x2 ## 1; \
+	psrld $(32 - 3),	x4 ## 1; \
+	por x4 ## 1,		x2 ## 1; \
+	pxor x2 ## 1,		x1 ## 1; \
+		movdqa x0 ## 2,		x4 ## 2; \
+		pslld $13,		x0 ## 2; \
+		psrld $(32 - 13),	x4 ## 2; \
+		por x4 ## 2,		x0 ## 2; \
+		pxor x0 ## 2,		x1 ## 2; \
+		movdqa x2 ## 2,		x4 ## 2; \
+		pslld $3,		x2 ## 2; \
+		psrld $(32 - 3),	x4 ## 2; \
+		por x4 ## 2,		x2 ## 2; \
+		pxor x2 ## 2,		x1 ## 2; \
+	movdqa x1 ## 1,		x4 ## 1; \
+	pslld $1,		x1 ## 1; \
+	psrld $(32 - 1),	x4 ## 1; \
+	por x4 ## 1,		x1 ## 1; \
+	movdqa x0 ## 1,		x4 ## 1; \
+	pslld $3,		x4 ## 1; \
+	pxor x2 ## 1,		x3 ## 1; \
+	pxor x4 ## 1,		x3 ## 1; \
+	movdqa x3 ## 1,		x4 ## 1; \
+	get_key(i, 1, RK1); \
+		movdqa x1 ## 2,		x4 ## 2; \
+		pslld $1,		x1 ## 2; \
+		psrld $(32 - 1),	x4 ## 2; \
+		por x4 ## 2,		x1 ## 2; \
+		movdqa x0 ## 2,		x4 ## 2; \
+		pslld $3,		x4 ## 2; \
+		pxor x2 ## 2,		x3 ## 2; \
+		pxor x4 ## 2,		x3 ## 2; \
+		movdqa x3 ## 2,		x4 ## 2; \
+		get_key(i, 3, RK3); \
+	pslld $7,		x3 ## 1; \
+	psrld $(32 - 7),	x4 ## 1; \
+	por x4 ## 1,		x3 ## 1; \
+	movdqa x1 ## 1,		x4 ## 1; \
+	pslld $7,		x4 ## 1; \
+	pxor x1 ## 1,		x0 ## 1; \
+	pxor x3 ## 1,		x0 ## 1; \
+	pxor x3 ## 1,		x2 ## 1; \
+	pxor x4 ## 1,		x2 ## 1; \
+	get_key(i, 0, RK0); \
+		pslld $7,		x3 ## 2; \
+		psrld $(32 - 7),	x4 ## 2; \
+		por x4 ## 2,		x3 ## 2; \
+		movdqa x1 ## 2,		x4 ## 2; \
+		pslld $7,		x4 ## 2; \
+		pxor x1 ## 2,		x0 ## 2; \
+		pxor x3 ## 2,		x0 ## 2; \
+		pxor x3 ## 2,		x2 ## 2; \
+		pxor x4 ## 2,		x2 ## 2; \
+		get_key(i, 2, RK2); \
+	pxor RK1,		x1 ## 1; \
+	pxor RK3,		x3 ## 1; \
+	movdqa x0 ## 1,		x4 ## 1; \
+	pslld $5,		x0 ## 1; \
+	psrld $(32 - 5),	x4 ## 1; \
+	por x4 ## 1,		x0 ## 1; \
+	movdqa x2 ## 1,		x4 ## 1; \
+	pslld $22,		x2 ## 1; \
+	psrld $(32 - 22),	x4 ## 1; \
+	por x4 ## 1,		x2 ## 1; \
+	pxor RK0,		x0 ## 1; \
+	pxor RK2,		x2 ## 1; \
+		pxor RK1,		x1 ## 2; \
+		pxor RK3,		x3 ## 2; \
+		movdqa x0 ## 2,		x4 ## 2; \
+		pslld $5,		x0 ## 2; \
+		psrld $(32 - 5),	x4 ## 2; \
+		por x4 ## 2,		x0 ## 2; \
+		movdqa x2 ## 2,		x4 ## 2; \
+		pslld $22,		x2 ## 2; \
+		psrld $(32 - 22),	x4 ## 2; \
+		por x4 ## 2,		x2 ## 2; \
+		pxor RK0,		x0 ## 2; \
+		pxor RK2,		x2 ## 2;
+
+#define KL2(x0, x1, x2, x3, x4, i) \
+	pxor RK0,		x0 ## 1; \
+	pxor RK2,		x2 ## 1; \
+	movdqa x0 ## 1,		x4 ## 1; \
+	psrld $5,		x0 ## 1; \
+	pslld $(32 - 5),	x4 ## 1; \
+	por x4 ## 1,		x0 ## 1; \
+	pxor RK3,		x3 ## 1; \
+	pxor RK1,		x1 ## 1; \
+	movdqa x2 ## 1,		x4 ## 1; \
+	psrld $22,		x2 ## 1; \
+	pslld $(32 - 22),	x4 ## 1; \
+	por x4 ## 1,		x2 ## 1; \
+	pxor x3 ## 1,		x2 ## 1; \
+		pxor RK0,		x0 ## 2; \
+		pxor RK2,		x2 ## 2; \
+		movdqa x0 ## 2,		x4 ## 2; \
+		psrld $5,		x0 ## 2; \
+		pslld $(32 - 5),	x4 ## 2; \
+		por x4 ## 2,		x0 ## 2; \
+		pxor RK3,		x3 ## 2; \
+		pxor RK1,		x1 ## 2; \
+		movdqa x2 ## 2,		x4 ## 2; \
+		psrld $22,		x2 ## 2; \
+		pslld $(32 - 22),	x4 ## 2; \
+		por x4 ## 2,		x2 ## 2; \
+		pxor x3 ## 2,		x2 ## 2; \
+	pxor x3 ## 1,		x0 ## 1; \
+	movdqa x1 ## 1,		x4 ## 1; \
+	pslld $7,		x4 ## 1; \
+	pxor x1 ## 1,		x0 ## 1; \
+	pxor x4 ## 1,		x2 ## 1; \
+	movdqa x1 ## 1,		x4 ## 1; \
+	psrld $1,		x1 ## 1; \
+	pslld $(32 - 1),	x4 ## 1; \
+	por x4 ## 1,		x1 ## 1; \
+		pxor x3 ## 2,		x0 ## 2; \
+		movdqa x1 ## 2,		x4 ## 2; \
+		pslld $7,		x4 ## 2; \
+		pxor x1 ## 2,		x0 ## 2; \
+		pxor x4 ## 2,		x2 ## 2; \
+		movdqa x1 ## 2,		x4 ## 2; \
+		psrld $1,		x1 ## 2; \
+		pslld $(32 - 1),	x4 ## 2; \
+		por x4 ## 2,		x1 ## 2; \
+	movdqa x3 ## 1,		x4 ## 1; \
+	psrld $7,		x3 ## 1; \
+	pslld $(32 - 7),	x4 ## 1; \
+	por x4 ## 1,		x3 ## 1; \
+	pxor x0 ## 1,		x1 ## 1; \
+	movdqa x0 ## 1,		x4 ## 1; \
+	pslld $3,		x4 ## 1; \
+	pxor x4 ## 1,		x3 ## 1; \
+	movdqa x0 ## 1,		x4 ## 1; \
+		movdqa x3 ## 2,		x4 ## 2; \
+		psrld $7,		x3 ## 2; \
+		pslld $(32 - 7),	x4 ## 2; \
+		por x4 ## 2,		x3 ## 2; \
+		pxor x0 ## 2,		x1 ## 2; \
+		movdqa x0 ## 2,		x4 ## 2; \
+		pslld $3,		x4 ## 2; \
+		pxor x4 ## 2,		x3 ## 2; \
+		movdqa x0 ## 2,		x4 ## 2; \
+	psrld $13,		x0 ## 1; \
+	pslld $(32 - 13),	x4 ## 1; \
+	por x4 ## 1,		x0 ## 1; \
+	pxor x2 ## 1,		x1 ## 1; \
+	pxor x2 ## 1,		x3 ## 1; \
+	movdqa x2 ## 1,		x4 ## 1; \
+	psrld $3,		x2 ## 1; \
+	pslld $(32 - 3),	x4 ## 1; \
+	por x4 ## 1,		x2 ## 1; \
+		psrld $13,		x0 ## 2; \
+		pslld $(32 - 13),	x4 ## 2; \
+		por x4 ## 2,		x0 ## 2; \
+		pxor x2 ## 2,		x1 ## 2; \
+		pxor x2 ## 2,		x3 ## 2; \
+		movdqa x2 ## 2,		x4 ## 2; \
+		psrld $3,		x2 ## 2; \
+		pslld $(32 - 3),	x4 ## 2; \
+		por x4 ## 2,		x2 ## 2;
+
+#define S(SBOX, x0, x1, x2, x3, x4) \
+	SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \
+	SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \
+	SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \
+	SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2);
+
+#define SP(SBOX, x0, x1, x2, x3, x4, i) \
+	get_key(i, 0, RK0); \
+	SBOX ## _1(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \
+	get_key(i, 2, RK2); \
+	SBOX ## _1(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \
+	get_key(i, 3, RK3); \
+	SBOX ## _2(x0 ## 1, x1 ## 1, x2 ## 1, x3 ## 1, x4 ## 1); \
+	get_key(i, 1, RK1); \
+	SBOX ## _2(x0 ## 2, x1 ## 2, x2 ## 2, x3 ## 2, x4 ## 2); \
+
+#define transpose_4x4(x0, x1, x2, x3, t1, t2, t3) \
+	movdqa x2,		t3; \
+	movdqa x0,		t1; \
+	unpcklps x3,		t3; \
+	movdqa x0,		t2; \
+	unpcklps x1,		t1; \
+	unpckhps x1,		t2; \
+	movdqa t3,		x1; \
+	unpckhps x3,		x2; \
+	movdqa t1,		x0; \
+	movhlps t1,		x1; \
+	movdqa t2,		t1; \
+	movlhps t3,		x0; \
+	movlhps x2,		t1; \
+	movhlps t2,		x2; \
+	movdqa x2,		x3; \
+	movdqa t1,		x2;
+
+#define read_blocks(in, x0, x1, x2, x3, t0, t1, t2) \
+	movdqu (0*4*4)(in),	x0; \
+	movdqu (1*4*4)(in),	x1; \
+	movdqu (2*4*4)(in),	x2; \
+	movdqu (3*4*4)(in),	x3; \
+	\
+	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
+
+#define write_blocks(out, x0, x1, x2, x3, t0, t1, t2) \
+	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
+	\
+	movdqu x0,		(0*4*4)(out); \
+	movdqu x1,		(1*4*4)(out); \
+	movdqu x2,		(2*4*4)(out); \
+	movdqu x3,		(3*4*4)(out);
+
+#define xor_blocks(out, x0, x1, x2, x3, t0, t1, t2) \
+	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
+	\
+	movdqu (0*4*4)(out),	t0; \
+	pxor t0,		x0; \
+	movdqu x0,		(0*4*4)(out); \
+	movdqu (1*4*4)(out),	t0; \
+	pxor t0,		x1; \
+	movdqu x1,		(1*4*4)(out); \
+	movdqu (2*4*4)(out),	t0; \
+	pxor t0,		x2; \
+	movdqu x2,		(2*4*4)(out); \
+	movdqu (3*4*4)(out),	t0; \
+	pxor t0,		x3; \
+	movdqu x3,		(3*4*4)(out);
+
+.align 8
+.global __serpent_enc_blk_8way
+.type   __serpent_enc_blk_8way,@function;
+
+__serpent_enc_blk_8way:
+	/* input:
+	 *	%rdi: ctx, CTX
+	 *	%rsi: dst
+	 *	%rdx: src
+	 *	%rcx: bool, if true: xor output
+	 */
+
+	pcmpeqd RNOT, RNOT;
+
+	leaq (4*4*4)(%rdx), %rax;
+	read_blocks(%rdx, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+	read_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
+
+						 K2(RA, RB, RC, RD, RE, 0);
+	S(S0, RA, RB, RC, RD, RE);		LK2(RC, RB, RD, RA, RE, 1);
+	S(S1, RC, RB, RD, RA, RE);		LK2(RE, RD, RA, RC, RB, 2);
+	S(S2, RE, RD, RA, RC, RB);		LK2(RB, RD, RE, RC, RA, 3);
+	S(S3, RB, RD, RE, RC, RA);		LK2(RC, RA, RD, RB, RE, 4);
+	S(S4, RC, RA, RD, RB, RE);		LK2(RA, RD, RB, RE, RC, 5);
+	S(S5, RA, RD, RB, RE, RC);		LK2(RC, RA, RD, RE, RB, 6);
+	S(S6, RC, RA, RD, RE, RB);		LK2(RD, RB, RA, RE, RC, 7);
+	S(S7, RD, RB, RA, RE, RC);		LK2(RC, RA, RE, RD, RB, 8);
+	S(S0, RC, RA, RE, RD, RB);		LK2(RE, RA, RD, RC, RB, 9);
+	S(S1, RE, RA, RD, RC, RB);		LK2(RB, RD, RC, RE, RA, 10);
+	S(S2, RB, RD, RC, RE, RA);		LK2(RA, RD, RB, RE, RC, 11);
+	S(S3, RA, RD, RB, RE, RC);		LK2(RE, RC, RD, RA, RB, 12);
+	S(S4, RE, RC, RD, RA, RB);		LK2(RC, RD, RA, RB, RE, 13);
+	S(S5, RC, RD, RA, RB, RE);		LK2(RE, RC, RD, RB, RA, 14);
+	S(S6, RE, RC, RD, RB, RA);		LK2(RD, RA, RC, RB, RE, 15);
+	S(S7, RD, RA, RC, RB, RE);		LK2(RE, RC, RB, RD, RA, 16);
+	S(S0, RE, RC, RB, RD, RA);		LK2(RB, RC, RD, RE, RA, 17);
+	S(S1, RB, RC, RD, RE, RA);		LK2(RA, RD, RE, RB, RC, 18);
+	S(S2, RA, RD, RE, RB, RC);		LK2(RC, RD, RA, RB, RE, 19);
+	S(S3, RC, RD, RA, RB, RE);		LK2(RB, RE, RD, RC, RA, 20);
+	S(S4, RB, RE, RD, RC, RA);		LK2(RE, RD, RC, RA, RB, 21);
+	S(S5, RE, RD, RC, RA, RB);		LK2(RB, RE, RD, RA, RC, 22);
+	S(S6, RB, RE, RD, RA, RC);		LK2(RD, RC, RE, RA, RB, 23);
+	S(S7, RD, RC, RE, RA, RB);		LK2(RB, RE, RA, RD, RC, 24);
+	S(S0, RB, RE, RA, RD, RC);		LK2(RA, RE, RD, RB, RC, 25);
+	S(S1, RA, RE, RD, RB, RC);		LK2(RC, RD, RB, RA, RE, 26);
+	S(S2, RC, RD, RB, RA, RE);		LK2(RE, RD, RC, RA, RB, 27);
+	S(S3, RE, RD, RC, RA, RB);		LK2(RA, RB, RD, RE, RC, 28);
+	S(S4, RA, RB, RD, RE, RC);		LK2(RB, RD, RE, RC, RA, 29);
+	S(S5, RB, RD, RE, RC, RA);		LK2(RA, RB, RD, RC, RE, 30);
+	S(S6, RA, RB, RD, RC, RE);		LK2(RD, RE, RB, RC, RA, 31);
+	S(S7, RD, RE, RB, RC, RA);		 K2(RA, RB, RC, RD, RE, 32);
+
+	leaq (4*4*4)(%rsi), %rax;
+
+	testb %cl, %cl;
+	jnz __enc_xor8;
+
+	write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+	write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
+
+	ret;
+
+__enc_xor8:
+	xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+	xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
+
+	ret;
+
+.align 8
+.global serpent_dec_blk_8way
+.type   serpent_dec_blk_8way,@function;
+
+serpent_dec_blk_8way:
+	/* input:
+	 *	%rdi: ctx, CTX
+	 *	%rsi: dst
+	 *	%rdx: src
+	 */
+
+	pcmpeqd RNOT, RNOT;
+
+	leaq (4*4*4)(%rdx), %rax;
+	read_blocks(%rdx, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+	read_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
+
+						 K2(RA, RB, RC, RD, RE, 32);
+	SP(SI7, RA, RB, RC, RD, RE, 31);	KL2(RB, RD, RA, RE, RC, 31);
+	SP(SI6, RB, RD, RA, RE, RC, 30);	KL2(RA, RC, RE, RB, RD, 30);
+	SP(SI5, RA, RC, RE, RB, RD, 29);	KL2(RC, RD, RA, RE, RB, 29);
+	SP(SI4, RC, RD, RA, RE, RB, 28);	KL2(RC, RA, RB, RE, RD, 28);
+	SP(SI3, RC, RA, RB, RE, RD, 27);	KL2(RB, RC, RD, RE, RA, 27);
+	SP(SI2, RB, RC, RD, RE, RA, 26);	KL2(RC, RA, RE, RD, RB, 26);
+	SP(SI1, RC, RA, RE, RD, RB, 25);	KL2(RB, RA, RE, RD, RC, 25);
+	SP(SI0, RB, RA, RE, RD, RC, 24);	KL2(RE, RC, RA, RB, RD, 24);
+	SP(SI7, RE, RC, RA, RB, RD, 23);	KL2(RC, RB, RE, RD, RA, 23);
+	SP(SI6, RC, RB, RE, RD, RA, 22);	KL2(RE, RA, RD, RC, RB, 22);
+	SP(SI5, RE, RA, RD, RC, RB, 21);	KL2(RA, RB, RE, RD, RC, 21);
+	SP(SI4, RA, RB, RE, RD, RC, 20);	KL2(RA, RE, RC, RD, RB, 20);
+	SP(SI3, RA, RE, RC, RD, RB, 19);	KL2(RC, RA, RB, RD, RE, 19);
+	SP(SI2, RC, RA, RB, RD, RE, 18);	KL2(RA, RE, RD, RB, RC, 18);
+	SP(SI1, RA, RE, RD, RB, RC, 17);	KL2(RC, RE, RD, RB, RA, 17);
+	SP(SI0, RC, RE, RD, RB, RA, 16);	KL2(RD, RA, RE, RC, RB, 16);
+	SP(SI7, RD, RA, RE, RC, RB, 15);	KL2(RA, RC, RD, RB, RE, 15);
+	SP(SI6, RA, RC, RD, RB, RE, 14);	KL2(RD, RE, RB, RA, RC, 14);
+	SP(SI5, RD, RE, RB, RA, RC, 13);	KL2(RE, RC, RD, RB, RA, 13);
+	SP(SI4, RE, RC, RD, RB, RA, 12);	KL2(RE, RD, RA, RB, RC, 12);
+	SP(SI3, RE, RD, RA, RB, RC, 11);	KL2(RA, RE, RC, RB, RD, 11);
+	SP(SI2, RA, RE, RC, RB, RD, 10);	KL2(RE, RD, RB, RC, RA, 10);
+	SP(SI1, RE, RD, RB, RC, RA, 9);		KL2(RA, RD, RB, RC, RE, 9);
+	SP(SI0, RA, RD, RB, RC, RE, 8);		KL2(RB, RE, RD, RA, RC, 8);
+	SP(SI7, RB, RE, RD, RA, RC, 7);		KL2(RE, RA, RB, RC, RD, 7);
+	SP(SI6, RE, RA, RB, RC, RD, 6);		KL2(RB, RD, RC, RE, RA, 6);
+	SP(SI5, RB, RD, RC, RE, RA, 5);		KL2(RD, RA, RB, RC, RE, 5);
+	SP(SI4, RD, RA, RB, RC, RE, 4);		KL2(RD, RB, RE, RC, RA, 4);
+	SP(SI3, RD, RB, RE, RC, RA, 3);		KL2(RE, RD, RA, RC, RB, 3);
+	SP(SI2, RE, RD, RA, RC, RB, 2);		KL2(RD, RB, RC, RA, RE, 2);
+	SP(SI1, RD, RB, RC, RA, RE, 1);		KL2(RE, RB, RC, RA, RD, 1);
+	S(SI0, RE, RB, RC, RA, RD);		 K2(RC, RD, RB, RE, RA, 0);
+
+	leaq (4*4*4)(%rsi), %rax;
+	write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
+	write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
+
+	ret;
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
new file mode 100644
index 000000000000..947cf570f6a7
--- /dev/null
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -0,0 +1,719 @@
+/*
+ * Glue Code for SSE2 assembler versions of Serpent Cipher
+ *
+ * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *
+ * Glue code based on aesni-intel_glue.c by:
+ *  Copyright (C) 2008, Intel Corp.
+ *    Author: Huang Ying <ying.huang@intel.com>
+ *
+ * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
+ *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ * CTR part based on code (crypto/ctr.c) by:
+ *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
+ * USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/hardirq.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <crypto/algapi.h>
+#include <crypto/serpent.h>
+#include <crypto/cryptd.h>
+#include <crypto/b128ops.h>
+#include <crypto/ctr.h>
+#include <asm/i387.h>
+#include <asm/serpent.h>
+#include <crypto/scatterwalk.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+
+struct async_serpent_ctx {
+	struct cryptd_ablkcipher *cryptd_tfm;
+};
+
+static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+{
+	if (fpu_enabled)
+		return true;
+
+	/* SSE2 is only used when chunk to be processed is large enough, so
+	 * do not enable FPU until it is necessary.
+	 */
+	if (nbytes < SERPENT_BLOCK_SIZE * SERPENT_PARALLEL_BLOCKS)
+		return false;
+
+	kernel_fpu_begin();
+	return true;
+}
+
+static inline void serpent_fpu_end(bool fpu_enabled)
+{
+	if (fpu_enabled)
+		kernel_fpu_end();
+}
+
+static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+		     bool enc)
+{
+	bool fpu_enabled = false;
+	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	const unsigned int bsize = SERPENT_BLOCK_SIZE;
+	unsigned int nbytes;
+	int err;
+
+	err = blkcipher_walk_virt(desc, walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	while ((nbytes = walk->nbytes)) {
+		u8 *wsrc = walk->src.virt.addr;
+		u8 *wdst = walk->dst.virt.addr;
+
+		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
+
+		/* Process multi-block batch */
+		if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
+			do {
+				if (enc)
+					serpent_enc_blk_xway(ctx, wdst, wsrc);
+				else
+					serpent_dec_blk_xway(ctx, wdst, wsrc);
+
+				wsrc += bsize * SERPENT_PARALLEL_BLOCKS;
+				wdst += bsize * SERPENT_PARALLEL_BLOCKS;
+				nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
+			} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);
+
+			if (nbytes < bsize)
+				goto done;
+		}
+
+		/* Handle leftovers */
+		do {
+			if (enc)
+				__serpent_encrypt(ctx, wdst, wsrc);
+			else
+				__serpent_decrypt(ctx, wdst, wsrc);
+
+			wsrc += bsize;
+			wdst += bsize;
+			nbytes -= bsize;
+		} while (nbytes >= bsize);
+
+done:
+		err = blkcipher_walk_done(desc, walk, nbytes);
+	}
+
+	serpent_fpu_end(fpu_enabled);
+	return err;
+}
+
+static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct blkcipher_walk walk;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_crypt(desc, &walk, true);
+}
+
+static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct blkcipher_walk walk;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	return ecb_crypt(desc, &walk, false);
+}
+
+static struct crypto_alg blk_ecb_alg = {
+	.cra_name		= "__ecb-serpent-sse2",
+	.cra_driver_name	= "__driver-ecb-serpent-sse2",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= SERPENT_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct serpent_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(blk_ecb_alg.cra_list),
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize	= SERPENT_MIN_KEY_SIZE,
+			.max_keysize	= SERPENT_MAX_KEY_SIZE,
+			.setkey		= serpent_setkey,
+			.encrypt	= ecb_encrypt,
+			.decrypt	= ecb_decrypt,
+		},
+	},
+};
+
+static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
+				  struct blkcipher_walk *walk)
+{
+	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	const unsigned int bsize = SERPENT_BLOCK_SIZE;
+	unsigned int nbytes = walk->nbytes;
+	u128 *src = (u128 *)walk->src.virt.addr;
+	u128 *dst = (u128 *)walk->dst.virt.addr;
+	u128 *iv = (u128 *)walk->iv;
+
+	do {
+		u128_xor(dst, src, iv);
+		__serpent_encrypt(ctx, (u8 *)dst, (u8 *)dst);
+		iv = dst;
+
+		src += 1;
+		dst += 1;
+		nbytes -= bsize;
+	} while (nbytes >= bsize);
+
+	u128_xor((u128 *)walk->iv, (u128 *)walk->iv, iv);
+	return nbytes;
+}
+
+static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+
+	while ((nbytes = walk.nbytes)) {
+		nbytes = __cbc_encrypt(desc, &walk);
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	return err;
+}
+
+static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
+				  struct blkcipher_walk *walk)
+{
+	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	const unsigned int bsize = SERPENT_BLOCK_SIZE;
+	unsigned int nbytes = walk->nbytes;
+	u128 *src = (u128 *)walk->src.virt.addr;
+	u128 *dst = (u128 *)walk->dst.virt.addr;
+	u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
+	u128 last_iv;
+	int i;
+
+	/* Start of the last block. */
+	src += nbytes / bsize - 1;
+	dst += nbytes / bsize - 1;
+
+	last_iv = *src;
+
+	/* Process multi-block batch */
+	if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
+		do {
+			nbytes -= bsize * (SERPENT_PARALLEL_BLOCKS - 1);
+			src -= SERPENT_PARALLEL_BLOCKS - 1;
+			dst -= SERPENT_PARALLEL_BLOCKS - 1;
+
+			for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++)
+				ivs[i] = src[i];
+
+			serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src);
+
+			for (i = 0; i < SERPENT_PARALLEL_BLOCKS - 1; i++)
+				u128_xor(dst + (i + 1), dst + (i + 1), ivs + i);
+
+			nbytes -= bsize;
+			if (nbytes < bsize)
+				goto done;
+
+			u128_xor(dst, dst, src - 1);
+			src -= 1;
+			dst -= 1;
+		} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);
+
+		if (nbytes < bsize)
+			goto done;
+	}
+
+	/* Handle leftovers */
+	for (;;) {
+		__serpent_decrypt(ctx, (u8 *)dst, (u8 *)src);
+
+		nbytes -= bsize;
+		if (nbytes < bsize)
+			break;
+
+		u128_xor(dst, dst, src - 1);
+		src -= 1;
+		dst -= 1;
+	}
+
+done:
+	u128_xor(dst, dst, (u128 *)walk->iv);
+	*(u128 *)walk->iv = last_iv;
+
+	return nbytes;
+}
+
+static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	bool fpu_enabled = false;
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	while ((nbytes = walk.nbytes)) {
+		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
+		nbytes = __cbc_decrypt(desc, &walk);
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	serpent_fpu_end(fpu_enabled);
+	return err;
+}
+
+static struct crypto_alg blk_cbc_alg = {
+	.cra_name		= "__cbc-serpent-sse2",
+	.cra_driver_name	= "__driver-cbc-serpent-sse2",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= SERPENT_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct serpent_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(blk_cbc_alg.cra_list),
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize	= SERPENT_MIN_KEY_SIZE,
+			.max_keysize	= SERPENT_MAX_KEY_SIZE,
+			.setkey		= serpent_setkey,
+			.encrypt	= cbc_encrypt,
+			.decrypt	= cbc_decrypt,
+		},
+	},
+};
+
+static inline void u128_to_be128(be128 *dst, const u128 *src)
+{
+	dst->a = cpu_to_be64(src->a);
+	dst->b = cpu_to_be64(src->b);
+}
+
+static inline void be128_to_u128(u128 *dst, const be128 *src)
+{
+	dst->a = be64_to_cpu(src->a);
+	dst->b = be64_to_cpu(src->b);
+}
+
+static inline void u128_inc(u128 *i)
+{
+	i->b++;
+	if (!i->b)
+		i->a++;
+}
+
+static void ctr_crypt_final(struct blkcipher_desc *desc,
+			    struct blkcipher_walk *walk)
+{
+	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	u8 *ctrblk = walk->iv;
+	u8 keystream[SERPENT_BLOCK_SIZE];
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	unsigned int nbytes = walk->nbytes;
+
+	__serpent_encrypt(ctx, keystream, ctrblk);
+	crypto_xor(keystream, src, nbytes);
+	memcpy(dst, keystream, nbytes);
+
+	crypto_inc(ctrblk, SERPENT_BLOCK_SIZE);
+}
+
+static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
+				struct blkcipher_walk *walk)
+{
+	struct serpent_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	const unsigned int bsize = SERPENT_BLOCK_SIZE;
+	unsigned int nbytes = walk->nbytes;
+	u128 *src = (u128 *)walk->src.virt.addr;
+	u128 *dst = (u128 *)walk->dst.virt.addr;
+	u128 ctrblk;
+	be128 ctrblocks[SERPENT_PARALLEL_BLOCKS];
+	int i;
+
+	be128_to_u128(&ctrblk, (be128 *)walk->iv);
+
+	/* Process multi-block batch */
+	if (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS) {
+		do {
+			/* create ctrblks for parallel encrypt */
+			for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
+				if (dst != src)
+					dst[i] = src[i];
+
+				u128_to_be128(&ctrblocks[i], &ctrblk);
+				u128_inc(&ctrblk);
+			}
+
+			serpent_enc_blk_xway_xor(ctx, (u8 *)dst,
+						 (u8 *)ctrblocks);
+
+			src += SERPENT_PARALLEL_BLOCKS;
+			dst += SERPENT_PARALLEL_BLOCKS;
+			nbytes -= bsize * SERPENT_PARALLEL_BLOCKS;
+		} while (nbytes >= bsize * SERPENT_PARALLEL_BLOCKS);
+
+		if (nbytes < bsize)
+			goto done;
+	}
+
+	/* Handle leftovers */
+	do {
+		if (dst != src)
+			*dst = *src;
+
+		u128_to_be128(&ctrblocks[0], &ctrblk);
+		u128_inc(&ctrblk);
+
+		__serpent_encrypt(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
+		u128_xor(dst, dst, (u128 *)ctrblocks);
+
+		src += 1;
+		dst += 1;
+		nbytes -= bsize;
+	} while (nbytes >= bsize);
+
+done:
+	u128_to_be128((be128 *)walk->iv, &ctrblk);
+	return nbytes;
+}
+
+static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		     struct scatterlist *src, unsigned int nbytes)
+{
+	bool fpu_enabled = false;
+	struct blkcipher_walk walk;
+	int err;
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt_block(desc, &walk, SERPENT_BLOCK_SIZE);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	while ((nbytes = walk.nbytes) >= SERPENT_BLOCK_SIZE) {
+		fpu_enabled = serpent_fpu_begin(fpu_enabled, nbytes);
+		nbytes = __ctr_crypt(desc, &walk);
+		err = blkcipher_walk_done(desc, &walk, nbytes);
+	}
+
+	serpent_fpu_end(fpu_enabled);
+
+	if (walk.nbytes) {
+		ctr_crypt_final(desc, &walk);
+		err = blkcipher_walk_done(desc, &walk, 0);
+	}
+
+	return err;
+}
+
+static struct crypto_alg blk_ctr_alg = {
+	.cra_name		= "__ctr-serpent-sse2",
+	.cra_driver_name	= "__driver-ctr-serpent-sse2",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct serpent_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(blk_ctr_alg.cra_list),
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize	= SERPENT_MIN_KEY_SIZE,
+			.max_keysize	= SERPENT_MAX_KEY_SIZE,
+			.ivsize		= SERPENT_BLOCK_SIZE,
+			.setkey		= serpent_setkey,
+			.encrypt	= ctr_crypt,
+			.decrypt	= ctr_crypt,
+		},
+	},
+};
+
+static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
+			unsigned int key_len)
+{
+	struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
+	int err;
+
+	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
+				    & CRYPTO_TFM_REQ_MASK);
+	err = crypto_ablkcipher_setkey(child, key, key_len);
+	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
+				    & CRYPTO_TFM_RES_MASK);
+	return err;
+}
+
+static int __ablk_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	struct blkcipher_desc desc;
+
+	desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
+	desc.info = req->info;
+	desc.flags = 0;
+
+	return crypto_blkcipher_crt(desc.tfm)->encrypt(
+		&desc, req->dst, req->src, req->nbytes);
+}
+
+static int ablk_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+	if (!irq_fpu_usable()) {
+		struct ablkcipher_request *cryptd_req =
+			ablkcipher_request_ctx(req);
+
+		memcpy(cryptd_req, req, sizeof(*req));
+		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+
+		return crypto_ablkcipher_encrypt(cryptd_req);
+	} else {
+		return __ablk_encrypt(req);
+	}
+}
+
+static int ablk_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct async_serpent_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+	if (!irq_fpu_usable()) {
+		struct ablkcipher_request *cryptd_req =
+			ablkcipher_request_ctx(req);
+
+		memcpy(cryptd_req, req, sizeof(*req));
+		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+
+		return crypto_ablkcipher_decrypt(cryptd_req);
+	} else {
+		struct blkcipher_desc desc;
+
+		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
+		desc.info = req->info;
+		desc.flags = 0;
+
+		return crypto_blkcipher_crt(desc.tfm)->decrypt(
+			&desc, req->dst, req->src, req->nbytes);
+	}
+}
+
+static void ablk_exit(struct crypto_tfm *tfm)
+{
+	struct async_serpent_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	cryptd_free_ablkcipher(ctx->cryptd_tfm);
+}
+
+static void ablk_init_common(struct crypto_tfm *tfm,
+			     struct cryptd_ablkcipher *cryptd_tfm)
+{
+	struct async_serpent_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->cryptd_tfm = cryptd_tfm;
+	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
+		crypto_ablkcipher_reqsize(&cryptd_tfm->base);
+}
+
+static int ablk_ecb_init(struct crypto_tfm *tfm)
+{
+	struct cryptd_ablkcipher *cryptd_tfm;
+
+	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-serpent-sse2", 0, 0);
+	if (IS_ERR(cryptd_tfm))
+		return PTR_ERR(cryptd_tfm);
+	ablk_init_common(tfm, cryptd_tfm);
+	return 0;
+}
+
+static struct crypto_alg ablk_ecb_alg = {
+	.cra_name		= "ecb(serpent)",
+	.cra_driver_name	= "ecb-serpent-sse2",
+	.cra_priority		= 400,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= SERPENT_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct async_serpent_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
+	.cra_init		= ablk_ecb_init,
+	.cra_exit		= ablk_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize	= SERPENT_MIN_KEY_SIZE,
+			.max_keysize	= SERPENT_MAX_KEY_SIZE,
+			.setkey		= ablk_set_key,
+			.encrypt	= ablk_encrypt,
+			.decrypt	= ablk_decrypt,
+		},
+	},
+};
+
+static int ablk_cbc_init(struct crypto_tfm *tfm)
+{
+	struct cryptd_ablkcipher *cryptd_tfm;
+
+	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-serpent-sse2", 0, 0);
+	if (IS_ERR(cryptd_tfm))
+		return PTR_ERR(cryptd_tfm);
+	ablk_init_common(tfm, cryptd_tfm);
+	return 0;
+}
+
+static struct crypto_alg ablk_cbc_alg = {
+	.cra_name		= "cbc(serpent)",
+	.cra_driver_name	= "cbc-serpent-sse2",
+	.cra_priority		= 400,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= SERPENT_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct async_serpent_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
+	.cra_init		= ablk_cbc_init,
+	.cra_exit		= ablk_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize	= SERPENT_MIN_KEY_SIZE,
+			.max_keysize	= SERPENT_MAX_KEY_SIZE,
+			.ivsize		= SERPENT_BLOCK_SIZE,
+			.setkey		= ablk_set_key,
+			.encrypt	= __ablk_encrypt,
+			.decrypt	= ablk_decrypt,
+		},
+	},
+};
+
+static int ablk_ctr_init(struct crypto_tfm *tfm)
+{
+	struct cryptd_ablkcipher *cryptd_tfm;
+
+	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-serpent-sse2", 0, 0);
+	if (IS_ERR(cryptd_tfm))
+		return PTR_ERR(cryptd_tfm);
+	ablk_init_common(tfm, cryptd_tfm);
+	return 0;
+}
+
+static struct crypto_alg ablk_ctr_alg = {
+	.cra_name		= "ctr(serpent)",
+	.cra_driver_name	= "ctr-serpent-sse2",
+	.cra_priority		= 400,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= 1,
+	.cra_ctxsize		= sizeof(struct async_serpent_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
+	.cra_init		= ablk_ctr_init,
+	.cra_exit		= ablk_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize	= SERPENT_MIN_KEY_SIZE,
+			.max_keysize	= SERPENT_MAX_KEY_SIZE,
+			.ivsize		= SERPENT_BLOCK_SIZE,
+			.setkey		= ablk_set_key,
+			.encrypt	= ablk_encrypt,
+			.decrypt	= ablk_encrypt,
+			.geniv		= "chainiv",
+		},
+	},
+};
+
+static int __init serpent_sse2_init(void)
+{
+	int err;
+
+	if (!cpu_has_xmm2) {
+		printk(KERN_INFO "SSE2 instructions are not detected.\n");
+		return -ENODEV;
+	}
+
+	err = crypto_register_alg(&blk_ecb_alg);
+	if (err)
+		goto blk_ecb_err;
+	err = crypto_register_alg(&blk_cbc_alg);
+	if (err)
+		goto blk_cbc_err;
+	err = crypto_register_alg(&blk_ctr_alg);
+	if (err)
+		goto blk_ctr_err;
+	err = crypto_register_alg(&ablk_ecb_alg);
+	if (err)
+		goto ablk_ecb_err;
+	err = crypto_register_alg(&ablk_cbc_alg);
+	if (err)
+		goto ablk_cbc_err;
+	err = crypto_register_alg(&ablk_ctr_alg);
+	if (err)
+		goto ablk_ctr_err;
+	return err;
+
+ablk_ctr_err:
+	crypto_unregister_alg(&ablk_cbc_alg);
+ablk_cbc_err:
+	crypto_unregister_alg(&ablk_ecb_alg);
+ablk_ecb_err:
+	crypto_unregister_alg(&blk_ctr_alg);
+blk_ctr_err:
+	crypto_unregister_alg(&blk_cbc_alg);
+blk_cbc_err:
+	crypto_unregister_alg(&blk_ecb_alg);
+blk_ecb_err:
+	return err;
+}
+
+static void __exit serpent_sse2_exit(void)
+{
+	crypto_unregister_alg(&ablk_ctr_alg);
+	crypto_unregister_alg(&ablk_cbc_alg);
+	crypto_unregister_alg(&ablk_ecb_alg);
+	crypto_unregister_alg(&blk_ctr_alg);
+	crypto_unregister_alg(&blk_cbc_alg);
+	crypto_unregister_alg(&blk_ecb_alg);
+}
+
+module_init(serpent_sse2_init);
+module_exit(serpent_sse2_exit);
+
+MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("serpent");
diff --git a/arch/x86/include/asm/serpent.h b/arch/x86/include/asm/serpent.h
new file mode 100644
index 000000000000..b7fd3b595b27
--- /dev/null
+++ b/arch/x86/include/asm/serpent.h
@@ -0,0 +1,32 @@
+#ifndef ASM_X86_SERPENT_H
+#define ASM_X86_SERPENT_H
+
+#include <linux/crypto.h>
+#include <crypto/serpent.h>
+
+#define SERPENT_PARALLEL_BLOCKS 8
+
+asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst,
+				       const u8 *src, bool xor);
+asmlinkage void serpent_dec_blk_8way(struct serpent_ctx *ctx, u8 *dst,
+				     const u8 *src);
+
+static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
+				   const u8 *src)
+{
+	__serpent_enc_blk_8way(ctx, dst, src, false);
+}
+
+static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
+				       const u8 *src)
+{
+	__serpent_enc_blk_8way(ctx, dst, src, true);
+}
+
+static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
+				   const u8 *src)
+{
+	serpent_dec_blk_8way(ctx, dst, src);
+}
+
+#endif
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 733208fe0a2d..2df61e458f06 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -766,6 +766,23 @@ config CRYPTO_SERPENT
 	  See also:
 	  <http://www.cl.cam.ac.uk/~rja14/serpent.html>
 
+config CRYPTO_SERPENT_SSE2_X86_64
+	tristate "Serpent cipher algorithm (x86_64/SSE2)"
+	depends on X86 && 64BIT
+	select CRYPTO_ALGAPI
+	select CRYPTO_SERPENT
+	help
+	  Serpent cipher algorithm, by Anderson, Biham & Knudsen.
+
+	  Keys are allowed to be from 0 to 256 bits in length, in steps
+	  of 8 bits.
+
+	  This module provides the Serpent cipher algorithm that processes
+	  eight blocks in parallel using the SSE2 instruction set.
+
+	  See also:
+	  <http://www.cl.cam.ac.uk/~rja14/serpent.html>
+
 config CRYPTO_TEA
 	tristate "TEA, XTEA and XETA cipher algorithms"
 	select CRYPTO_ALGAPI
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 01553a6754b7..bb54b882d738 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1534,6 +1534,21 @@ static int alg_test_null(const struct alg_test_desc *desc,
 /* Please keep this list sorted by algorithm name. */
 static const struct alg_test_desc alg_test_descs[] = {
 	{
+		.alg = "__cbc-serpent-sse2",
+		.test = alg_test_null,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = NULL,
+					.count = 0
+				},
+				.dec = {
+					.vecs = NULL,
+					.count = 0
+				}
+			}
+		}
+	}, {
 		.alg = "__driver-cbc-aes-aesni",
 		.test = alg_test_null,
 		.suite = {
@@ -1548,6 +1563,21 @@ static const struct alg_test_desc alg_test_descs[] = {
 				}
 			}
 		}
+	}, {
+		.alg = "__driver-cbc-serpent-sse2",
+		.test = alg_test_null,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = NULL,
+					.count = 0
+				},
+				.dec = {
+					.vecs = NULL,
+					.count = 0
+				}
+			}
+		}
 	}, {
 		.alg = "__driver-ecb-aes-aesni",
 		.test = alg_test_null,
@@ -1563,6 +1593,21 @@ static const struct alg_test_desc alg_test_descs[] = {
 				}
 			}
 		}
+	}, {
+		.alg = "__driver-ecb-serpent-sse2",
+		.test = alg_test_null,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = NULL,
+					.count = 0
+				},
+				.dec = {
+					.vecs = NULL,
+					.count = 0
+				}
+			}
+		}
 	}, {
 		.alg = "__ghash-pclmulqdqni",
 		.test = alg_test_null,
@@ -1745,6 +1790,21 @@ static const struct alg_test_desc alg_test_descs[] = {
 				}
 			}
 		}
+	}, {
+		.alg = "cryptd(__driver-ecb-serpent-sse2)",
+		.test = alg_test_null,
+		.suite = {
+			.cipher = {
+				.enc = {
+					.vecs = NULL,
+					.count = 0
+				},
+				.dec = {
+					.vecs = NULL,
+					.count = 0
+				}
+			}
+		}
 	}, {
 		.alg = "cryptd(__ghash-pclmulqdqni)",
 		.test = alg_test_null,

From 251496dbfc1be38bc43b49651f3d33c02faccc47 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Wed, 9 Nov 2011 16:26:31 +0200
Subject: [PATCH 26/54] crypto: serpent - add 4-way parallel i586/SSE2
 assembler implementation

This patch adds an i586/SSE2 assembler implementation of the Serpent
cipher. The assembler functions crypt data in four-block chunks.
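
For context, a hedged sketch of how the shared glue can stay
width-agnostic on i586: the glue only uses the serpent_*_blk_xway()
wrappers and SERPENT_PARALLEL_BLOCKS, so the 32-bit build can plug
4-way primitives in behind the same names. The 4-way symbol names below
mirror the 8-way ones from the previous patch and are assumptions, not
quoted from this diff:

#define SERPENT_PARALLEL_BLOCKS 4	/* assumed i586 value */

asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst,
				       const u8 *src, bool xor);
asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst,
				     const u8 *src);

static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
					const u8 *src)
{
	__serpent_enc_blk_4way(ctx, dst, src, false);
}

static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
					const u8 *src)
{
	serpent_dec_blk_4way(ctx, dst, src);
}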

Patch has been tested with tcrypt and automated filesystem tests.

Tcrypt benchmark results (serpent-sse2/serpent_generic speed ratios):

Intel Atom N270:

size    ecb-enc ecb-dec cbc-enc cbc-dec ctr-enc ctr-dec
16      0.95x   1.12x   1.02x   1.07x   0.97x   0.98x
64      1.73x   1.82x   1.08x   1.82x   1.72x   1.73x
256     2.08x   2.00x   1.04x   2.07x   1.99x   2.01x
1024    2.28x   2.18x   1.05x   2.23x   2.17x   2.20x
8192    2.28x   2.13x   1.05x   2.23x   2.18x   2.20x

Full output:
 http://koti.mbnet.fi/axh/kernel/crypto/atom-n270/serpent-generic.txt
 http://koti.mbnet.fi/axh/kernel/crypto/atom-n270/serpent-sse2.txt

Userspace test results:

Encryption/decryption of sse2-i586 vs generic on Intel Atom N270:
 encrypt: 2.35x
 decrypt: 2.54x

Encryption/decryption of sse2-i586 vs generic on AMD Phenom II:
 encrypt: 1.82x
 decrypt: 2.51x

Encryption/decryption of sse2-i586 vs generic on Intel Xeon E7330:
 encrypt: 2.99x
 decrypt: 3.48x

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 arch/x86/crypto/Makefile                   |   2 +
 arch/x86/crypto/serpent-sse2-i586-asm_32.S | 638 +++++++++++++++++++++
 arch/x86/include/asm/serpent.h             |  31 +
 crypto/Kconfig                             |  17 +
 4 files changed, 688 insertions(+)
 create mode 100644 arch/x86/crypto/serpent-sse2-i586-asm_32.S

diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 12ebdbd80ccb..2b0b9631474b 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -5,6 +5,7 @@
 obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
 obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
 obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
+obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o
 
 obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
 obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
@@ -21,6 +22,7 @@ obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
 aes-i586-y := aes-i586-asm_32.o aes_glue.o
 twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
 salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
+serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o
 
 aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
 blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
new file mode 100644
index 000000000000..4e37677ca851
--- /dev/null
+++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
@@ -0,0 +1,638 @@
+/*
+ * Serpent Cipher 4-way parallel algorithm (i586/SSE2)
+ *
+ * Copyright (C) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *
+ * Based on crypto/serpent.c by
+ *  Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no>
+ *                2003 Herbert Valerio Riedel <hvr@gnu.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
+ * USA
+ *
+ */
+
+.file "serpent-sse2-i586-asm_32.S"
+.text
+
+#define arg_ctx 4
+#define arg_dst 8
+#define arg_src 12
+#define arg_xor 16
+
+/**********************************************************************
+  4-way SSE2 serpent
+ **********************************************************************/
+#define CTX %edx
+
+#define RA %xmm0
+#define RB %xmm1
+#define RC %xmm2
+#define RD %xmm3
+#define RE %xmm4
+
+#define RT0 %xmm5
+#define RT1 %xmm6
+
+#define RNOT %xmm7
+
+#define get_key(i, j, t) \
+	movd (4*(i)+(j))*4(CTX), t; \
+	pshufd $0, t, t;
+
+#define K(x0, x1, x2, x3, x4, i) \
+	get_key(i, 0, x4); \
+	get_key(i, 1, RT0); \
+	get_key(i, 2, RT1); \
+	pxor x4,		x0; \
+	pxor RT0,		x1; \
+	pxor RT1,		x2; \
+	get_key(i, 3, x4); \
+	pxor x4,		x3;
+
+#define LK(x0, x1, x2, x3, x4, i) \
+	movdqa x0,		x4; \
+	pslld $13,		x0; \
+	psrld $(32 - 13),	x4; \
+	por x4,			x0; \
+	pxor x0,		x1; \
+	movdqa x2,		x4; \
+	pslld $3,		x2; \
+	psrld $(32 - 3),	x4; \
+	por x4,			x2; \
+	pxor x2,		x1; \
+	movdqa x1,		x4; \
+	pslld $1,		x1; \
+	psrld $(32 - 1),	x4; \
+	por x4,			x1; \
+	movdqa x0,		x4; \
+	pslld $3,		x4; \
+	pxor x2,		x3; \
+	pxor x4,		x3; \
+	movdqa x3,		x4; \
+	pslld $7,		x3; \
+	psrld $(32 - 7),	x4; \
+	por x4,			x3; \
+	movdqa x1,		x4; \
+	pslld $7,		x4; \
+	pxor x1,		x0; \
+	pxor x3,		x0; \
+	pxor x3,		x2; \
+	pxor x4,		x2; \
+	movdqa x0,		x4; \
+	get_key(i, 1, RT0); \
+	pxor RT0,		x1; \
+	get_key(i, 3, RT0); \
+	pxor RT0,		x3; \
+	pslld $5,		x0; \
+	psrld $(32 - 5),	x4; \
+	por x4,			x0; \
+	movdqa x2,		x4; \
+	pslld $22,		x2; \
+	psrld $(32 - 22),	x4; \
+	por x4,			x2; \
+	get_key(i, 0, RT0); \
+	pxor RT0,		x0; \
+	get_key(i, 2, RT0); \
+	pxor RT0,		x2;
+
+#define KL(x0, x1, x2, x3, x4, i) \
+	K(x0, x1, x2, x3, x4, i); \
+	movdqa x0,		x4; \
+	psrld $5,		x0; \
+	pslld $(32 - 5),	x4; \
+	por x4,			x0; \
+	movdqa x2,		x4; \
+	psrld $22,		x2; \
+	pslld $(32 - 22),	x4; \
+	por x4,			x2; \
+	pxor x3,		x2; \
+	pxor x3,		x0; \
+	movdqa x1,		x4; \
+	pslld $7,		x4; \
+	pxor x1,		x0; \
+	pxor x4,		x2; \
+	movdqa x1,		x4; \
+	psrld $1,		x1; \
+	pslld $(32 - 1),	x4; \
+	por x4,			x1; \
+	movdqa x3,		x4; \
+	psrld $7,		x3; \
+	pslld $(32 - 7),	x4; \
+	por x4,			x3; \
+	pxor x0,		x1; \
+	movdqa x0,		x4; \
+	pslld $3,		x4; \
+	pxor x4,		x3; \
+	movdqa x0,		x4; \
+	psrld $13,		x0; \
+	pslld $(32 - 13),	x4; \
+	por x4,			x0; \
+	pxor x2,		x1; \
+	pxor x2,		x3; \
+	movdqa x2,		x4; \
+	psrld $3,		x2; \
+	pslld $(32 - 3),	x4; \
+	por x4,			x2;
+
+#define S0(x0, x1, x2, x3, x4) \
+	movdqa x3,		x4; \
+	por x0,			x3; \
+	pxor x4,		x0; \
+	pxor x2,		x4; \
+	pxor RNOT,		x4; \
+	pxor x1,		x3; \
+	pand x0,		x1; \
+	pxor x4,		x1; \
+	pxor x0,		x2; \
+	pxor x3,		x0; \
+	por x0,			x4; \
+	pxor x2,		x0; \
+	pand x1,		x2; \
+	pxor x2,		x3; \
+	pxor RNOT,		x1; \
+	pxor x4,		x2; \
+	pxor x2,		x1;
+
+#define S1(x0, x1, x2, x3, x4) \
+	movdqa x1,		x4; \
+	pxor x0,		x1; \
+	pxor x3,		x0; \
+	pxor RNOT,		x3; \
+	pand x1,		x4; \
+	por x1,			x0; \
+	pxor x2,		x3; \
+	pxor x3,		x0; \
+	pxor x3,		x1; \
+	pxor x4,		x3; \
+	por x4,			x1; \
+	pxor x2,		x4; \
+	pand x0,		x2; \
+	pxor x1,		x2; \
+	por x0,			x1; \
+	pxor RNOT,		x0; \
+	pxor x2,		x0; \
+	pxor x1,		x4;
+
+#define S2(x0, x1, x2, x3, x4) \
+	pxor RNOT,		x3; \
+	pxor x0,		x1; \
+	movdqa x0,		x4; \
+	pand x2,		x0; \
+	pxor x3,		x0; \
+	por x4,			x3; \
+	pxor x1,		x2; \
+	pxor x1,		x3; \
+	pand x0,		x1; \
+	pxor x2,		x0; \
+	pand x3,		x2; \
+	por x1,			x3; \
+	pxor RNOT,		x0; \
+	pxor x0,		x3; \
+	pxor x0,		x4; \
+	pxor x2,		x0; \
+	por x2,			x1;
+
+#define S3(x0, x1, x2, x3, x4) \
+	movdqa x1,		x4; \
+	pxor x3,		x1; \
+	por x0,			x3; \
+	pand x0,		x4; \
+	pxor x2,		x0; \
+	pxor x1,		x2; \
+	pand x3,		x1; \
+	pxor x3,		x2; \
+	por x4,			x0; \
+	pxor x3,		x4; \
+	pxor x0,		x1; \
+	pand x3,		x0; \
+	pand x4,		x3; \
+	pxor x2,		x3; \
+	por x1,			x4; \
+	pand x1,		x2; \
+	pxor x3,		x4; \
+	pxor x3,		x0; \
+	pxor x2,		x3;
+
+#define S4(x0, x1, x2, x3, x4) \
+	movdqa x3,		x4; \
+	pand x0,		x3; \
+	pxor x4,		x0; \
+	pxor x2,		x3; \
+	por x4,			x2; \
+	pxor x1,		x0; \
+	pxor x3,		x4; \
+	por x0,			x2; \
+	pxor x1,		x2; \
+	pand x0,		x1; \
+	pxor x4,		x1; \
+	pand x2,		x4; \
+	pxor x3,		x2; \
+	pxor x0,		x4; \
+	por x1,			x3; \
+	pxor RNOT,		x1; \
+	pxor x0,		x3;
+
+#define S5(x0, x1, x2, x3, x4) \
+	movdqa x1,		x4; \
+	por x0,			x1; \
+	pxor x1,		x2; \
+	pxor RNOT,		x3; \
+	pxor x0,		x4; \
+	pxor x2,		x0; \
+	pand x4,		x1; \
+	por x3,			x4; \
+	pxor x0,		x4; \
+	pand x3,		x0; \
+	pxor x3,		x1; \
+	pxor x2,		x3; \
+	pxor x1,		x0; \
+	pand x4,		x2; \
+	pxor x2,		x1; \
+	pand x0,		x2; \
+	pxor x2,		x3;
+
+#define S6(x0, x1, x2, x3, x4) \
+	movdqa x1,		x4; \
+	pxor x0,		x3; \
+	pxor x2,		x1; \
+	pxor x0,		x2; \
+	pand x3,		x0; \
+	por x3,			x1; \
+	pxor RNOT,		x4; \
+	pxor x1,		x0; \
+	pxor x2,		x1; \
+	pxor x4,		x3; \
+	pxor x0,		x4; \
+	pand x0,		x2; \
+	pxor x1,		x4; \
+	pxor x3,		x2; \
+	pand x1,		x3; \
+	pxor x0,		x3; \
+	pxor x2,		x1;
+
+#define S7(x0, x1, x2, x3, x4) \
+	pxor RNOT,		x1; \
+	movdqa x1,		x4; \
+	pxor RNOT,		x0; \
+	pand x2,		x1; \
+	pxor x3,		x1; \
+	por x4,			x3; \
+	pxor x2,		x4; \
+	pxor x3,		x2; \
+	pxor x0,		x3; \
+	por x1,			x0; \
+	pand x0,		x2; \
+	pxor x4,		x0; \
+	pxor x3,		x4; \
+	pand x0,		x3; \
+	pxor x1,		x4; \
+	pxor x4,		x2; \
+	pxor x1,		x3; \
+	por x0,			x4; \
+	pxor x1,		x4;
+
+#define SI0(x0, x1, x2, x3, x4) \
+	movdqa x3,		x4; \
+	pxor x0,		x1; \
+	por x1,			x3; \
+	pxor x1,		x4; \
+	pxor RNOT,		x0; \
+	pxor x3,		x2; \
+	pxor x0,		x3; \
+	pand x1,		x0; \
+	pxor x2,		x0; \
+	pand x3,		x2; \
+	pxor x4,		x3; \
+	pxor x3,		x2; \
+	pxor x3,		x1; \
+	pand x0,		x3; \
+	pxor x0,		x1; \
+	pxor x2,		x0; \
+	pxor x3,		x4;
+
+#define SI1(x0, x1, x2, x3, x4) \
+	pxor x3,		x1; \
+	movdqa x0,		x4; \
+	pxor x2,		x0; \
+	pxor RNOT,		x2; \
+	por x1,			x4; \
+	pxor x3,		x4; \
+	pand x1,		x3; \
+	pxor x2,		x1; \
+	pand x4,		x2; \
+	pxor x1,		x4; \
+	por x3,			x1; \
+	pxor x0,		x3; \
+	pxor x0,		x2; \
+	por x4,			x0; \
+	pxor x4,		x2; \
+	pxor x0,		x1; \
+	pxor x1,		x4;
+
+#define SI2(x0, x1, x2, x3, x4) \
+	pxor x1,		x2; \
+	movdqa x3,		x4; \
+	pxor RNOT,		x3; \
+	por x2,			x3; \
+	pxor x4,		x2; \
+	pxor x0,		x4; \
+	pxor x1,		x3; \
+	por x2,			x1; \
+	pxor x0,		x2; \
+	pxor x4,		x1; \
+	por x3,			x4; \
+	pxor x3,		x2; \
+	pxor x2,		x4; \
+	pand x1,		x2; \
+	pxor x3,		x2; \
+	pxor x4,		x3; \
+	pxor x0,		x4;
+
+#define SI3(x0, x1, x2, x3, x4) \
+	pxor x1,		x2; \
+	movdqa x1,		x4; \
+	pand x2,		x1; \
+	pxor x0,		x1; \
+	por x4,			x0; \
+	pxor x3,		x4; \
+	pxor x3,		x0; \
+	por x1,			x3; \
+	pxor x2,		x1; \
+	pxor x3,		x1; \
+	pxor x2,		x0; \
+	pxor x3,		x2; \
+	pand x1,		x3; \
+	pxor x0,		x1; \
+	pand x2,		x0; \
+	pxor x3,		x4; \
+	pxor x0,		x3; \
+	pxor x1,		x0;
+
+#define SI4(x0, x1, x2, x3, x4) \
+	pxor x3,		x2; \
+	movdqa x0,		x4; \
+	pand x1,		x0; \
+	pxor x2,		x0; \
+	por x3,			x2; \
+	pxor RNOT,		x4; \
+	pxor x0,		x1; \
+	pxor x2,		x0; \
+	pand x4,		x2; \
+	pxor x0,		x2; \
+	por x4,			x0; \
+	pxor x3,		x0; \
+	pand x2,		x3; \
+	pxor x3,		x4; \
+	pxor x1,		x3; \
+	pand x0,		x1; \
+	pxor x1,		x4; \
+	pxor x3,		x0;
+
+#define SI5(x0, x1, x2, x3, x4) \
+	movdqa x1,		x4; \
+	por x2,			x1; \
+	pxor x4,		x2; \
+	pxor x3,		x1; \
+	pand x4,		x3; \
+	pxor x3,		x2; \
+	por x0,			x3; \
+	pxor RNOT,		x0; \
+	pxor x2,		x3; \
+	por x0,			x2; \
+	pxor x1,		x4; \
+	pxor x4,		x2; \
+	pand x0,		x4; \
+	pxor x1,		x0; \
+	pxor x3,		x1; \
+	pand x2,		x0; \
+	pxor x3,		x2; \
+	pxor x2,		x0; \
+	pxor x4,		x2; \
+	pxor x3,		x4;
+
+#define SI6(x0, x1, x2, x3, x4) \
+	pxor x2,		x0; \
+	movdqa x0,		x4; \
+	pand x3,		x0; \
+	pxor x3,		x2; \
+	pxor x2,		x0; \
+	pxor x1,		x3; \
+	por x4,			x2; \
+	pxor x3,		x2; \
+	pand x0,		x3; \
+	pxor RNOT,		x0; \
+	pxor x1,		x3; \
+	pand x2,		x1; \
+	pxor x0,		x4; \
+	pxor x4,		x3; \
+	pxor x2,		x4; \
+	pxor x1,		x0; \
+	pxor x0,		x2;
+
+#define SI7(x0, x1, x2, x3, x4) \
+	movdqa x3,		x4; \
+	pand x0,		x3; \
+	pxor x2,		x0; \
+	por x4,			x2; \
+	pxor x1,		x4; \
+	pxor RNOT,		x0; \
+	por x3,			x1; \
+	pxor x0,		x4; \
+	pand x2,		x0; \
+	pxor x1,		x0; \
+	pand x2,		x1; \
+	pxor x2,		x3; \
+	pxor x3,		x4; \
+	pand x3,		x2; \
+	por x0,			x3; \
+	pxor x4,		x1; \
+	pxor x4,		x3; \
+	pand x0,		x4; \
+	pxor x2,		x4;
+
+#define transpose_4x4(x0, x1, x2, x3, t1, t2, t3) \
+	movdqa x2,		t3; \
+	movdqa x0,		t1; \
+	unpcklps x3,		t3; \
+	movdqa x0,		t2; \
+	unpcklps x1,		t1; \
+	unpckhps x1,		t2; \
+	movdqa t3,		x1; \
+	unpckhps x3,		x2; \
+	movdqa t1,		x0; \
+	movhlps t1,		x1; \
+	movdqa t2,		t1; \
+	movlhps t3,		x0; \
+	movlhps x2,		t1; \
+	movhlps t2,		x2; \
+	movdqa x2,		x3; \
+	movdqa t1,		x2;
+
+#define read_blocks(in, x0, x1, x2, x3, t0, t1, t2) \
+	movdqu (0*4*4)(in),	x0; \
+	movdqu (1*4*4)(in),	x1; \
+	movdqu (2*4*4)(in),	x2; \
+	movdqu (3*4*4)(in),	x3; \
+	\
+	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
+
+#define write_blocks(out, x0, x1, x2, x3, t0, t1, t2) \
+	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
+	\
+	movdqu x0, (0*4*4)(out); \
+	movdqu x1, (1*4*4)(out); \
+	movdqu x2, (2*4*4)(out); \
+	movdqu x3, (3*4*4)(out);
+
+#define xor_blocks(out, x0, x1, x2, x3, t0, t1, t2) \
+	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
+	\
+	movdqu (0*4*4)(out),	t0; \
+	pxor t0,		x0; \
+	movdqu x0,		(0*4*4)(out); \
+	movdqu (1*4*4)(out),	t0; \
+	pxor t0,		x1; \
+	movdqu x1,		(1*4*4)(out); \
+	movdqu (2*4*4)(out),	t0; \
+	pxor t0,		x2; \
+	movdqu x2,		(2*4*4)(out); \
+	movdqu (3*4*4)(out),	t0; \
+	pxor t0,		x3; \
+	movdqu x3,		(3*4*4)(out);
+
+.align 8
+.global __serpent_enc_blk_4way
+.type   __serpent_enc_blk_4way,@function;
+
+__serpent_enc_blk_4way:
+	/* input:
+	 *	arg_ctx(%esp): ctx, CTX
+	 *	arg_dst(%esp): dst
+	 *	arg_src(%esp): src
+	 *	arg_xor(%esp): bool, if true: xor output
+	 */
+
+	pcmpeqd RNOT, RNOT;
+
+	movl arg_ctx(%esp), CTX;
+
+	movl arg_src(%esp), %eax;
+	read_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
+
+					 K(RA, RB, RC, RD, RE, 0);
+	S0(RA, RB, RC, RD, RE);		LK(RC, RB, RD, RA, RE, 1);
+	S1(RC, RB, RD, RA, RE);		LK(RE, RD, RA, RC, RB, 2);
+	S2(RE, RD, RA, RC, RB);		LK(RB, RD, RE, RC, RA, 3);
+	S3(RB, RD, RE, RC, RA);		LK(RC, RA, RD, RB, RE, 4);
+	S4(RC, RA, RD, RB, RE);		LK(RA, RD, RB, RE, RC, 5);
+	S5(RA, RD, RB, RE, RC);		LK(RC, RA, RD, RE, RB, 6);
+	S6(RC, RA, RD, RE, RB);		LK(RD, RB, RA, RE, RC, 7);
+	S7(RD, RB, RA, RE, RC);		LK(RC, RA, RE, RD, RB, 8);
+	S0(RC, RA, RE, RD, RB);		LK(RE, RA, RD, RC, RB, 9);
+	S1(RE, RA, RD, RC, RB);		LK(RB, RD, RC, RE, RA, 10);
+	S2(RB, RD, RC, RE, RA);		LK(RA, RD, RB, RE, RC, 11);
+	S3(RA, RD, RB, RE, RC);		LK(RE, RC, RD, RA, RB, 12);
+	S4(RE, RC, RD, RA, RB);		LK(RC, RD, RA, RB, RE, 13);
+	S5(RC, RD, RA, RB, RE);		LK(RE, RC, RD, RB, RA, 14);
+	S6(RE, RC, RD, RB, RA);		LK(RD, RA, RC, RB, RE, 15);
+	S7(RD, RA, RC, RB, RE);		LK(RE, RC, RB, RD, RA, 16);
+	S0(RE, RC, RB, RD, RA);		LK(RB, RC, RD, RE, RA, 17);
+	S1(RB, RC, RD, RE, RA);		LK(RA, RD, RE, RB, RC, 18);
+	S2(RA, RD, RE, RB, RC);		LK(RC, RD, RA, RB, RE, 19);
+	S3(RC, RD, RA, RB, RE);		LK(RB, RE, RD, RC, RA, 20);
+	S4(RB, RE, RD, RC, RA);		LK(RE, RD, RC, RA, RB, 21);
+	S5(RE, RD, RC, RA, RB);		LK(RB, RE, RD, RA, RC, 22);
+	S6(RB, RE, RD, RA, RC);		LK(RD, RC, RE, RA, RB, 23);
+	S7(RD, RC, RE, RA, RB);		LK(RB, RE, RA, RD, RC, 24);
+	S0(RB, RE, RA, RD, RC);		LK(RA, RE, RD, RB, RC, 25);
+	S1(RA, RE, RD, RB, RC);		LK(RC, RD, RB, RA, RE, 26);
+	S2(RC, RD, RB, RA, RE);		LK(RE, RD, RC, RA, RB, 27);
+	S3(RE, RD, RC, RA, RB);		LK(RA, RB, RD, RE, RC, 28);
+	S4(RA, RB, RD, RE, RC);		LK(RB, RD, RE, RC, RA, 29);
+	S5(RB, RD, RE, RC, RA);		LK(RA, RB, RD, RC, RE, 30);
+	S6(RA, RB, RD, RC, RE);		LK(RD, RE, RB, RC, RA, 31);
+	S7(RD, RE, RB, RC, RA);		 K(RA, RB, RC, RD, RE, 32);
+
+	movl arg_dst(%esp), %eax;
+
+	cmpb $0, arg_xor(%esp);
+	jnz __enc_xor4;
+
+	write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
+
+	ret;
+
+__enc_xor4:
+	xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
+
+	ret;
+
+.align 8
+.global serpent_dec_blk_4way
+.type   serpent_dec_blk_4way,@function;
+
+serpent_dec_blk_4way:
+	/* input:
+	 *	arg_ctx(%esp): ctx, CTX
+	 *	arg_dst(%esp): dst
+	 *	arg_src(%esp): src
+	 */
+
+	pcmpeqd RNOT, RNOT;
+
+	movl arg_ctx(%esp), CTX;
+
+	movl arg_src(%esp), %eax;
+	read_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
+
+					 K(RA, RB, RC, RD, RE, 32);
+	SI7(RA, RB, RC, RD, RE);	KL(RB, RD, RA, RE, RC, 31);
+	SI6(RB, RD, RA, RE, RC);	KL(RA, RC, RE, RB, RD, 30);
+	SI5(RA, RC, RE, RB, RD);	KL(RC, RD, RA, RE, RB, 29);
+	SI4(RC, RD, RA, RE, RB);	KL(RC, RA, RB, RE, RD, 28);
+	SI3(RC, RA, RB, RE, RD);	KL(RB, RC, RD, RE, RA, 27);
+	SI2(RB, RC, RD, RE, RA);	KL(RC, RA, RE, RD, RB, 26);
+	SI1(RC, RA, RE, RD, RB);	KL(RB, RA, RE, RD, RC, 25);
+	SI0(RB, RA, RE, RD, RC);	KL(RE, RC, RA, RB, RD, 24);
+	SI7(RE, RC, RA, RB, RD);	KL(RC, RB, RE, RD, RA, 23);
+	SI6(RC, RB, RE, RD, RA);	KL(RE, RA, RD, RC, RB, 22);
+	SI5(RE, RA, RD, RC, RB);	KL(RA, RB, RE, RD, RC, 21);
+	SI4(RA, RB, RE, RD, RC);	KL(RA, RE, RC, RD, RB, 20);
+	SI3(RA, RE, RC, RD, RB);	KL(RC, RA, RB, RD, RE, 19);
+	SI2(RC, RA, RB, RD, RE);	KL(RA, RE, RD, RB, RC, 18);
+	SI1(RA, RE, RD, RB, RC);	KL(RC, RE, RD, RB, RA, 17);
+	SI0(RC, RE, RD, RB, RA);	KL(RD, RA, RE, RC, RB, 16);
+	SI7(RD, RA, RE, RC, RB);	KL(RA, RC, RD, RB, RE, 15);
+	SI6(RA, RC, RD, RB, RE);	KL(RD, RE, RB, RA, RC, 14);
+	SI5(RD, RE, RB, RA, RC);	KL(RE, RC, RD, RB, RA, 13);
+	SI4(RE, RC, RD, RB, RA);	KL(RE, RD, RA, RB, RC, 12);
+	SI3(RE, RD, RA, RB, RC);	KL(RA, RE, RC, RB, RD, 11);
+	SI2(RA, RE, RC, RB, RD);	KL(RE, RD, RB, RC, RA, 10);
+	SI1(RE, RD, RB, RC, RA);	KL(RA, RD, RB, RC, RE, 9);
+	SI0(RA, RD, RB, RC, RE);	KL(RB, RE, RD, RA, RC, 8);
+	SI7(RB, RE, RD, RA, RC);	KL(RE, RA, RB, RC, RD, 7);
+	SI6(RE, RA, RB, RC, RD);	KL(RB, RD, RC, RE, RA, 6);
+	SI5(RB, RD, RC, RE, RA);	KL(RD, RA, RB, RC, RE, 5);
+	SI4(RD, RA, RB, RC, RE);	KL(RD, RB, RE, RC, RA, 4);
+	SI3(RD, RB, RE, RC, RA);	KL(RE, RD, RA, RC, RB, 3);
+	SI2(RE, RD, RA, RC, RB);	KL(RD, RB, RC, RA, RE, 2);
+	SI1(RD, RB, RC, RA, RE);	KL(RE, RB, RC, RA, RD, 1);
+	SI0(RE, RB, RC, RA, RD);	 K(RC, RD, RB, RE, RA, 0);
+
+	movl arg_dst(%esp), %eax;
+	write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA);
+
+	ret;
diff --git a/arch/x86/include/asm/serpent.h b/arch/x86/include/asm/serpent.h
index b7fd3b595b27..d3ef63fe0c81 100644
--- a/arch/x86/include/asm/serpent.h
+++ b/arch/x86/include/asm/serpent.h
@@ -4,6 +4,35 @@
 #include <linux/crypto.h>
 #include <crypto/serpent.h>
 
+#ifdef CONFIG_X86_32
+
+#define SERPENT_PARALLEL_BLOCKS 4
+
+asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst,
+				       const u8 *src, bool xor);
+asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst,
+				     const u8 *src);
+
+static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
+					const u8 *src)
+{
+	__serpent_enc_blk_4way(ctx, dst, src, false);
+}
+
+static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
+					    const u8 *src)
+{
+	__serpent_enc_blk_4way(ctx, dst, src, true);
+}
+
+static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
+					const u8 *src)
+{
+	serpent_dec_blk_4way(ctx, dst, src);
+}
+
+#else
+
 #define SERPENT_PARALLEL_BLOCKS 8
 
 asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst,
@@ -30,3 +59,5 @@ static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
 }
 
 #endif
+
+#endif
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 2df61e458f06..16e0c1304c3a 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -783,6 +783,23 @@ config CRYPTO_SERPENT_SSE2_X86_64
 	  See also:
 	  <http://www.cl.cam.ac.uk/~rja14/serpent.html>
 
+config CRYPTO_SERPENT_SSE2_586
+	tristate "Serpent cipher algorithm (i586/SSE2)"
+	depends on X86 && !64BIT
+	select CRYPTO_ALGAPI
+	select CRYPTO_SERPENT
+	help
+	  Serpent cipher algorithm, by Anderson, Biham & Knudsen.
+
+	  Keys are allowed to be from 0 to 256 bits in length, in steps
+	  of 8 bits.
+
+	  This module provides the Serpent cipher algorithm that processes
+	  four blocks in parallel using the SSE2 instruction set.
+
+	  See also:
+	  <http://www.cl.cam.ac.uk/~rja14/serpent.html>
+
 config CRYPTO_TEA
 	tristate "TEA, XTEA and XETA cipher algorithms"
 	select CRYPTO_ALGAPI

From 18482053f92b099663bd36a10e8f6bd2c8544669 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Wed, 9 Nov 2011 16:26:36 +0200
Subject: [PATCH 27/54] crypto: serpent-sse2 - add lrw support

This patch adds LRW support for serpent-sse2 by using lrw_crypt(). It has
been tested with tcrypt and automated filesystem tests.
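
As a keying sketch only (not part of this patch; the helper name is made
up), the new "lrw(serpent)" algorithm takes the serpent key followed by
one extra cipher block of tweak-key material, which is why the algs below
advertise SERPENT_*_KEY_SIZE + SERPENT_BLOCK_SIZE:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/random.h>

static int example_key_lrw_serpent(void)
{
	/* 256-bit serpent key followed by a 16-byte LRW tweak key */
	u8 key[32 + 16];
	struct crypto_ablkcipher *tfm;
	int err;

	tfm = crypto_alloc_ablkcipher("lrw(serpent)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	get_random_bytes(key, sizeof(key));
	err = crypto_ablkcipher_setkey(tfm, key, sizeof(key));

	crypto_free_ablkcipher(tfm);
	return err;
}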

Tcrypt benchmark results (serpent-sse2/serpent_generic speed ratios):

Intel Celeron T1600 (x86_64) (fam:6, model:15, step:13):
size    lrw-enc lrw-dec
16B     1.00x   0.96x
64B     1.01x   1.01x
256B    3.01x   2.97x
1024B   3.39x   3.33x
8192B   3.35x   3.33x

AMD Phenom II 1055T (x86_64) (fam:16, model:10):
size    lrw-enc lrw-dec
16B     0.98x   1.03x
64B     1.01x   1.04x
256B    2.10x   2.14x
1024B   2.28x   2.33x
8192B   2.30x   2.33x

Intel Atom N270 (i586):
size    lrw-enc lrw-dec
16B     0.97x   0.97x
64B     1.47x   1.50x
256B    1.72x   1.69x
1024B   1.88x   1.81x
8192B   1.84x   1.79x

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 arch/x86/crypto/serpent_sse2_glue.c | 211 ++++++++++++++++++++++++++++
 crypto/serpent.c                    |  10 +-
 include/crypto/serpent.h            |   2 +
 3 files changed, 221 insertions(+), 2 deletions(-)

diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 947cf570f6a7..db318e5cb240 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -39,12 +39,17 @@
 #include <crypto/cryptd.h>
 #include <crypto/b128ops.h>
 #include <crypto/ctr.h>
+#include <crypto/lrw.h>
 #include <asm/i387.h>
 #include <asm/serpent.h>
 #include <crypto/scatterwalk.h>
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
 
+#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
+#define HAS_LRW
+#endif
+
 struct async_serpent_ctx {
 	struct cryptd_ablkcipher *cryptd_tfm;
 };
@@ -460,6 +465,152 @@ static struct crypto_alg blk_ctr_alg = {
 	},
 };
 
+#ifdef HAS_LRW
+
+struct crypt_priv {
+	struct serpent_ctx *ctx;
+	bool fpu_enabled;
+};
+
+static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+{
+	const unsigned int bsize = SERPENT_BLOCK_SIZE;
+	struct crypt_priv *ctx = priv;
+	int i;
+
+	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+
+	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
+		serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst);
+		return;
+	}
+
+	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+		__serpent_encrypt(ctx->ctx, srcdst, srcdst);
+}
+
+static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
+{
+	const unsigned int bsize = SERPENT_BLOCK_SIZE;
+	struct crypt_priv *ctx = priv;
+	int i;
+
+	ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes);
+
+	if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) {
+		serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst);
+		return;
+	}
+
+	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
+		__serpent_decrypt(ctx->ctx, srcdst, srcdst);
+}
+
+struct serpent_lrw_ctx {
+	struct lrw_table_ctx lrw_table;
+	struct serpent_ctx serpent_ctx;
+};
+
+static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
+			      unsigned int keylen)
+{
+	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
+	int err;
+
+	err = __serpent_setkey(&ctx->serpent_ctx, key, keylen -
+							SERPENT_BLOCK_SIZE);
+	if (err)
+		return err;
+
+	return lrw_init_table(&ctx->lrw_table, key + keylen -
+						SERPENT_BLOCK_SIZE);
+}
+
+static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	be128 buf[SERPENT_PARALLEL_BLOCKS];
+	struct crypt_priv crypt_ctx = {
+		.ctx = &ctx->serpent_ctx,
+		.fpu_enabled = false,
+	};
+	struct lrw_crypt_req req = {
+		.tbuf = buf,
+		.tbuflen = sizeof(buf),
+
+		.table_ctx = &ctx->lrw_table,
+		.crypt_ctx = &crypt_ctx,
+		.crypt_fn = encrypt_callback,
+	};
+	int ret;
+
+	ret = lrw_crypt(desc, dst, src, nbytes, &req);
+	serpent_fpu_end(crypt_ctx.fpu_enabled);
+
+	return ret;
+}
+
+static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	be128 buf[SERPENT_PARALLEL_BLOCKS];
+	struct crypt_priv crypt_ctx = {
+		.ctx = &ctx->serpent_ctx,
+		.fpu_enabled = false,
+	};
+	struct lrw_crypt_req req = {
+		.tbuf = buf,
+		.tbuflen = sizeof(buf),
+
+		.table_ctx = &ctx->lrw_table,
+		.crypt_ctx = &crypt_ctx,
+		.crypt_fn = decrypt_callback,
+	};
+	int ret;
+
+	ret = lrw_crypt(desc, dst, src, nbytes, &req);
+	serpent_fpu_end(crypt_ctx.fpu_enabled);
+
+	return ret;
+}
+
+static void lrw_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	lrw_free_table(&ctx->lrw_table);
+}
+
+static struct crypto_alg blk_lrw_alg = {
+	.cra_name		= "__lrw-serpent-sse2",
+	.cra_driver_name	= "__driver-lrw-serpent-sse2",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= SERPENT_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct serpent_lrw_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(blk_lrw_alg.cra_list),
+	.cra_exit		= lrw_exit_tfm,
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize	= SERPENT_MIN_KEY_SIZE +
+					  SERPENT_BLOCK_SIZE,
+			.max_keysize	= SERPENT_MAX_KEY_SIZE +
+					  SERPENT_BLOCK_SIZE,
+			.ivsize		= SERPENT_BLOCK_SIZE,
+			.setkey		= lrw_serpent_setkey,
+			.encrypt	= lrw_encrypt,
+			.decrypt	= lrw_decrypt,
+		},
+	},
+};
+
+#endif
+
 static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
 			unsigned int key_len)
 {
@@ -658,6 +809,48 @@ static struct crypto_alg ablk_ctr_alg = {
 	},
 };
 
+#ifdef HAS_LRW
+
+static int ablk_lrw_init(struct crypto_tfm *tfm)
+{
+	struct cryptd_ablkcipher *cryptd_tfm;
+
+	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-lrw-serpent-sse2", 0, 0);
+	if (IS_ERR(cryptd_tfm))
+		return PTR_ERR(cryptd_tfm);
+	ablk_init_common(tfm, cryptd_tfm);
+	return 0;
+}
+
+static struct crypto_alg ablk_lrw_alg = {
+	.cra_name		= "lrw(serpent)",
+	.cra_driver_name	= "lrw-serpent-sse2",
+	.cra_priority		= 400,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= SERPENT_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct async_serpent_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
+	.cra_init		= ablk_lrw_init,
+	.cra_exit		= ablk_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize	= SERPENT_MIN_KEY_SIZE +
+					  SERPENT_BLOCK_SIZE,
+			.max_keysize	= SERPENT_MAX_KEY_SIZE +
+					  SERPENT_BLOCK_SIZE,
+			.ivsize		= SERPENT_BLOCK_SIZE,
+			.setkey		= ablk_set_key,
+			.encrypt	= ablk_encrypt,
+			.decrypt	= ablk_decrypt,
+		},
+	},
+};
+
+#endif
+
 static int __init serpent_sse2_init(void)
 {
 	int err;
@@ -685,8 +878,22 @@ static int __init serpent_sse2_init(void)
 	err = crypto_register_alg(&ablk_ctr_alg);
 	if (err)
 		goto ablk_ctr_err;
+#ifdef HAS_LRW
+	err = crypto_register_alg(&blk_lrw_alg);
+	if (err)
+		goto blk_lrw_err;
+	err = crypto_register_alg(&ablk_lrw_alg);
+	if (err)
+		goto ablk_lrw_err;
+#endif
 	return err;
 
+#ifdef HAS_LRW
+ablk_lrw_err:
+	crypto_unregister_alg(&blk_lrw_alg);
+blk_lrw_err:
+	crypto_unregister_alg(&ablk_ctr_alg);
+#endif
 ablk_ctr_err:
 	crypto_unregister_alg(&ablk_cbc_alg);
 ablk_cbc_err:
@@ -703,6 +910,10 @@ blk_ecb_err:
 
 static void __exit serpent_sse2_exit(void)
 {
+#ifdef HAS_LRW
+	crypto_unregister_alg(&ablk_lrw_alg);
+	crypto_unregister_alg(&blk_lrw_alg);
+#endif
 	crypto_unregister_alg(&ablk_ctr_alg);
 	crypto_unregister_alg(&ablk_cbc_alg);
 	crypto_unregister_alg(&ablk_ecb_alg);
diff --git a/crypto/serpent.c b/crypto/serpent.c
index eb6163041b08..7cc2caeb88a5 100644
--- a/crypto/serpent.c
+++ b/crypto/serpent.c
@@ -206,9 +206,9 @@
 	x1 ^= x4;	x3 ^= x4;	x4 &= x0;	\
 	x4 ^= x2;
 
-int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
+int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
+		     unsigned int keylen)
 {
-	struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
 	u32 *k = ctx->expkey;
 	u8  *k8 = (u8 *)k;
 	u32 r0,r1,r2,r3,r4;
@@ -349,6 +349,12 @@ int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(__serpent_setkey);
+
+int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
+{
+	return __serpent_setkey(crypto_tfm_ctx(tfm), key, keylen);
+}
 EXPORT_SYMBOL_GPL(serpent_setkey);
 
 void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src)
diff --git a/include/crypto/serpent.h b/include/crypto/serpent.h
index 40df885f9d1f..b7e0941eb6fc 100644
--- a/include/crypto/serpent.h
+++ b/include/crypto/serpent.h
@@ -17,6 +17,8 @@ struct serpent_ctx {
 	u32 expkey[SERPENT_EXPKEY_WORDS];
 };
 
+int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
+		     unsigned int keylen);
 int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
 
 void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);

From 5962f8b66dd040ad89d55b58967ea2dec607f4d3 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Wed, 9 Nov 2011 16:26:41 +0200
Subject: [PATCH 28/54] crypto: serpent-sse2 - add xts support

This patch adds XTS support for serpent-sse2 by using xts_crypt(). It has
been tested with tcrypt and automated filesystem tests.
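
As with the LRW patch, a keying sketch only (not part of this patch; the
helper name is made up): XTS expects two equal-size serpent keys back to
back, the first half for data and the second half for the tweak, hence
the keylen % 2 check in xts_serpent_setkey() and the *_KEY_SIZE * 2
limits below:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/random.h>

static int example_key_xts_serpent(void)
{
	/* two 256-bit serpent keys: data key, then tweak key */
	u8 key[2 * 32];
	struct crypto_ablkcipher *tfm;
	int err;

	tfm = crypto_alloc_ablkcipher("xts(serpent)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	get_random_bytes(key, sizeof(key));
	err = crypto_ablkcipher_setkey(tfm, key, sizeof(key));

	crypto_free_ablkcipher(tfm);
	return err;
}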

Tcrypt benchmark results (serpent-sse2/serpent_generic speed ratios):

Intel Celeron T1600 (x86_64) (fam:6, model:15, step:13):
size    xts-enc xts-dec
16B     0.98x   1.00x
64B     1.00x   1.01x
256B    2.78x   2.75x
1024B   3.30x   3.26x
8192B   3.39x   3.30x

AMD Phenom II 1055T (x86_64) (fam:16, model:10):
size    xts-enc xts-dec
16B     1.05x   1.02x
64B     1.04x   1.03x
256B    2.10x   2.05x
1024B   2.34x   2.35x
8192B   2.34x   2.40x

Intel Atom N270 (i586):
size    xts-enc xts-dec
16B     0.95x   0.96x
64B     1.53x   1.50x
256B    1.72x   1.75x
1024B   1.88x   1.87x
8192B   1.86x   1.83x

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 arch/x86/crypto/serpent_sse2_glue.c | 180 +++++++++++++++++++++++++++-
 1 file changed, 178 insertions(+), 2 deletions(-)

diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index db318e5cb240..2dffc5ab883e 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -40,6 +40,7 @@
 #include <crypto/b128ops.h>
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
+#include <crypto/xts.h>
 #include <asm/i387.h>
 #include <asm/serpent.h>
 #include <crypto/scatterwalk.h>
@@ -50,6 +51,10 @@
 #define HAS_LRW
 #endif
 
+#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
+#define HAS_XTS
+#endif
+
 struct async_serpent_ctx {
 	struct cryptd_ablkcipher *cryptd_tfm;
 };
@@ -465,7 +470,7 @@ static struct crypto_alg blk_ctr_alg = {
 	},
 };
 
-#ifdef HAS_LRW
+#if defined(HAS_LRW) || defined(HAS_XTS)
 
 struct crypt_priv {
 	struct serpent_ctx *ctx;
@@ -506,6 +511,10 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
 		__serpent_decrypt(ctx->ctx, srcdst, srcdst);
 }
 
+#endif
+
+#ifdef HAS_LRW
+
 struct serpent_lrw_ctx {
 	struct lrw_table_ctx lrw_table;
 	struct serpent_ctx serpent_ctx;
@@ -611,6 +620,114 @@ static struct crypto_alg blk_lrw_alg = {
 
 #endif
 
+#ifdef HAS_XTS
+
+struct serpent_xts_ctx {
+	struct serpent_ctx tweak_ctx;
+	struct serpent_ctx crypt_ctx;
+};
+
+static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
+			      unsigned int keylen)
+{
+	struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	u32 *flags = &tfm->crt_flags;
+	int err;
+
+	/* key consists of keys of equal size concatenated, therefore
+	 * the length must be even
+	 */
+	if (keylen % 2) {
+		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		return -EINVAL;
+	}
+
+	/* first half of xts-key is for crypt */
+	err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2);
+	if (err)
+		return err;
+
+	/* second half of xts-key is for tweak */
+	return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2);
+}
+
+static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	be128 buf[SERPENT_PARALLEL_BLOCKS];
+	struct crypt_priv crypt_ctx = {
+		.ctx = &ctx->crypt_ctx,
+		.fpu_enabled = false,
+	};
+	struct xts_crypt_req req = {
+		.tbuf = buf,
+		.tbuflen = sizeof(buf),
+
+		.tweak_ctx = &ctx->tweak_ctx,
+		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
+		.crypt_ctx = &crypt_ctx,
+		.crypt_fn = encrypt_callback,
+	};
+	int ret;
+
+	ret = xts_crypt(desc, dst, src, nbytes, &req);
+	serpent_fpu_end(crypt_ctx.fpu_enabled);
+
+	return ret;
+}
+
+static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+		       struct scatterlist *src, unsigned int nbytes)
+{
+	struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	be128 buf[SERPENT_PARALLEL_BLOCKS];
+	struct crypt_priv crypt_ctx = {
+		.ctx = &ctx->crypt_ctx,
+		.fpu_enabled = false,
+	};
+	struct xts_crypt_req req = {
+		.tbuf = buf,
+		.tbuflen = sizeof(buf),
+
+		.tweak_ctx = &ctx->tweak_ctx,
+		.tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt),
+		.crypt_ctx = &crypt_ctx,
+		.crypt_fn = decrypt_callback,
+	};
+	int ret;
+
+	ret = xts_crypt(desc, dst, src, nbytes, &req);
+	serpent_fpu_end(crypt_ctx.fpu_enabled);
+
+	return ret;
+}
+
+static struct crypto_alg blk_xts_alg = {
+	.cra_name		= "__xts-serpent-sse2",
+	.cra_driver_name	= "__driver-xts-serpent-sse2",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= SERPENT_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct serpent_xts_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(blk_xts_alg.cra_list),
+	.cra_u = {
+		.blkcipher = {
+			.min_keysize	= SERPENT_MIN_KEY_SIZE * 2,
+			.max_keysize	= SERPENT_MAX_KEY_SIZE * 2,
+			.ivsize		= SERPENT_BLOCK_SIZE,
+			.setkey		= xts_serpent_setkey,
+			.encrypt	= xts_encrypt,
+			.decrypt	= xts_decrypt,
+		},
+	},
+};
+
+#endif
+
 static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
 			unsigned int key_len)
 {
@@ -851,6 +968,46 @@ static struct crypto_alg ablk_lrw_alg = {
 
 #endif
 
+#ifdef HAS_XTS
+
+static int ablk_xts_init(struct crypto_tfm *tfm)
+{
+	struct cryptd_ablkcipher *cryptd_tfm;
+
+	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-xts-serpent-sse2", 0, 0);
+	if (IS_ERR(cryptd_tfm))
+		return PTR_ERR(cryptd_tfm);
+	ablk_init_common(tfm, cryptd_tfm);
+	return 0;
+}
+
+static struct crypto_alg ablk_xts_alg = {
+	.cra_name		= "xts(serpent)",
+	.cra_driver_name	= "xts-serpent-sse2",
+	.cra_priority		= 400,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= SERPENT_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct async_serpent_ctx),
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(ablk_xts_alg.cra_list),
+	.cra_init		= ablk_xts_init,
+	.cra_exit		= ablk_exit,
+	.cra_u = {
+		.ablkcipher = {
+			.min_keysize	= SERPENT_MIN_KEY_SIZE * 2,
+			.max_keysize	= SERPENT_MAX_KEY_SIZE * 2,
+			.ivsize		= SERPENT_BLOCK_SIZE,
+			.setkey		= ablk_set_key,
+			.encrypt	= ablk_encrypt,
+			.decrypt	= ablk_decrypt,
+		},
+	},
+};
+
+#endif
+
 static int __init serpent_sse2_init(void)
 {
 	int err;
@@ -885,15 +1042,30 @@ static int __init serpent_sse2_init(void)
 	err = crypto_register_alg(&ablk_lrw_alg);
 	if (err)
 		goto ablk_lrw_err;
+#endif
+#ifdef HAS_XTS
+	err = crypto_register_alg(&blk_xts_alg);
+	if (err)
+		goto blk_xts_err;
+	err = crypto_register_alg(&ablk_xts_alg);
+	if (err)
+		goto ablk_xts_err;
 #endif
 	return err;
 
+#ifdef HAS_XTS
+	crypto_unregister_alg(&ablk_xts_alg);
+ablk_xts_err:
+	crypto_unregister_alg(&blk_xts_alg);
+blk_xts_err:
+#endif
 #ifdef HAS_LRW
+	crypto_unregister_alg(&ablk_lrw_alg);
 ablk_lrw_err:
 	crypto_unregister_alg(&blk_lrw_alg);
 blk_lrw_err:
-	crypto_unregister_alg(&ablk_ctr_alg);
 #endif
+	crypto_unregister_alg(&ablk_ctr_alg);
 ablk_ctr_err:
 	crypto_unregister_alg(&ablk_cbc_alg);
 ablk_cbc_err:
@@ -910,6 +1082,10 @@ blk_ecb_err:
 
 static void __exit serpent_sse2_exit(void)
 {
+#ifdef HAS_XTS
+	crypto_unregister_alg(&ablk_xts_alg);
+	crypto_unregister_alg(&blk_xts_alg);
+#endif
 #ifdef HAS_LRW
 	crypto_unregister_alg(&ablk_lrw_alg);
 	crypto_unregister_alg(&blk_lrw_alg);

From d35643385628d44a5933a0755b01478eb4df5c65 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Wed, 9 Nov 2011 19:44:12 +0200
Subject: [PATCH 29/54] crypto: serpent-sse2 - clear CRYPTO_TFM_REQ_MAY_SLEEP
 in lrw and xts modes

The LRW/XTS patches for serpent-sse2 forgot to clear this flag.
CRYPTO_TFM_REQ_MAY_SLEEP must be cleared, as sleeping between
kernel_fpu_begin()/kernel_fpu_end() is not allowed.
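
A minimal sketch of the constraint (illustrative only, assuming the usual
x86 FPU API; the function below is hypothetical):

#include <asm/i387.h>

static void example_no_sleep_under_fpu(void)
{
	kernel_fpu_begin();	/* preemption disabled, sleeping forbidden */
	/* ... SSE2 serpent work on up to SERPENT_PARALLEL_BLOCKS ... */
	kernel_fpu_end();	/* FPU state handed back, may sleep again */
}

With the flag cleared, the LRW/XTS walk between parallel chunks cannot
end up sleeping (e.g. via crypto_yield()) while the FPU is still held.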

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 arch/x86/crypto/serpent_sse2_glue.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 2dffc5ab883e..2f5c304653f4 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -554,6 +554,7 @@ static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	};
 	int ret;
 
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 	ret = lrw_crypt(desc, dst, src, nbytes, &req);
 	serpent_fpu_end(crypt_ctx.fpu_enabled);
 
@@ -579,6 +580,7 @@ static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	};
 	int ret;
 
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 	ret = lrw_crypt(desc, dst, src, nbytes, &req);
 	serpent_fpu_end(crypt_ctx.fpu_enabled);
 
@@ -671,6 +673,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	};
 	int ret;
 
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 	ret = xts_crypt(desc, dst, src, nbytes, &req);
 	serpent_fpu_end(crypt_ctx.fpu_enabled);
 
@@ -697,6 +700,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	};
 	int ret;
 
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 	ret = xts_crypt(desc, dst, src, nbytes, &req);
 	serpent_fpu_end(crypt_ctx.fpu_enabled);
 

From 79b3a418e090248d00ceba40b81da9dfac753367 Mon Sep 17 00:00:00 2001
From: Lee Nipper <lee.nipper@gmail.com>
Date: Mon, 21 Nov 2011 16:13:25 +0800
Subject: [PATCH 30/54] crypto: talitos - add hmac algorithms

Add these hmac algorithms to talitos:
    hmac(md5),
    hmac(sha1),
    hmac(sha224),
    hmac(sha256),
    hmac(sha384),
    hmac(sha512).
These are all type ahash.
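
A usage sketch (not part of this patch; the function name is arbitrary):
the new entries are keyed ahashes, so a caller allocates them like any
other ahash and sets the key first; keys longer than the block size are
hashed down to the digest size by the new ahash_setkey(), as HMAC
requires:

#include <crypto/hash.h>
#include <linux/err.h>

static int example_hmac_sha256(const u8 *key, unsigned int keylen)
{
	struct crypto_ahash *tfm;
	int err;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ahash_setkey(tfm, key, keylen);
	/* ... then build an ahash_request and call crypto_ahash_digest() ... */

	crypto_free_ahash(tfm);
	return err;
}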

Signed-off-by: Lee Nipper <lee.nipper@gmail.com>

Fixed up to not register HMAC algorithms on sec2.0 devices.
Rationale (from Lee):

on an 8349E Rev1.1, there's a problem with hmac for any talitos
hmac sequence requiring an intermediate hash context (Pointer
DWORD 1); the result is an incorrect hmac.  An intermediate hash
context is required for something longer than (65536-blocksize),
and for other cases when update/finup/final are used inefficiently.
Interestingly, a normal hash (without hmac) works perfectly
when using an intermediate context.

Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/talitos.c | 237 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 235 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index dbe76b5df9cf..8ce87317310b 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -157,6 +157,7 @@ struct talitos_private {
 #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
 #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
 #define TALITOS_FTR_SHA224_HWINIT 0x00000004
+#define TALITOS_FTR_HMAC_OK 0x00000008
 
 static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
 {
@@ -1874,6 +1875,97 @@ static int ahash_digest(struct ahash_request *areq)
 	return ahash_process_req(areq, areq->nbytes);
 }
 
+struct keyhash_result {
+	struct completion completion;
+	int err;
+};
+
+static void keyhash_complete(struct crypto_async_request *req, int err)
+{
+	struct keyhash_result *res = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	res->err = err;
+	complete(&res->completion);
+}
+
+static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
+		   u8 *hash)
+{
+	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+
+	struct scatterlist sg[1];
+	struct ahash_request *req;
+	struct keyhash_result hresult;
+	int ret;
+
+	init_completion(&hresult.completion);
+
+	req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	/* Keep tfm keylen == 0 during hash of the long key */
+	ctx->keylen = 0;
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   keyhash_complete, &hresult);
+
+	sg_init_one(&sg[0], key, keylen);
+
+	ahash_request_set_crypt(req, sg, hash, keylen);
+	ret = crypto_ahash_digest(req);
+	switch (ret) {
+	case 0:
+		break;
+	case -EINPROGRESS:
+	case -EBUSY:
+		ret = wait_for_completion_interruptible(
+			&hresult.completion);
+		if (!ret)
+			ret = hresult.err;
+		break;
+	default:
+		break;
+	}
+	ahash_request_free(req);
+
+	return ret;
+}
+
+static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+	unsigned int blocksize =
+			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+	unsigned int digestsize = crypto_ahash_digestsize(tfm);
+	unsigned int keysize = keylen;
+	u8 hash[SHA512_DIGEST_SIZE];
+	int ret;
+
+	if (keylen <= blocksize)
+		memcpy(ctx->key, key, keysize);
+	else {
+		/* Must get the hash of the long key */
+		ret = keyhash(tfm, key, keylen, hash);
+
+		if (ret) {
+			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+			return -EINVAL;
+		}
+
+		keysize = digestsize;
+		memcpy(ctx->key, hash, digestsize);
+	}
+
+	ctx->keylen = keysize;
+
+	return 0;
+}
+
+
 struct talitos_alg_template {
 	u32 type;
 	union {
@@ -2217,6 +2309,138 @@ static struct talitos_alg_template driver_algs[] = {
 				     DESC_HDR_SEL0_MDEUB |
 				     DESC_HDR_MODE0_MDEUB_SHA512,
 	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.setkey = ahash_setkey,
+			.halg.digestsize = MD5_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "hmac(md5)",
+				.cra_driver_name = "hmac-md5-talitos",
+				.cra_blocksize = MD5_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_MD5,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.setkey = ahash_setkey,
+			.halg.digestsize = SHA1_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "hmac(sha1)",
+				.cra_driver_name = "hmac-sha1-talitos",
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_SHA1,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.setkey = ahash_setkey,
+			.halg.digestsize = SHA224_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "hmac(sha224)",
+				.cra_driver_name = "hmac-sha224-talitos",
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_SHA224,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.setkey = ahash_setkey,
+			.halg.digestsize = SHA256_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "hmac(sha256)",
+				.cra_driver_name = "hmac-sha256-talitos",
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_SHA256,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.setkey = ahash_setkey,
+			.halg.digestsize = SHA384_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "hmac(sha384)",
+				.cra_driver_name = "hmac-sha384-talitos",
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUB |
+				     DESC_HDR_MODE0_MDEUB_SHA384,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.setkey = ahash_setkey,
+			.halg.digestsize = SHA512_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "hmac(sha512)",
+				.cra_driver_name = "hmac-sha512-talitos",
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUB |
+				     DESC_HDR_MODE0_MDEUB_SHA512,
+	}
 };
 
 struct talitos_crypto_alg {
@@ -2373,8 +2597,12 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
 	case CRYPTO_ALG_TYPE_AHASH:
 		alg = &t_alg->algt.alg.hash.halg.base;
 		alg->cra_init = talitos_cra_init_ahash;
+		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
+		    !strncmp(alg->cra_name, "hmac", 4))
+			return ERR_PTR(-ENOTSUPP);
 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
-		    !strcmp(alg->cra_name, "sha224")) {
+		    (!strcmp(alg->cra_name, "sha224") ||
+		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
 			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
 			t_alg->algt.desc_hdr_template =
 					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2471,7 +2699,8 @@ static int talitos_probe(struct platform_device *ofdev)
 
 	if (of_device_is_compatible(np, "fsl,sec2.1"))
 		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
-				  TALITOS_FTR_SHA224_HWINIT;
+				  TALITOS_FTR_SHA224_HWINIT |
+				  TALITOS_FTR_HMAC_OK;
 
 	priv->chan = kzalloc(sizeof(struct talitos_channel) *
 			     priv->num_channels, GFP_KERNEL);
@@ -2530,6 +2759,10 @@ static int talitos_probe(struct platform_device *ofdev)
 			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
 			if (IS_ERR(t_alg)) {
 				err = PTR_ERR(t_alg);
+				if (err == -ENOTSUPP) {
+					kfree(t_alg);
+					continue;
+				}
 				goto err_out;
 			}
 

From 5b859b6ebb18b37244d44b5300bf765694b7303c Mon Sep 17 00:00:00 2001
From: Kim Phillips <kim.phillips@freescale.com>
Date: Mon, 21 Nov 2011 16:13:26 +0800
Subject: [PATCH 31/54] crypto: talitos - be less noisy on startup

talitos prints every algorithm it registers at module load time.
Algorithms are being added that make for an excessively noisy console
(latest HMACs patch makes an SEC 3.1 print 20 lines).
Instead, display the SEC h/w version number, and inform the
user of algorithm registration status in /proc/crypto, like so:

talitos ffe30000.crypto: fsl,sec3.1 algorithms registered in /proc/crypto

Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/talitos.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 8ce87317310b..c372a18ed22e 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2784,12 +2784,13 @@ static int talitos_probe(struct platform_device *ofdev)
 				dev_err(dev, "%s alg registration failed\n",
 					name);
 				kfree(t_alg);
-			} else {
+			} else
 				list_add_tail(&t_alg->entry, &priv->alg_list);
-				dev_info(dev, "%s\n", name);
-			}
 		}
 	}
+	if (!list_empty(&priv->alg_list))
+		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
+			 (char *)of_get_property(np, "compatible", NULL));
 
 	return 0;
 

From ad42d5fc85383278663ecb58a24f6547ad0ba735 Mon Sep 17 00:00:00 2001
From: Kim Phillips <kim.phillips@freescale.com>
Date: Mon, 21 Nov 2011 16:13:27 +0800
Subject: [PATCH 32/54] crypto: talitos - prepare driver for channel remap
 support

Add a reg member to the channel struct and use it to
access channels.
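
A worked example of the new addressing (not in the patch itself; values
taken from talitos.h as changed here):

/*
 * priv->chan[0].reg + TALITOS_CCCR = priv->reg + 0x1000 + 0x100 + 0x8
 *                                  = priv->reg + 0x1108
 *
 * which is exactly the old TALITOS_CCCR(0) = 0 * 0x100 + 0x1108; channel 1
 * lands at priv->reg + 0x1208, and so on.  The per-channel base is simply
 * precomputed once in talitos_probe(), so a later patch can remap it.
 */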

Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/talitos.c | 37 ++++++++++++++++++++++---------------
 drivers/crypto/talitos.h | 31 ++++++++++++++++---------------
 2 files changed, 38 insertions(+), 30 deletions(-)

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index c372a18ed22e..7f82e91e461c 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -99,6 +99,8 @@ struct talitos_request {
 
 /* per-channel fifo management */
 struct talitos_channel {
+	void __iomem *reg;
+
 	/* request fifo */
 	struct talitos_request *fifo;
 
@@ -197,9 +199,9 @@ static int reset_channel(struct device *dev, int ch)
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	unsigned int timeout = TALITOS_TIMEOUT;
 
-	setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);
+	setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);
 
-	while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
+	while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
 	       && --timeout)
 		cpu_relax();
 
@@ -209,12 +211,12 @@ static int reset_channel(struct device *dev, int ch)
 	}
 
 	/* set 36-bit addressing, done writeback enable and done IRQ enable */
-	setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
+	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
 		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
 
 	/* and ICCR writeback, if available */
 	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
-		setbits32(priv->reg + TALITOS_CCCR_LO(ch),
+		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
 		          TALITOS_CCCR_LO_IWSE);
 
 	return 0;
@@ -328,8 +330,9 @@ static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
 
 	/* GO! */
 	wmb();
-	out_be32(priv->reg + TALITOS_FF(ch), upper_32_bits(request->dma_desc));
-	out_be32(priv->reg + TALITOS_FF_LO(ch),
+	out_be32(priv->chan[ch].reg + TALITOS_FF,
+		 upper_32_bits(request->dma_desc));
+	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
 		 lower_32_bits(request->dma_desc));
 
 	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
@@ -423,7 +426,7 @@ static u32 current_desc_hdr(struct device *dev, int ch)
 	int tail = priv->chan[ch].tail;
 	dma_addr_t cur_desc;
 
-	cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
+	cur_desc = in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
 
 	while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
 		tail = (tail + 1) & (priv->fifo_len - 1);
@@ -445,7 +448,7 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
 	int i;
 
 	if (!desc_hdr)
-		desc_hdr = in_be32(priv->reg + TALITOS_DESCBUF(ch));
+		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
 
 	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
 	case DESC_HDR_SEL0_AFEU:
@@ -507,8 +510,8 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
 
 	for (i = 0; i < 8; i++)
 		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
-			in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
-			in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
+			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
+			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
 }
 
 /*
@@ -529,8 +532,8 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
 
 		error = -EINVAL;
 
-		v = in_be32(priv->reg + TALITOS_CCPSR(ch));
-		v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));
+		v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
+		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
 
 		if (v_lo & TALITOS_CCPSR_LO_DOF) {
 			dev_err(dev, "double fetch fifo overflow error\n");
@@ -568,10 +571,10 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
 		if (reset_ch) {
 			reset_channel(dev, ch);
 		} else {
-			setbits32(priv->reg + TALITOS_CCCR(ch),
+			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
 				  TALITOS_CCCR_CONT);
-			setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
-			while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
+			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
+			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
 			       TALITOS_CCCR_CONT) && --timeout)
 				cpu_relax();
 			if (timeout == 0) {
@@ -2710,6 +2713,10 @@ static int talitos_probe(struct platform_device *ofdev)
 		goto err_out;
 	}
 
+	for (i = 0; i < priv->num_channels; i++)
+		priv->chan[i].reg = priv->reg + TALITOS_CH_BASE_OFFSET +
+				    TALITOS_CH_STRIDE * (i + 1);
+
 	for (i = 0; i < priv->num_channels; i++) {
 		spin_lock_init(&priv->chan[i].head_lock);
 		spin_lock_init(&priv->chan[i].tail_lock);
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 0b746aca4587..3ed319da853c 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -1,7 +1,7 @@
 /*
  * Freescale SEC (talitos) device register and descriptor header defines
  *
- * Copyright (c) 2006-2010 Freescale Semiconductor, Inc.
+ * Copyright (c) 2006-2011 Freescale Semiconductor, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -49,13 +49,14 @@
 #define TALITOS_ICR_LO			0x101C
 
 /* channel register address stride */
+#define TALITOS_CH_BASE_OFFSET		0x1000	/* default channel map base */
 #define TALITOS_CH_STRIDE		0x100
 
 /* channel configuration register  */
-#define TALITOS_CCCR(ch)		(ch * TALITOS_CH_STRIDE + 0x1108)
+#define TALITOS_CCCR			0x8
 #define   TALITOS_CCCR_CONT		0x2    /* channel continue */
 #define   TALITOS_CCCR_RESET		0x1    /* channel reset */
-#define TALITOS_CCCR_LO(ch)		(ch * TALITOS_CH_STRIDE + 0x110c)
+#define TALITOS_CCCR_LO			0xc
 #define   TALITOS_CCCR_LO_IWSE		0x80   /* chan. ICCR writeback enab. */
 #define   TALITOS_CCCR_LO_EAE		0x20   /* extended address enable */
 #define   TALITOS_CCCR_LO_CDWE		0x10   /* chan. done writeback enab. */
@@ -63,8 +64,8 @@
 #define   TALITOS_CCCR_LO_CDIE		0x2    /* channel done IRQ enable */
 
 /* CCPSR: channel pointer status register */
-#define TALITOS_CCPSR(ch)		(ch * TALITOS_CH_STRIDE + 0x1110)
-#define TALITOS_CCPSR_LO(ch)		(ch * TALITOS_CH_STRIDE + 0x1114)
+#define TALITOS_CCPSR			0x10
+#define TALITOS_CCPSR_LO		0x14
 #define   TALITOS_CCPSR_LO_DOF		0x8000 /* double FF write oflow error */
 #define   TALITOS_CCPSR_LO_SOF		0x4000 /* single FF write oflow error */
 #define   TALITOS_CCPSR_LO_MDTE		0x2000 /* master data transfer error */
@@ -79,24 +80,24 @@
 #define   TALITOS_CCPSR_LO_SRL		0x0010 /* scatter return/length error */
 
 /* channel fetch fifo register */
-#define TALITOS_FF(ch)			(ch * TALITOS_CH_STRIDE + 0x1148)
-#define TALITOS_FF_LO(ch)		(ch * TALITOS_CH_STRIDE + 0x114c)
+#define TALITOS_FF			0x48
+#define TALITOS_FF_LO			0x4c
 
 /* current descriptor pointer register */
-#define TALITOS_CDPR(ch)		(ch * TALITOS_CH_STRIDE + 0x1140)
-#define TALITOS_CDPR_LO(ch)		(ch * TALITOS_CH_STRIDE + 0x1144)
+#define TALITOS_CDPR			0x40
+#define TALITOS_CDPR_LO			0x44
 
 /* descriptor buffer register */
-#define TALITOS_DESCBUF(ch)		(ch * TALITOS_CH_STRIDE + 0x1180)
-#define TALITOS_DESCBUF_LO(ch)		(ch * TALITOS_CH_STRIDE + 0x1184)
+#define TALITOS_DESCBUF			0x80
+#define TALITOS_DESCBUF_LO		0x84
 
 /* gather link table */
-#define TALITOS_GATHER(ch)		(ch * TALITOS_CH_STRIDE + 0x11c0)
-#define TALITOS_GATHER_LO(ch)		(ch * TALITOS_CH_STRIDE + 0x11c4)
+#define TALITOS_GATHER			0xc0
+#define TALITOS_GATHER_LO		0xc4
 
 /* scatter link table */
-#define TALITOS_SCATTER(ch)		(ch * TALITOS_CH_STRIDE + 0x11e0)
-#define TALITOS_SCATTER_LO(ch)		(ch * TALITOS_CH_STRIDE + 0x11e4)
+#define TALITOS_SCATTER			0xe0
+#define TALITOS_SCATTER_LO		0xe4
 
 /* execution unit interrupt status registers */
 #define TALITOS_DEUISR			0x2030 /* DES unit */

From c3e337f88a5b3784cb3c806ffd650d06adff1ea5 Mon Sep 17 00:00:00 2001
From: Kim Phillips <kim.phillips@freescale.com>
Date: Mon, 21 Nov 2011 16:13:27 +0800
Subject: [PATCH 33/54] crypto: talitos - support for channel remap and 2nd IRQ

Some later SEC v3.x devices are equipped with a second IRQ line.
By correctly assigning IRQ affinity, this feature can be
used to increase performance on dual core parts, like the
MPC8572E and P2020E.

The existence of the 2nd IRQ is determined from the device
node's interrupt property.  If present, the driver remaps
two of four channels, which in turn makes those channels
trigger their interrupts on the 2nd line instead of the first.
To handle single- and dual-IRQ combinations efficiently,
talitos gets two new interrupt handlers and back-half workers.
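
The resulting channel-to-IRQ split can be summarized as follows (sketch
derived from the new handlers and request_irq() calls; DEF_TALITOS_DONE()
treats bit 2*ch of the done mask as channel ch's done bit):

/*
 * single IRQ line:  channels 0-3  -> irq[0] -> talitos_interrupt_4ch,
 *                                              done_task[0]
 * two IRQ lines:    channels 0, 2 -> irq[0] -> talitos_interrupt_ch0_2,
 *                                              done_task[0]
 *                   channels 1, 3 -> irq[1] -> talitos_interrupt_ch1_3,
 *                                              done_task[1]
 */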

[includes a fix to MCR_LO's address.]

Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/talitos.c | 205 ++++++++++++++++++++++++++-------------
 drivers/crypto/talitos.h |  14 ++-
 2 files changed, 148 insertions(+), 71 deletions(-)

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 7f82e91e461c..92c0ca75400d 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -122,7 +122,7 @@ struct talitos_private {
 	struct device *dev;
 	struct platform_device *ofdev;
 	void __iomem *reg;
-	int irq;
+	int irq[2];
 
 	/* SEC version geometry (from device tree node) */
 	unsigned int num_channels;
@@ -146,7 +146,7 @@ struct talitos_private {
 	atomic_t last_chan ____cacheline_aligned;
 
 	/* request callback tasklet */
-	struct tasklet_struct done_task;
+	struct tasklet_struct done_task[2];
 
 	/* list of registered algorithms */
 	struct list_head alg_list;
@@ -226,13 +226,19 @@ static int reset_device(struct device *dev)
 {
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	unsigned int timeout = TALITOS_TIMEOUT;
+	u32 mcr = TALITOS_MCR_SWR;
 
-	setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);
+	setbits32(priv->reg + TALITOS_MCR, mcr);
 
 	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
 	       && --timeout)
 		cpu_relax();
 
+	if (priv->irq[1] != NO_IRQ) {
+		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
+		setbits32(priv->reg + TALITOS_MCR, mcr);
+	}
+
 	if (timeout == 0) {
 		dev_err(dev, "failed to reset device\n");
 		return -EIO;
@@ -401,21 +407,32 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
 /*
  * process completed requests for channels that have done status
  */
-static void talitos_done(unsigned long data)
-{
-	struct device *dev = (struct device *)data;
-	struct talitos_private *priv = dev_get_drvdata(dev);
-	int ch;
-
-	for (ch = 0; ch < priv->num_channels; ch++)
-		flush_channel(dev, ch, 0, 0);
-
-	/* At this point, all completed channels have been processed.
-	 * Unmask done interrupts for channels completed later on.
-	 */
-	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
-	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
+#define DEF_TALITOS_DONE(name, ch_done_mask)				\
+static void talitos_done_##name(unsigned long data)			\
+{									\
+	struct device *dev = (struct device *)data;			\
+	struct talitos_private *priv = dev_get_drvdata(dev);		\
+									\
+	if (ch_done_mask & 1)						\
+		flush_channel(dev, 0, 0, 0);				\
+	if (priv->num_channels == 1)					\
+		goto out;						\
+	if (ch_done_mask & (1 << 2))					\
+		flush_channel(dev, 1, 0, 0);				\
+	if (ch_done_mask & (1 << 4))					\
+		flush_channel(dev, 2, 0, 0);				\
+	if (ch_done_mask & (1 << 6))					\
+		flush_channel(dev, 3, 0, 0);				\
+									\
+out:									\
+	/* At this point, all completed channels have been processed */	\
+	/* Unmask done interrupts for channels completed later on. */	\
+	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
+	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);	\
 }
+DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
+DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
+DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
 
 /*
  * locate current (offending) descriptor
@@ -584,7 +601,7 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
 			}
 		}
 	}
-	if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
+	if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
 		dev_err(dev, "done overflow, internal time out, or rngu error: "
 		        "ISR 0x%08x_%08x\n", isr, isr_lo);
 
@@ -597,30 +614,35 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
 	}
 }
 
-static irqreturn_t talitos_interrupt(int irq, void *data)
-{
-	struct device *dev = data;
-	struct talitos_private *priv = dev_get_drvdata(dev);
-	u32 isr, isr_lo;
-
-	isr = in_be32(priv->reg + TALITOS_ISR);
-	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
-	/* Acknowledge interrupt */
-	out_be32(priv->reg + TALITOS_ICR, isr);
-	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);
-
-	if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
-		talitos_error((unsigned long)data, isr, isr_lo);
-	else
-		if (likely(isr & TALITOS_ISR_CHDONE)) {
-			/* mask further done interrupts. */
-			clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE);
-			/* done_task will unmask done interrupts at exit */
-			tasklet_schedule(&priv->done_task);
-		}
-
-	return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
+#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
+static irqreturn_t talitos_interrupt_##name(int irq, void *data)	       \
+{									       \
+	struct device *dev = data;					       \
+	struct talitos_private *priv = dev_get_drvdata(dev);		       \
+	u32 isr, isr_lo;						       \
+									       \
+	isr = in_be32(priv->reg + TALITOS_ISR);				       \
+	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
+	/* Acknowledge interrupt */					       \
+	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
+	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
+									       \
+	if (unlikely((isr & ~TALITOS_ISR_4CHDONE) & ch_err_mask || isr_lo))    \
+		talitos_error((unsigned long)data, isr, isr_lo);	       \
+	else								       \
+		if (likely(isr & ch_done_mask)) {			       \
+			/* mask further done interrupts. */		       \
+			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
+			/* done_task will unmask done interrupts at exit */    \
+			tasklet_schedule(&priv->done_task[tlet]);	       \
+		}							       \
+									       \
+	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
+								IRQ_NONE;      \
 }
+DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
+DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
+DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
 
 /*
  * hwrng
@@ -2558,12 +2580,15 @@ static int talitos_remove(struct platform_device *ofdev)
 
 	kfree(priv->chan);
 
-	if (priv->irq != NO_IRQ) {
-		free_irq(priv->irq, dev);
-		irq_dispose_mapping(priv->irq);
-	}
+	for (i = 0; i < 2; i++)
+		if (priv->irq[i] != NO_IRQ) {
+			free_irq(priv->irq[i], dev);
+			irq_dispose_mapping(priv->irq[i]);
+		}
 
-	tasklet_kill(&priv->done_task);
+	tasklet_kill(&priv->done_task[0]);
+	if (priv->irq[1] != NO_IRQ)
+		tasklet_kill(&priv->done_task[1]);
 
 	iounmap(priv->reg);
 
@@ -2628,6 +2653,54 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
 	return t_alg;
 }
 
+static int talitos_probe_irq(struct platform_device *ofdev)
+{
+	struct device *dev = &ofdev->dev;
+	struct device_node *np = ofdev->dev.of_node;
+	struct talitos_private *priv = dev_get_drvdata(dev);
+	int err;
+
+	priv->irq[0] = irq_of_parse_and_map(np, 0);
+	if (priv->irq[0] == NO_IRQ) {
+		dev_err(dev, "failed to map irq\n");
+		return -EINVAL;
+	}
+
+	priv->irq[1] = irq_of_parse_and_map(np, 1);
+
+	/* get the primary irq line */
+	if (priv->irq[1] == NO_IRQ) {
+		err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
+				  dev_driver_string(dev), dev);
+		goto primary_out;
+	}
+
+	err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
+			  dev_driver_string(dev), dev);
+	if (err)
+		goto primary_out;
+
+	/* get the secondary irq line */
+	err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
+			  dev_driver_string(dev), dev);
+	if (err) {
+		dev_err(dev, "failed to request secondary irq\n");
+		irq_dispose_mapping(priv->irq[1]);
+		priv->irq[1] = NO_IRQ;
+	}
+
+	return err;
+
+primary_out:
+	if (err) {
+		dev_err(dev, "failed to request primary irq\n");
+		irq_dispose_mapping(priv->irq[0]);
+		priv->irq[0] = NO_IRQ;
+	}
+
+	return err;
+}
+
 static int talitos_probe(struct platform_device *ofdev)
 {
 	struct device *dev = &ofdev->dev;
@@ -2644,28 +2717,22 @@ static int talitos_probe(struct platform_device *ofdev)
 
 	priv->ofdev = ofdev;
 
-	tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
+	err = talitos_probe_irq(ofdev);
+	if (err)
+		goto err_out;
+
+	if (priv->irq[1] == NO_IRQ) {
+		tasklet_init(&priv->done_task[0], talitos_done_4ch,
+			     (unsigned long)dev);
+	} else {
+		tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
+			     (unsigned long)dev);
+		tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
+			     (unsigned long)dev);
+	}
 
 	INIT_LIST_HEAD(&priv->alg_list);
 
-	priv->irq = irq_of_parse_and_map(np, 0);
-
-	if (priv->irq == NO_IRQ) {
-		dev_err(dev, "failed to map irq\n");
-		err = -EINVAL;
-		goto err_out;
-	}
-
-	/* get the irq line */
-	err = request_irq(priv->irq, talitos_interrupt, 0,
-			  dev_driver_string(dev), dev);
-	if (err) {
-		dev_err(dev, "failed to request irq %d\n", priv->irq);
-		irq_dispose_mapping(priv->irq);
-		priv->irq = NO_IRQ;
-		goto err_out;
-	}
-
 	priv->reg = of_iomap(np, 0);
 	if (!priv->reg) {
 		dev_err(dev, "failed to of_iomap\n");
@@ -2713,9 +2780,11 @@ static int talitos_probe(struct platform_device *ofdev)
 		goto err_out;
 	}
 
-	for (i = 0; i < priv->num_channels; i++)
-		priv->chan[i].reg = priv->reg + TALITOS_CH_BASE_OFFSET +
-				    TALITOS_CH_STRIDE * (i + 1);
+	for (i = 0; i < priv->num_channels; i++) {
+		priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
+		if ((priv->irq[1] == NO_IRQ) || !(i & 1))
+			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
+	}
 
 	for (i = 0; i < priv->num_channels; i++) {
 		spin_lock_init(&priv->chan[i].head_lock);
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 3ed319da853c..3c173954ef29 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -34,16 +34,24 @@
 
 /* global register offset addresses */
 #define TALITOS_MCR			0x1030  /* master control register */
-#define TALITOS_MCR_LO			0x1038
+#define   TALITOS_MCR_RCA0		(1 << 15) /* remap channel 0 */
+#define   TALITOS_MCR_RCA1		(1 << 14) /* remap channel 1 */
+#define   TALITOS_MCR_RCA2		(1 << 13) /* remap channel 2 */
+#define   TALITOS_MCR_RCA3		(1 << 12) /* remap channel 3 */
 #define   TALITOS_MCR_SWR		0x1     /* s/w reset */
+#define TALITOS_MCR_LO			0x1034
 #define TALITOS_IMR			0x1008  /* interrupt mask register */
 #define   TALITOS_IMR_INIT		0x100ff /* enable channel IRQs */
 #define   TALITOS_IMR_DONE		0x00055 /* done IRQs */
 #define TALITOS_IMR_LO			0x100C
 #define   TALITOS_IMR_LO_INIT		0x20000 /* allow RNGU error IRQs */
 #define TALITOS_ISR			0x1010  /* interrupt status register */
-#define   TALITOS_ISR_CHERR		0xaa    /* channel errors mask */
-#define   TALITOS_ISR_CHDONE		0x55    /* channel done mask */
+#define   TALITOS_ISR_4CHERR		0xaa    /* 4 channel errors mask */
+#define   TALITOS_ISR_4CHDONE		0x55    /* 4 channel done mask */
+#define   TALITOS_ISR_CH_0_2_ERR	0x22    /* channels 0, 2 errors mask */
+#define   TALITOS_ISR_CH_0_2_DONE	0x11    /* channels 0, 2 done mask */
+#define   TALITOS_ISR_CH_1_3_ERR	0x88    /* channels 1, 3 errors mask */
+#define   TALITOS_ISR_CH_1_3_DONE	0x44    /* channels 1, 3 done mask */
 #define TALITOS_ISR_LO			0x1014
 #define TALITOS_ICR			0x1018  /* interrupt clear register */
 #define TALITOS_ICR_LO			0x101C
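
For readers following the macro-generated handlers above: after the compiler folds the constant 0x11 mask, DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE) boils down to roughly the function below. This is an illustrative sketch, not the exact preprocessor output; flush_channel(), setbits32() and the register constants are the ones defined by the driver and this patch.

/* Sketch of the ch0_2 expansion after constant folding; illustration only. */
static void talitos_done_ch0_2(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* bit 0 of 0x11: channel 0 done */
	flush_channel(dev, 0, 0, 0);
	if (priv->num_channels == 1)
		goto out;
	/* bit 4 of 0x11: channel 2 done; bits 2 and 6 (channels 1 and 3)
	 * are not in this mask, so those branches disappear */
	flush_channel(dev, 2, 0, 0);
out:
	/* unmask done interrupts only for the channels this tasklet owns */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_ISR_CH_0_2_DONE);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
}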

From 8b4d43a4e774c02dee359c7931e47b119143f367 Mon Sep 17 00:00:00 2001
From: Kim Phillips <kim.phillips@freescale.com>
Date: Mon, 21 Nov 2011 16:13:27 +0800
Subject: [PATCH 34/54] crypto: caam - add support for MD5 algorithm variants

specifically, add these algorithm combinations:

authenc-hmac-md5-cbc-aes-caam
authenc-hmac-md5-cbc-des3_ede-caam
authenc-hmac-md5-cbc-des-caam

Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/caam/caamalg.c | 57 +++++++++++++++++++++++++++++++++++
 drivers/crypto/caam/compat.h  |  1 +
 2 files changed, 58 insertions(+)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 4159265b453b..a5bc706f57bf 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1805,6 +1805,25 @@ struct caam_alg_template {
 
 static struct caam_alg_template driver_algs[] = {
 	/* single-pass ipsec_esp descriptor */
+	{
+		.name = "authenc(hmac(md5),cbc(aes))",
+		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+	},
 	{
 		.name = "authenc(hmac(sha1),cbc(aes))",
 		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
@@ -1864,6 +1883,25 @@ static struct caam_alg_template driver_algs[] = {
 				   OP_ALG_AAI_HMAC_PRECOMP,
 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 	},
+	{
+		.name = "authenc(hmac(md5),cbc(des3_ede))",
+		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+	},
 	{
 		.name = "authenc(hmac(sha1),cbc(des3_ede))",
 		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
@@ -1923,6 +1961,25 @@ static struct caam_alg_template driver_algs[] = {
 				   OP_ALG_AAI_HMAC_PRECOMP,
 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 	},
+	{
+		.name = "authenc(hmac(md5),cbc(des))",
+		.driver_name = "authenc-hmac-md5-cbc-des-caam",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+	},
 	{
 		.name = "authenc(hmac(sha1),cbc(des))",
 		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index d38f2afaa966..a63bc65fae86 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -28,6 +28,7 @@
 #include <crypto/aes.h>
 #include <crypto/des.h>
 #include <crypto/sha.h>
+#include <crypto/md5.h>
 #include <crypto/aead.h>
 #include <crypto/authenc.h>
 #include <crypto/scatterwalk.h>
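
The three new entries are plain template registrations, so callers pick them up by algorithm name through the generic kernel AEAD API. A minimal, hypothetical usage sketch (the function name and error handling are illustrative, not part of the patch):

#include <linux/crypto.h>
#include <linux/err.h>

/* Hypothetical caller: bind to one of the new transforms by name. */
static int try_md5_cbc_aes(void)
{
	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("authenc(hmac(md5),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);	/* e.g. -ENOENT if no provider is available */

	/* ... crypto_aead_setkey(), crypto_aead_setauthsize(), requests ... */

	crypto_free_aead(tfm);
	return 0;
}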

From 7f4e3e3fa5ba9fb54b280e959bc2a88c42eec76a Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Wed, 23 Nov 2011 12:21:00 +0200
Subject: [PATCH 35/54] crypto: serpent - cleanup checkpatch errors and
 warnings

Do the checkpatch fixes before the rename to keep the rename patch simple and clean.

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/serpent.c | 595 +++++++++++++++++++++++++++--------------------
 1 file changed, 340 insertions(+), 255 deletions(-)

diff --git a/crypto/serpent.c b/crypto/serpent.c
index 7cc2caeb88a5..8f32cf35e5ce 100644
--- a/crypto/serpent.c
+++ b/crypto/serpent.c
@@ -6,8 +6,9 @@
  * Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no>
  *               2003 Herbert Valerio Riedel <hvr@gnu.org>
  *
- * Added tnepres support: Ruben Jesus Garcia Hernandez <ruben@ugr.es>, 18.10.2004
- *               Based on code by hvr
+ * Added tnepres support:
+ *		Ruben Jesus Garcia Hernandez <ruben@ugr.es>, 18.10.2004
+ *              Based on code by hvr
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -29,189 +30,211 @@
 
 #define PHI 0x9e3779b9UL
 
-#define keyiter(a,b,c,d,i,j) \
-        b ^= d; b ^= c; b ^= a; b ^= PHI ^ i; b = rol32(b,11); k[j] = b;
+#define keyiter(a, b, c, d, i, j) \
+	({ b ^= d; b ^= c; b ^= a; b ^= PHI ^ i; b = rol32(b, 11); k[j] = b; })
 
-#define loadkeys(x0,x1,x2,x3,i) \
-	x0=k[i]; x1=k[i+1]; x2=k[i+2]; x3=k[i+3];
+#define loadkeys(x0, x1, x2, x3, i) \
+	({ x0 = k[i]; x1 = k[i+1]; x2 = k[i+2]; x3 = k[i+3]; })
 
-#define storekeys(x0,x1,x2,x3,i) \
-	k[i]=x0; k[i+1]=x1; k[i+2]=x2; k[i+3]=x3;
+#define storekeys(x0, x1, x2, x3, i) \
+	({ k[i] = x0; k[i+1] = x1; k[i+2] = x2; k[i+3] = x3; })
 
-#define K(x0,x1,x2,x3,i)				\
+#define store_and_load_keys(x0, x1, x2, x3, s, l) \
+	({ storekeys(x0, x1, x2, x3, s); loadkeys(x0, x1, x2, x3, l); })
+
+#define K(x0, x1, x2, x3, i) ({				\
 	x3 ^= k[4*(i)+3];        x2 ^= k[4*(i)+2];	\
-	x1 ^= k[4*(i)+1];        x0 ^= k[4*(i)+0];
+	x1 ^= k[4*(i)+1];        x0 ^= k[4*(i)+0];	\
+	})
 
-#define LK(x0,x1,x2,x3,x4,i)				\
-					x0=rol32(x0,13);\
-	x2=rol32(x2,3);	x1 ^= x0;	x4  = x0 << 3;	\
-	x3 ^= x2;	x1 ^= x2;			\
-	x1=rol32(x1,1);	x3 ^= x4;			\
-	x3=rol32(x3,7);	x4  = x1;			\
-	x0 ^= x1;	x4 <<= 7;	x2 ^= x3;	\
-	x0 ^= x3;	x2 ^= x4;	x3 ^= k[4*i+3];	\
-	x1 ^= k[4*i+1];	x0=rol32(x0,5);	x2=rol32(x2,22);\
-	x0 ^= k[4*i+0];	x2 ^= k[4*i+2];
+#define LK(x0, x1, x2, x3, x4, i) ({					   \
+							x0 = rol32(x0, 13);\
+	x2 = rol32(x2, 3);	x1 ^= x0;		x4  = x0 << 3;	   \
+	x3 ^= x2;		x1 ^= x2;				   \
+	x1 = rol32(x1, 1);	x3 ^= x4;				   \
+	x3 = rol32(x3, 7);	x4  = x1;				   \
+	x0 ^= x1;		x4 <<= 7;		x2 ^= x3;	   \
+	x0 ^= x3;		x2 ^= x4;		x3 ^= k[4*i+3];	   \
+	x1 ^= k[4*i+1];		x0 = rol32(x0, 5);	x2 = rol32(x2, 22);\
+	x0 ^= k[4*i+0];		x2 ^= k[4*i+2];				   \
+	})
 
-#define KL(x0,x1,x2,x3,x4,i)				\
-	x0 ^= k[4*i+0];	x1 ^= k[4*i+1];	x2 ^= k[4*i+2];	\
-	x3 ^= k[4*i+3];	x0=ror32(x0,5);	x2=ror32(x2,22);\
-	x4 =  x1;	x2 ^= x3;	x0 ^= x3;	\
-	x4 <<= 7;	x0 ^= x1;	x1=ror32(x1,1);	\
-	x2 ^= x4;	x3=ror32(x3,7);	x4 = x0 << 3;	\
-	x1 ^= x0;	x3 ^= x4;	x0=ror32(x0,13);\
-	x1 ^= x2;	x3 ^= x2;	x2=ror32(x2,3);
+#define KL(x0, x1, x2, x3, x4, i) ({					   \
+	x0 ^= k[4*i+0];		x1 ^= k[4*i+1];		x2 ^= k[4*i+2];	   \
+	x3 ^= k[4*i+3];		x0 = ror32(x0, 5);	x2 = ror32(x2, 22);\
+	x4 =  x1;		x2 ^= x3;		x0 ^= x3;	   \
+	x4 <<= 7;		x0 ^= x1;		x1 = ror32(x1, 1); \
+	x2 ^= x4;		x3 = ror32(x3, 7);	x4 = x0 << 3;	   \
+	x1 ^= x0;		x3 ^= x4;		x0 = ror32(x0, 13);\
+	x1 ^= x2;		x3 ^= x2;		x2 = ror32(x2, 3); \
+	})
 
-#define S0(x0,x1,x2,x3,x4)				\
+#define S0(x0, x1, x2, x3, x4) ({			\
 					x4  = x3;	\
 	x3 |= x0;	x0 ^= x4;	x4 ^= x2;	\
-	x4 =~ x4;	x3 ^= x1;	x1 &= x0;	\
+	x4 = ~x4;	x3 ^= x1;	x1 &= x0;	\
 	x1 ^= x4;	x2 ^= x0;	x0 ^= x3;	\
 	x4 |= x0;	x0 ^= x2;	x2 &= x1;	\
-	x3 ^= x2;	x1 =~ x1;	x2 ^= x4;	\
-	x1 ^= x2;
+	x3 ^= x2;	x1 = ~x1;	x2 ^= x4;	\
+	x1 ^= x2;					\
+	})
 
-#define S1(x0,x1,x2,x3,x4)				\
+#define S1(x0, x1, x2, x3, x4) ({			\
 					x4  = x1;	\
-	x1 ^= x0;	x0 ^= x3;	x3 =~ x3;	\
+	x1 ^= x0;	x0 ^= x3;	x3 = ~x3;	\
 	x4 &= x1;	x0 |= x1;	x3 ^= x2;	\
 	x0 ^= x3;	x1 ^= x3;	x3 ^= x4;	\
 	x1 |= x4;	x4 ^= x2;	x2 &= x0;	\
-	x2 ^= x1;	x1 |= x0;	x0 =~ x0;	\
-	x0 ^= x2;	x4 ^= x1;
+	x2 ^= x1;	x1 |= x0;	x0 = ~x0;	\
+	x0 ^= x2;	x4 ^= x1;			\
+	})
 
-#define S2(x0,x1,x2,x3,x4)				\
-					x3 =~ x3;	\
+#define S2(x0, x1, x2, x3, x4) ({			\
+					x3 = ~x3;	\
 	x1 ^= x0;	x4  = x0;	x0 &= x2;	\
 	x0 ^= x3;	x3 |= x4;	x2 ^= x1;	\
 	x3 ^= x1;	x1 &= x0;	x0 ^= x2;	\
-	x2 &= x3;	x3 |= x1;	x0 =~ x0;	\
+	x2 &= x3;	x3 |= x1;	x0 = ~x0;	\
 	x3 ^= x0;	x4 ^= x0;	x0 ^= x2;	\
-	x1 |= x2;
+	x1 |= x2;					\
+	})
 
-#define S3(x0,x1,x2,x3,x4)				\
+#define S3(x0, x1, x2, x3, x4) ({			\
 					x4  = x1;	\
 	x1 ^= x3;	x3 |= x0;	x4 &= x0;	\
 	x0 ^= x2;	x2 ^= x1;	x1 &= x3;	\
 	x2 ^= x3;	x0 |= x4;	x4 ^= x3;	\
 	x1 ^= x0;	x0 &= x3;	x3 &= x4;	\
 	x3 ^= x2;	x4 |= x1;	x2 &= x1;	\
-	x4 ^= x3;	x0 ^= x3;	x3 ^= x2;
+	x4 ^= x3;	x0 ^= x3;	x3 ^= x2;	\
+	})
 
-#define S4(x0,x1,x2,x3,x4)				\
+#define S4(x0, x1, x2, x3, x4) ({			\
 					x4  = x3;	\
 	x3 &= x0;	x0 ^= x4;			\
 	x3 ^= x2;	x2 |= x4;	x0 ^= x1;	\
 	x4 ^= x3;	x2 |= x0;			\
 	x2 ^= x1;	x1 &= x0;			\
 	x1 ^= x4;	x4 &= x2;	x2 ^= x3;	\
-	x4 ^= x0;	x3 |= x1;	x1 =~ x1;	\
-	x3 ^= x0;
+	x4 ^= x0;	x3 |= x1;	x1 = ~x1;	\
+	x3 ^= x0;					\
+	})
 
-#define S5(x0,x1,x2,x3,x4)				\
+#define S5(x0, x1, x2, x3, x4) ({			\
 	x4  = x1;	x1 |= x0;			\
-	x2 ^= x1;	x3 =~ x3;	x4 ^= x0;	\
+	x2 ^= x1;	x3 = ~x3;	x4 ^= x0;	\
 	x0 ^= x2;	x1 &= x4;	x4 |= x3;	\
 	x4 ^= x0;	x0 &= x3;	x1 ^= x3;	\
 	x3 ^= x2;	x0 ^= x1;	x2 &= x4;	\
 	x1 ^= x2;	x2 &= x0;			\
-	x3 ^= x2;
+	x3 ^= x2;					\
+	})
 
-#define S6(x0,x1,x2,x3,x4)				\
+#define S6(x0, x1, x2, x3, x4) ({			\
 					x4  = x1;	\
 	x3 ^= x0;	x1 ^= x2;	x2 ^= x0;	\
-	x0 &= x3;	x1 |= x3;	x4 =~ x4;	\
+	x0 &= x3;	x1 |= x3;	x4 = ~x4;	\
 	x0 ^= x1;	x1 ^= x2;			\
 	x3 ^= x4;	x4 ^= x0;	x2 &= x0;	\
 	x4 ^= x1;	x2 ^= x3;	x3 &= x1;	\
-	x3 ^= x0;	x1 ^= x2;
+	x3 ^= x0;	x1 ^= x2;			\
+	})
 
-#define S7(x0,x1,x2,x3,x4)				\
-					x1 =~ x1;	\
-	x4  = x1;	x0 =~ x0;	x1 &= x2;	\
+#define S7(x0, x1, x2, x3, x4) ({			\
+					x1 = ~x1;	\
+	x4  = x1;	x0 = ~x0;	x1 &= x2;	\
 	x1 ^= x3;	x3 |= x4;	x4 ^= x2;	\
 	x2 ^= x3;	x3 ^= x0;	x0 |= x1;	\
 	x2 &= x0;	x0 ^= x4;	x4 ^= x3;	\
 	x3 &= x0;	x4 ^= x1;			\
 	x2 ^= x4;	x3 ^= x1;	x4 |= x0;	\
-	x4 ^= x1;
+	x4 ^= x1;					\
+	})
 
-#define SI0(x0,x1,x2,x3,x4)				\
+#define SI0(x0, x1, x2, x3, x4) ({			\
 			x4  = x3;	x1 ^= x0;	\
-	x3 |= x1;	x4 ^= x1;	x0 =~ x0;	\
+	x3 |= x1;	x4 ^= x1;	x0 = ~x0;	\
 	x2 ^= x3;	x3 ^= x0;	x0 &= x1;	\
 	x0 ^= x2;	x2 &= x3;	x3 ^= x4;	\
 	x2 ^= x3;	x1 ^= x3;	x3 &= x0;	\
-	x1 ^= x0;	x0 ^= x2;	x4 ^= x3;
+	x1 ^= x0;	x0 ^= x2;	x4 ^= x3;	\
+	})
 
-#define SI1(x0,x1,x2,x3,x4)				\
+#define SI1(x0, x1, x2, x3, x4) ({			\
 	x1 ^= x3;	x4  = x0;			\
-	x0 ^= x2;	x2 =~ x2;	x4 |= x1;	\
+	x0 ^= x2;	x2 = ~x2;	x4 |= x1;	\
 	x4 ^= x3;	x3 &= x1;	x1 ^= x2;	\
 	x2 &= x4;	x4 ^= x1;	x1 |= x3;	\
 	x3 ^= x0;	x2 ^= x0;	x0 |= x4;	\
 	x2 ^= x4;	x1 ^= x0;			\
-	x4 ^= x1;
+	x4 ^= x1;					\
+	})
 
-#define SI2(x0,x1,x2,x3,x4)				\
-	x2 ^= x1;	x4  = x3;	x3 =~ x3;	\
+#define SI2(x0, x1, x2, x3, x4) ({			\
+	x2 ^= x1;	x4  = x3;	x3 = ~x3;	\
 	x3 |= x2;	x2 ^= x4;	x4 ^= x0;	\
 	x3 ^= x1;	x1 |= x2;	x2 ^= x0;	\
 	x1 ^= x4;	x4 |= x3;	x2 ^= x3;	\
 	x4 ^= x2;	x2 &= x1;			\
-	x2 ^= x3;	x3 ^= x4;	x4 ^= x0;
+	x2 ^= x3;	x3 ^= x4;	x4 ^= x0;	\
+	})
 
-#define SI3(x0,x1,x2,x3,x4)				\
+#define SI3(x0, x1, x2, x3, x4) ({			\
 					x2 ^= x1;	\
 	x4  = x1;	x1 &= x2;			\
 	x1 ^= x0;	x0 |= x4;	x4 ^= x3;	\
 	x0 ^= x3;	x3 |= x1;	x1 ^= x2;	\
 	x1 ^= x3;	x0 ^= x2;	x2 ^= x3;	\
 	x3 &= x1;	x1 ^= x0;	x0 &= x2;	\
-	x4 ^= x3;	x3 ^= x0;	x0 ^= x1;
+	x4 ^= x3;	x3 ^= x0;	x0 ^= x1;	\
+	})
 
-#define SI4(x0,x1,x2,x3,x4)				\
+#define SI4(x0, x1, x2, x3, x4) ({			\
 	x2 ^= x3;	x4  = x0;	x0 &= x1;	\
-	x0 ^= x2;	x2 |= x3;	x4 =~ x4;	\
+	x0 ^= x2;	x2 |= x3;	x4 = ~x4;	\
 	x1 ^= x0;	x0 ^= x2;	x2 &= x4;	\
 	x2 ^= x0;	x0 |= x4;			\
 	x0 ^= x3;	x3 &= x2;			\
 	x4 ^= x3;	x3 ^= x1;	x1 &= x0;	\
-	x4 ^= x1;	x0 ^= x3;
+	x4 ^= x1;	x0 ^= x3;			\
+	})
 
-#define SI5(x0,x1,x2,x3,x4)				\
+#define SI5(x0, x1, x2, x3, x4) ({			\
 			x4  = x1;	x1 |= x2;	\
 	x2 ^= x4;	x1 ^= x3;	x3 &= x4;	\
-	x2 ^= x3;	x3 |= x0;	x0 =~ x0;	\
+	x2 ^= x3;	x3 |= x0;	x0 = ~x0;	\
 	x3 ^= x2;	x2 |= x0;	x4 ^= x1;	\
 	x2 ^= x4;	x4 &= x0;	x0 ^= x1;	\
 	x1 ^= x3;	x0 &= x2;	x2 ^= x3;	\
-	x0 ^= x2;	x2 ^= x4;	x4 ^= x3;
+	x0 ^= x2;	x2 ^= x4;	x4 ^= x3;	\
+	})
 
-#define SI6(x0,x1,x2,x3,x4)				\
+#define SI6(x0, x1, x2, x3, x4) ({			\
 			x0 ^= x2;			\
 	x4  = x0;	x0 &= x3;	x2 ^= x3;	\
 	x0 ^= x2;	x3 ^= x1;	x2 |= x4;	\
-	x2 ^= x3;	x3 &= x0;	x0 =~ x0;	\
+	x2 ^= x3;	x3 &= x0;	x0 = ~x0;	\
 	x3 ^= x1;	x1 &= x2;	x4 ^= x0;	\
 	x3 ^= x4;	x4 ^= x2;	x0 ^= x1;	\
-	x2 ^= x0;
+	x2 ^= x0;					\
+	})
 
-#define SI7(x0,x1,x2,x3,x4)				\
+#define SI7(x0, x1, x2, x3, x4) ({			\
 	x4  = x3;	x3 &= x0;	x0 ^= x2;	\
-	x2 |= x4;	x4 ^= x1;	x0 =~ x0;	\
+	x2 |= x4;	x4 ^= x1;	x0 = ~x0;	\
 	x1 |= x3;	x4 ^= x0;	x0 &= x2;	\
 	x0 ^= x1;	x1 &= x2;	x3 ^= x2;	\
 	x4 ^= x3;	x2 &= x3;	x3 |= x0;	\
 	x1 ^= x4;	x3 ^= x4;	x4 &= x0;	\
-	x4 ^= x2;
+	x4 ^= x2;					\
+	})
 
 int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
 		     unsigned int keylen)
 {
 	u32 *k = ctx->expkey;
 	u8  *k8 = (u8 *)k;
-	u32 r0,r1,r2,r3,r4;
+	u32 r0, r1, r2, r3, r4;
 	int i;
 
 	/* Copy key, add padding */
@@ -231,121 +254,183 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
 	r3 = le32_to_cpu(k[6]);
 	r4 = le32_to_cpu(k[7]);
 
-	keyiter(le32_to_cpu(k[0]),r0,r4,r2,0,0);
-	keyiter(le32_to_cpu(k[1]),r1,r0,r3,1,1);
-	keyiter(le32_to_cpu(k[2]),r2,r1,r4,2,2);
-	keyiter(le32_to_cpu(k[3]),r3,r2,r0,3,3);
-	keyiter(le32_to_cpu(k[4]),r4,r3,r1,4,4);
-	keyiter(le32_to_cpu(k[5]),r0,r4,r2,5,5);
-	keyiter(le32_to_cpu(k[6]),r1,r0,r3,6,6);
-	keyiter(le32_to_cpu(k[7]),r2,r1,r4,7,7);
+	keyiter(le32_to_cpu(k[0]), r0, r4, r2, 0, 0);
+	keyiter(le32_to_cpu(k[1]), r1, r0, r3, 1, 1);
+	keyiter(le32_to_cpu(k[2]), r2, r1, r4, 2, 2);
+	keyiter(le32_to_cpu(k[3]), r3, r2, r0, 3, 3);
+	keyiter(le32_to_cpu(k[4]), r4, r3, r1, 4, 4);
+	keyiter(le32_to_cpu(k[5]), r0, r4, r2, 5, 5);
+	keyiter(le32_to_cpu(k[6]), r1, r0, r3, 6, 6);
+	keyiter(le32_to_cpu(k[7]), r2, r1, r4, 7, 7);
 
-	keyiter(k[  0],r3,r2,r0,  8,  8); keyiter(k[  1],r4,r3,r1,  9,  9);
-	keyiter(k[  2],r0,r4,r2, 10, 10); keyiter(k[  3],r1,r0,r3, 11, 11);
-	keyiter(k[  4],r2,r1,r4, 12, 12); keyiter(k[  5],r3,r2,r0, 13, 13);
-	keyiter(k[  6],r4,r3,r1, 14, 14); keyiter(k[  7],r0,r4,r2, 15, 15);
-	keyiter(k[  8],r1,r0,r3, 16, 16); keyiter(k[  9],r2,r1,r4, 17, 17);
-	keyiter(k[ 10],r3,r2,r0, 18, 18); keyiter(k[ 11],r4,r3,r1, 19, 19);
-	keyiter(k[ 12],r0,r4,r2, 20, 20); keyiter(k[ 13],r1,r0,r3, 21, 21);
-	keyiter(k[ 14],r2,r1,r4, 22, 22); keyiter(k[ 15],r3,r2,r0, 23, 23);
-	keyiter(k[ 16],r4,r3,r1, 24, 24); keyiter(k[ 17],r0,r4,r2, 25, 25);
-	keyiter(k[ 18],r1,r0,r3, 26, 26); keyiter(k[ 19],r2,r1,r4, 27, 27);
-	keyiter(k[ 20],r3,r2,r0, 28, 28); keyiter(k[ 21],r4,r3,r1, 29, 29);
-	keyiter(k[ 22],r0,r4,r2, 30, 30); keyiter(k[ 23],r1,r0,r3, 31, 31);
+	keyiter(k[0], r3, r2, r0, 8, 8);
+	keyiter(k[1], r4, r3, r1, 9, 9);
+	keyiter(k[2], r0, r4, r2, 10, 10);
+	keyiter(k[3], r1, r0, r3, 11, 11);
+	keyiter(k[4], r2, r1, r4, 12, 12);
+	keyiter(k[5], r3, r2, r0, 13, 13);
+	keyiter(k[6], r4, r3, r1, 14, 14);
+	keyiter(k[7], r0, r4, r2, 15, 15);
+	keyiter(k[8], r1, r0, r3, 16, 16);
+	keyiter(k[9], r2, r1, r4, 17, 17);
+	keyiter(k[10], r3, r2, r0, 18, 18);
+	keyiter(k[11], r4, r3, r1, 19, 19);
+	keyiter(k[12], r0, r4, r2, 20, 20);
+	keyiter(k[13], r1, r0, r3, 21, 21);
+	keyiter(k[14], r2, r1, r4, 22, 22);
+	keyiter(k[15], r3, r2, r0, 23, 23);
+	keyiter(k[16], r4, r3, r1, 24, 24);
+	keyiter(k[17], r0, r4, r2, 25, 25);
+	keyiter(k[18], r1, r0, r3, 26, 26);
+	keyiter(k[19], r2, r1, r4, 27, 27);
+	keyiter(k[20], r3, r2, r0, 28, 28);
+	keyiter(k[21], r4, r3, r1, 29, 29);
+	keyiter(k[22], r0, r4, r2, 30, 30);
+	keyiter(k[23], r1, r0, r3, 31, 31);
 
 	k += 50;
 
-	keyiter(k[-26],r2,r1,r4, 32,-18); keyiter(k[-25],r3,r2,r0, 33,-17);
-	keyiter(k[-24],r4,r3,r1, 34,-16); keyiter(k[-23],r0,r4,r2, 35,-15);
-	keyiter(k[-22],r1,r0,r3, 36,-14); keyiter(k[-21],r2,r1,r4, 37,-13);
-	keyiter(k[-20],r3,r2,r0, 38,-12); keyiter(k[-19],r4,r3,r1, 39,-11);
-	keyiter(k[-18],r0,r4,r2, 40,-10); keyiter(k[-17],r1,r0,r3, 41, -9);
-	keyiter(k[-16],r2,r1,r4, 42, -8); keyiter(k[-15],r3,r2,r0, 43, -7);
-	keyiter(k[-14],r4,r3,r1, 44, -6); keyiter(k[-13],r0,r4,r2, 45, -5);
-	keyiter(k[-12],r1,r0,r3, 46, -4); keyiter(k[-11],r2,r1,r4, 47, -3);
-	keyiter(k[-10],r3,r2,r0, 48, -2); keyiter(k[ -9],r4,r3,r1, 49, -1);
-	keyiter(k[ -8],r0,r4,r2, 50,  0); keyiter(k[ -7],r1,r0,r3, 51,  1);
-	keyiter(k[ -6],r2,r1,r4, 52,  2); keyiter(k[ -5],r3,r2,r0, 53,  3);
-	keyiter(k[ -4],r4,r3,r1, 54,  4); keyiter(k[ -3],r0,r4,r2, 55,  5);
-	keyiter(k[ -2],r1,r0,r3, 56,  6); keyiter(k[ -1],r2,r1,r4, 57,  7);
-	keyiter(k[  0],r3,r2,r0, 58,  8); keyiter(k[  1],r4,r3,r1, 59,  9);
-	keyiter(k[  2],r0,r4,r2, 60, 10); keyiter(k[  3],r1,r0,r3, 61, 11);
-	keyiter(k[  4],r2,r1,r4, 62, 12); keyiter(k[  5],r3,r2,r0, 63, 13);
-	keyiter(k[  6],r4,r3,r1, 64, 14); keyiter(k[  7],r0,r4,r2, 65, 15);
-	keyiter(k[  8],r1,r0,r3, 66, 16); keyiter(k[  9],r2,r1,r4, 67, 17);
-	keyiter(k[ 10],r3,r2,r0, 68, 18); keyiter(k[ 11],r4,r3,r1, 69, 19);
-	keyiter(k[ 12],r0,r4,r2, 70, 20); keyiter(k[ 13],r1,r0,r3, 71, 21);
-	keyiter(k[ 14],r2,r1,r4, 72, 22); keyiter(k[ 15],r3,r2,r0, 73, 23);
-	keyiter(k[ 16],r4,r3,r1, 74, 24); keyiter(k[ 17],r0,r4,r2, 75, 25);
-	keyiter(k[ 18],r1,r0,r3, 76, 26); keyiter(k[ 19],r2,r1,r4, 77, 27);
-	keyiter(k[ 20],r3,r2,r0, 78, 28); keyiter(k[ 21],r4,r3,r1, 79, 29);
-	keyiter(k[ 22],r0,r4,r2, 80, 30); keyiter(k[ 23],r1,r0,r3, 81, 31);
+	keyiter(k[-26], r2, r1, r4, 32, -18);
+	keyiter(k[-25], r3, r2, r0, 33, -17);
+	keyiter(k[-24], r4, r3, r1, 34, -16);
+	keyiter(k[-23], r0, r4, r2, 35, -15);
+	keyiter(k[-22], r1, r0, r3, 36, -14);
+	keyiter(k[-21], r2, r1, r4, 37, -13);
+	keyiter(k[-20], r3, r2, r0, 38, -12);
+	keyiter(k[-19], r4, r3, r1, 39, -11);
+	keyiter(k[-18], r0, r4, r2, 40, -10);
+	keyiter(k[-17], r1, r0, r3, 41, -9);
+	keyiter(k[-16], r2, r1, r4, 42, -8);
+	keyiter(k[-15], r3, r2, r0, 43, -7);
+	keyiter(k[-14], r4, r3, r1, 44, -6);
+	keyiter(k[-13], r0, r4, r2, 45, -5);
+	keyiter(k[-12], r1, r0, r3, 46, -4);
+	keyiter(k[-11], r2, r1, r4, 47, -3);
+	keyiter(k[-10], r3, r2, r0, 48, -2);
+	keyiter(k[-9], r4, r3, r1, 49, -1);
+	keyiter(k[-8], r0, r4, r2, 50, 0);
+	keyiter(k[-7], r1, r0, r3, 51, 1);
+	keyiter(k[-6], r2, r1, r4, 52, 2);
+	keyiter(k[-5], r3, r2, r0, 53, 3);
+	keyiter(k[-4], r4, r3, r1, 54, 4);
+	keyiter(k[-3], r0, r4, r2, 55, 5);
+	keyiter(k[-2], r1, r0, r3, 56, 6);
+	keyiter(k[-1], r2, r1, r4, 57, 7);
+	keyiter(k[0], r3, r2, r0, 58, 8);
+	keyiter(k[1], r4, r3, r1, 59, 9);
+	keyiter(k[2], r0, r4, r2, 60, 10);
+	keyiter(k[3], r1, r0, r3, 61, 11);
+	keyiter(k[4], r2, r1, r4, 62, 12);
+	keyiter(k[5], r3, r2, r0, 63, 13);
+	keyiter(k[6], r4, r3, r1, 64, 14);
+	keyiter(k[7], r0, r4, r2, 65, 15);
+	keyiter(k[8], r1, r0, r3, 66, 16);
+	keyiter(k[9], r2, r1, r4, 67, 17);
+	keyiter(k[10], r3, r2, r0, 68, 18);
+	keyiter(k[11], r4, r3, r1, 69, 19);
+	keyiter(k[12], r0, r4, r2, 70, 20);
+	keyiter(k[13], r1, r0, r3, 71, 21);
+	keyiter(k[14], r2, r1, r4, 72, 22);
+	keyiter(k[15], r3, r2, r0, 73, 23);
+	keyiter(k[16], r4, r3, r1, 74, 24);
+	keyiter(k[17], r0, r4, r2, 75, 25);
+	keyiter(k[18], r1, r0, r3, 76, 26);
+	keyiter(k[19], r2, r1, r4, 77, 27);
+	keyiter(k[20], r3, r2, r0, 78, 28);
+	keyiter(k[21], r4, r3, r1, 79, 29);
+	keyiter(k[22], r0, r4, r2, 80, 30);
+	keyiter(k[23], r1, r0, r3, 81, 31);
 
 	k += 50;
 
-	keyiter(k[-26],r2,r1,r4, 82,-18); keyiter(k[-25],r3,r2,r0, 83,-17);
-	keyiter(k[-24],r4,r3,r1, 84,-16); keyiter(k[-23],r0,r4,r2, 85,-15);
-	keyiter(k[-22],r1,r0,r3, 86,-14); keyiter(k[-21],r2,r1,r4, 87,-13);
-	keyiter(k[-20],r3,r2,r0, 88,-12); keyiter(k[-19],r4,r3,r1, 89,-11);
-	keyiter(k[-18],r0,r4,r2, 90,-10); keyiter(k[-17],r1,r0,r3, 91, -9);
-	keyiter(k[-16],r2,r1,r4, 92, -8); keyiter(k[-15],r3,r2,r0, 93, -7);
-	keyiter(k[-14],r4,r3,r1, 94, -6); keyiter(k[-13],r0,r4,r2, 95, -5);
-	keyiter(k[-12],r1,r0,r3, 96, -4); keyiter(k[-11],r2,r1,r4, 97, -3);
-	keyiter(k[-10],r3,r2,r0, 98, -2); keyiter(k[ -9],r4,r3,r1, 99, -1);
-	keyiter(k[ -8],r0,r4,r2,100,  0); keyiter(k[ -7],r1,r0,r3,101,  1);
-	keyiter(k[ -6],r2,r1,r4,102,  2); keyiter(k[ -5],r3,r2,r0,103,  3);
-	keyiter(k[ -4],r4,r3,r1,104,  4); keyiter(k[ -3],r0,r4,r2,105,  5);
-	keyiter(k[ -2],r1,r0,r3,106,  6); keyiter(k[ -1],r2,r1,r4,107,  7);
-	keyiter(k[  0],r3,r2,r0,108,  8); keyiter(k[  1],r4,r3,r1,109,  9);
-	keyiter(k[  2],r0,r4,r2,110, 10); keyiter(k[  3],r1,r0,r3,111, 11);
-	keyiter(k[  4],r2,r1,r4,112, 12); keyiter(k[  5],r3,r2,r0,113, 13);
-	keyiter(k[  6],r4,r3,r1,114, 14); keyiter(k[  7],r0,r4,r2,115, 15);
-	keyiter(k[  8],r1,r0,r3,116, 16); keyiter(k[  9],r2,r1,r4,117, 17);
-	keyiter(k[ 10],r3,r2,r0,118, 18); keyiter(k[ 11],r4,r3,r1,119, 19);
-	keyiter(k[ 12],r0,r4,r2,120, 20); keyiter(k[ 13],r1,r0,r3,121, 21);
-	keyiter(k[ 14],r2,r1,r4,122, 22); keyiter(k[ 15],r3,r2,r0,123, 23);
-	keyiter(k[ 16],r4,r3,r1,124, 24); keyiter(k[ 17],r0,r4,r2,125, 25);
-	keyiter(k[ 18],r1,r0,r3,126, 26); keyiter(k[ 19],r2,r1,r4,127, 27);
-	keyiter(k[ 20],r3,r2,r0,128, 28); keyiter(k[ 21],r4,r3,r1,129, 29);
-	keyiter(k[ 22],r0,r4,r2,130, 30); keyiter(k[ 23],r1,r0,r3,131, 31);
+	keyiter(k[-26], r2, r1, r4, 82, -18);
+	keyiter(k[-25], r3, r2, r0, 83, -17);
+	keyiter(k[-24], r4, r3, r1, 84, -16);
+	keyiter(k[-23], r0, r4, r2, 85, -15);
+	keyiter(k[-22], r1, r0, r3, 86, -14);
+	keyiter(k[-21], r2, r1, r4, 87, -13);
+	keyiter(k[-20], r3, r2, r0, 88, -12);
+	keyiter(k[-19], r4, r3, r1, 89, -11);
+	keyiter(k[-18], r0, r4, r2, 90, -10);
+	keyiter(k[-17], r1, r0, r3, 91, -9);
+	keyiter(k[-16], r2, r1, r4, 92, -8);
+	keyiter(k[-15], r3, r2, r0, 93, -7);
+	keyiter(k[-14], r4, r3, r1, 94, -6);
+	keyiter(k[-13], r0, r4, r2, 95, -5);
+	keyiter(k[-12], r1, r0, r3, 96, -4);
+	keyiter(k[-11], r2, r1, r4, 97, -3);
+	keyiter(k[-10], r3, r2, r0, 98, -2);
+	keyiter(k[-9], r4, r3, r1, 99, -1);
+	keyiter(k[-8], r0, r4, r2, 100, 0);
+	keyiter(k[-7], r1, r0, r3, 101, 1);
+	keyiter(k[-6], r2, r1, r4, 102, 2);
+	keyiter(k[-5], r3, r2, r0, 103, 3);
+	keyiter(k[-4], r4, r3, r1, 104, 4);
+	keyiter(k[-3], r0, r4, r2, 105, 5);
+	keyiter(k[-2], r1, r0, r3, 106, 6);
+	keyiter(k[-1], r2, r1, r4, 107, 7);
+	keyiter(k[0], r3, r2, r0, 108, 8);
+	keyiter(k[1], r4, r3, r1, 109, 9);
+	keyiter(k[2], r0, r4, r2, 110, 10);
+	keyiter(k[3], r1, r0, r3, 111, 11);
+	keyiter(k[4], r2, r1, r4, 112, 12);
+	keyiter(k[5], r3, r2, r0, 113, 13);
+	keyiter(k[6], r4, r3, r1, 114, 14);
+	keyiter(k[7], r0, r4, r2, 115, 15);
+	keyiter(k[8], r1, r0, r3, 116, 16);
+	keyiter(k[9], r2, r1, r4, 117, 17);
+	keyiter(k[10], r3, r2, r0, 118, 18);
+	keyiter(k[11], r4, r3, r1, 119, 19);
+	keyiter(k[12], r0, r4, r2, 120, 20);
+	keyiter(k[13], r1, r0, r3, 121, 21);
+	keyiter(k[14], r2, r1, r4, 122, 22);
+	keyiter(k[15], r3, r2, r0, 123, 23);
+	keyiter(k[16], r4, r3, r1, 124, 24);
+	keyiter(k[17], r0, r4, r2, 125, 25);
+	keyiter(k[18], r1, r0, r3, 126, 26);
+	keyiter(k[19], r2, r1, r4, 127, 27);
+	keyiter(k[20], r3, r2, r0, 128, 28);
+	keyiter(k[21], r4, r3, r1, 129, 29);
+	keyiter(k[22], r0, r4, r2, 130, 30);
+	keyiter(k[23], r1, r0, r3, 131, 31);
 
 	/* Apply S-boxes */
 
-	S3(r3,r4,r0,r1,r2); storekeys(r1,r2,r4,r3, 28); loadkeys(r1,r2,r4,r3, 24);
-	S4(r1,r2,r4,r3,r0); storekeys(r2,r4,r3,r0, 24); loadkeys(r2,r4,r3,r0, 20);
-	S5(r2,r4,r3,r0,r1); storekeys(r1,r2,r4,r0, 20); loadkeys(r1,r2,r4,r0, 16);
-	S6(r1,r2,r4,r0,r3); storekeys(r4,r3,r2,r0, 16); loadkeys(r4,r3,r2,r0, 12);
-	S7(r4,r3,r2,r0,r1); storekeys(r1,r2,r0,r4, 12); loadkeys(r1,r2,r0,r4,  8);
-	S0(r1,r2,r0,r4,r3); storekeys(r0,r2,r4,r1,  8); loadkeys(r0,r2,r4,r1,  4);
-	S1(r0,r2,r4,r1,r3); storekeys(r3,r4,r1,r0,  4); loadkeys(r3,r4,r1,r0,  0);
-	S2(r3,r4,r1,r0,r2); storekeys(r2,r4,r3,r0,  0); loadkeys(r2,r4,r3,r0, -4);
-	S3(r2,r4,r3,r0,r1); storekeys(r0,r1,r4,r2, -4); loadkeys(r0,r1,r4,r2, -8);
-	S4(r0,r1,r4,r2,r3); storekeys(r1,r4,r2,r3, -8); loadkeys(r1,r4,r2,r3,-12);
-	S5(r1,r4,r2,r3,r0); storekeys(r0,r1,r4,r3,-12); loadkeys(r0,r1,r4,r3,-16);
-	S6(r0,r1,r4,r3,r2); storekeys(r4,r2,r1,r3,-16); loadkeys(r4,r2,r1,r3,-20);
-	S7(r4,r2,r1,r3,r0); storekeys(r0,r1,r3,r4,-20); loadkeys(r0,r1,r3,r4,-24);
-	S0(r0,r1,r3,r4,r2); storekeys(r3,r1,r4,r0,-24); loadkeys(r3,r1,r4,r0,-28);
+	S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24);
+	S4(r1, r2, r4, r3, r0); store_and_load_keys(r2, r4, r3, r0, 24, 20);
+	S5(r2, r4, r3, r0, r1); store_and_load_keys(r1, r2, r4, r0, 20, 16);
+	S6(r1, r2, r4, r0, r3); store_and_load_keys(r4, r3, r2, r0, 16, 12);
+	S7(r4, r3, r2, r0, r1); store_and_load_keys(r1, r2, r0, r4, 12, 8);
+	S0(r1, r2, r0, r4, r3); store_and_load_keys(r0, r2, r4, r1, 8, 4);
+	S1(r0, r2, r4, r1, r3); store_and_load_keys(r3, r4, r1, r0, 4, 0);
+	S2(r3, r4, r1, r0, r2); store_and_load_keys(r2, r4, r3, r0, 0, -4);
+	S3(r2, r4, r3, r0, r1); store_and_load_keys(r0, r1, r4, r2, -4, -8);
+	S4(r0, r1, r4, r2, r3); store_and_load_keys(r1, r4, r2, r3, -8, -12);
+	S5(r1, r4, r2, r3, r0); store_and_load_keys(r0, r1, r4, r3, -12, -16);
+	S6(r0, r1, r4, r3, r2); store_and_load_keys(r4, r2, r1, r3, -16, -20);
+	S7(r4, r2, r1, r3, r0); store_and_load_keys(r0, r1, r3, r4, -20, -24);
+	S0(r0, r1, r3, r4, r2); store_and_load_keys(r3, r1, r4, r0, -24, -28);
 	k -= 50;
-	S1(r3,r1,r4,r0,r2); storekeys(r2,r4,r0,r3, 22); loadkeys(r2,r4,r0,r3, 18);
-	S2(r2,r4,r0,r3,r1); storekeys(r1,r4,r2,r3, 18); loadkeys(r1,r4,r2,r3, 14);
-	S3(r1,r4,r2,r3,r0); storekeys(r3,r0,r4,r1, 14); loadkeys(r3,r0,r4,r1, 10);
-	S4(r3,r0,r4,r1,r2); storekeys(r0,r4,r1,r2, 10); loadkeys(r0,r4,r1,r2,  6);
-	S5(r0,r4,r1,r2,r3); storekeys(r3,r0,r4,r2,  6); loadkeys(r3,r0,r4,r2,  2);
-	S6(r3,r0,r4,r2,r1); storekeys(r4,r1,r0,r2,  2); loadkeys(r4,r1,r0,r2, -2);
-	S7(r4,r1,r0,r2,r3); storekeys(r3,r0,r2,r4, -2); loadkeys(r3,r0,r2,r4, -6);
-	S0(r3,r0,r2,r4,r1); storekeys(r2,r0,r4,r3, -6); loadkeys(r2,r0,r4,r3,-10);
-	S1(r2,r0,r4,r3,r1); storekeys(r1,r4,r3,r2,-10); loadkeys(r1,r4,r3,r2,-14);
-	S2(r1,r4,r3,r2,r0); storekeys(r0,r4,r1,r2,-14); loadkeys(r0,r4,r1,r2,-18);
-	S3(r0,r4,r1,r2,r3); storekeys(r2,r3,r4,r0,-18); loadkeys(r2,r3,r4,r0,-22);
+	S1(r3, r1, r4, r0, r2); store_and_load_keys(r2, r4, r0, r3, 22, 18);
+	S2(r2, r4, r0, r3, r1); store_and_load_keys(r1, r4, r2, r3, 18, 14);
+	S3(r1, r4, r2, r3, r0); store_and_load_keys(r3, r0, r4, r1, 14, 10);
+	S4(r3, r0, r4, r1, r2); store_and_load_keys(r0, r4, r1, r2, 10, 6);
+	S5(r0, r4, r1, r2, r3); store_and_load_keys(r3, r0, r4, r2, 6, 2);
+	S6(r3, r0, r4, r2, r1); store_and_load_keys(r4, r1, r0, r2, 2, -2);
+	S7(r4, r1, r0, r2, r3); store_and_load_keys(r3, r0, r2, r4, -2, -6);
+	S0(r3, r0, r2, r4, r1); store_and_load_keys(r2, r0, r4, r3, -6, -10);
+	S1(r2, r0, r4, r3, r1); store_and_load_keys(r1, r4, r3, r2, -10, -14);
+	S2(r1, r4, r3, r2, r0); store_and_load_keys(r0, r4, r1, r2, -14, -18);
+	S3(r0, r4, r1, r2, r3); store_and_load_keys(r2, r3, r4, r0, -18, -22);
 	k -= 50;
-	S4(r2,r3,r4,r0,r1); storekeys(r3,r4,r0,r1, 28); loadkeys(r3,r4,r0,r1, 24);
-	S5(r3,r4,r0,r1,r2); storekeys(r2,r3,r4,r1, 24); loadkeys(r2,r3,r4,r1, 20);
-	S6(r2,r3,r4,r1,r0); storekeys(r4,r0,r3,r1, 20); loadkeys(r4,r0,r3,r1, 16);
-	S7(r4,r0,r3,r1,r2); storekeys(r2,r3,r1,r4, 16); loadkeys(r2,r3,r1,r4, 12);
-	S0(r2,r3,r1,r4,r0); storekeys(r1,r3,r4,r2, 12); loadkeys(r1,r3,r4,r2,  8);
-	S1(r1,r3,r4,r2,r0); storekeys(r0,r4,r2,r1,  8); loadkeys(r0,r4,r2,r1,  4);
-	S2(r0,r4,r2,r1,r3); storekeys(r3,r4,r0,r1,  4); loadkeys(r3,r4,r0,r1,  0);
-	S3(r3,r4,r0,r1,r2); storekeys(r1,r2,r4,r3,  0);
+	S4(r2, r3, r4, r0, r1); store_and_load_keys(r3, r4, r0, r1, 28, 24);
+	S5(r3, r4, r0, r1, r2); store_and_load_keys(r2, r3, r4, r1, 24, 20);
+	S6(r2, r3, r4, r1, r0); store_and_load_keys(r4, r0, r3, r1, 20, 16);
+	S7(r4, r0, r3, r1, r2); store_and_load_keys(r2, r3, r1, r4, 16, 12);
+	S0(r2, r3, r1, r4, r0); store_and_load_keys(r1, r3, r4, r2, 12, 8);
+	S1(r1, r3, r4, r2, r0); store_and_load_keys(r0, r4, r2, r1, 8, 4);
+	S2(r0, r4, r2, r1, r3); store_and_load_keys(r3, r4, r0, r1, 4, 0);
+	S3(r3, r4, r0, r1, r2); storekeys(r1, r2, r4, r3, 0);
 
 	return 0;
 }
@@ -374,39 +459,39 @@ void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src)
 	r2 = le32_to_cpu(s[2]);
 	r3 = le32_to_cpu(s[3]);
 
-				 K(r0,r1,r2,r3,0);
-	S0(r0,r1,r2,r3,r4);	LK(r2,r1,r3,r0,r4,1);
-	S1(r2,r1,r3,r0,r4);	LK(r4,r3,r0,r2,r1,2);
-	S2(r4,r3,r0,r2,r1);	LK(r1,r3,r4,r2,r0,3);
-	S3(r1,r3,r4,r2,r0);	LK(r2,r0,r3,r1,r4,4);
-	S4(r2,r0,r3,r1,r4);	LK(r0,r3,r1,r4,r2,5);
-	S5(r0,r3,r1,r4,r2);	LK(r2,r0,r3,r4,r1,6);
-	S6(r2,r0,r3,r4,r1);	LK(r3,r1,r0,r4,r2,7);
-	S7(r3,r1,r0,r4,r2);	LK(r2,r0,r4,r3,r1,8);
-	S0(r2,r0,r4,r3,r1);	LK(r4,r0,r3,r2,r1,9);
-	S1(r4,r0,r3,r2,r1);	LK(r1,r3,r2,r4,r0,10);
-	S2(r1,r3,r2,r4,r0);	LK(r0,r3,r1,r4,r2,11);
-	S3(r0,r3,r1,r4,r2);	LK(r4,r2,r3,r0,r1,12);
-	S4(r4,r2,r3,r0,r1);	LK(r2,r3,r0,r1,r4,13);
-	S5(r2,r3,r0,r1,r4);	LK(r4,r2,r3,r1,r0,14);
-	S6(r4,r2,r3,r1,r0);	LK(r3,r0,r2,r1,r4,15);
-	S7(r3,r0,r2,r1,r4);	LK(r4,r2,r1,r3,r0,16);
-	S0(r4,r2,r1,r3,r0);	LK(r1,r2,r3,r4,r0,17);
-	S1(r1,r2,r3,r4,r0);	LK(r0,r3,r4,r1,r2,18);
-	S2(r0,r3,r4,r1,r2);	LK(r2,r3,r0,r1,r4,19);
-	S3(r2,r3,r0,r1,r4);	LK(r1,r4,r3,r2,r0,20);
-	S4(r1,r4,r3,r2,r0);	LK(r4,r3,r2,r0,r1,21);
-	S5(r4,r3,r2,r0,r1);	LK(r1,r4,r3,r0,r2,22);
-	S6(r1,r4,r3,r0,r2);	LK(r3,r2,r4,r0,r1,23);
-	S7(r3,r2,r4,r0,r1);	LK(r1,r4,r0,r3,r2,24);
-	S0(r1,r4,r0,r3,r2);	LK(r0,r4,r3,r1,r2,25);
-	S1(r0,r4,r3,r1,r2);	LK(r2,r3,r1,r0,r4,26);
-	S2(r2,r3,r1,r0,r4);	LK(r4,r3,r2,r0,r1,27);
-	S3(r4,r3,r2,r0,r1);	LK(r0,r1,r3,r4,r2,28);
-	S4(r0,r1,r3,r4,r2);	LK(r1,r3,r4,r2,r0,29);
-	S5(r1,r3,r4,r2,r0);	LK(r0,r1,r3,r2,r4,30);
-	S6(r0,r1,r3,r2,r4);	LK(r3,r4,r1,r2,r0,31);
-	S7(r3,r4,r1,r2,r0);	 K(r0,r1,r2,r3,32);
+					K(r0, r1, r2, r3, 0);
+	S0(r0, r1, r2, r3, r4);		LK(r2, r1, r3, r0, r4, 1);
+	S1(r2, r1, r3, r0, r4);		LK(r4, r3, r0, r2, r1, 2);
+	S2(r4, r3, r0, r2, r1);		LK(r1, r3, r4, r2, r0, 3);
+	S3(r1, r3, r4, r2, r0);		LK(r2, r0, r3, r1, r4, 4);
+	S4(r2, r0, r3, r1, r4);		LK(r0, r3, r1, r4, r2, 5);
+	S5(r0, r3, r1, r4, r2);		LK(r2, r0, r3, r4, r1, 6);
+	S6(r2, r0, r3, r4, r1);		LK(r3, r1, r0, r4, r2, 7);
+	S7(r3, r1, r0, r4, r2);		LK(r2, r0, r4, r3, r1, 8);
+	S0(r2, r0, r4, r3, r1);		LK(r4, r0, r3, r2, r1, 9);
+	S1(r4, r0, r3, r2, r1);		LK(r1, r3, r2, r4, r0, 10);
+	S2(r1, r3, r2, r4, r0);		LK(r0, r3, r1, r4, r2, 11);
+	S3(r0, r3, r1, r4, r2);		LK(r4, r2, r3, r0, r1, 12);
+	S4(r4, r2, r3, r0, r1);		LK(r2, r3, r0, r1, r4, 13);
+	S5(r2, r3, r0, r1, r4);		LK(r4, r2, r3, r1, r0, 14);
+	S6(r4, r2, r3, r1, r0);		LK(r3, r0, r2, r1, r4, 15);
+	S7(r3, r0, r2, r1, r4);		LK(r4, r2, r1, r3, r0, 16);
+	S0(r4, r2, r1, r3, r0);		LK(r1, r2, r3, r4, r0, 17);
+	S1(r1, r2, r3, r4, r0);		LK(r0, r3, r4, r1, r2, 18);
+	S2(r0, r3, r4, r1, r2);		LK(r2, r3, r0, r1, r4, 19);
+	S3(r2, r3, r0, r1, r4);		LK(r1, r4, r3, r2, r0, 20);
+	S4(r1, r4, r3, r2, r0);		LK(r4, r3, r2, r0, r1, 21);
+	S5(r4, r3, r2, r0, r1);		LK(r1, r4, r3, r0, r2, 22);
+	S6(r1, r4, r3, r0, r2);		LK(r3, r2, r4, r0, r1, 23);
+	S7(r3, r2, r4, r0, r1);		LK(r1, r4, r0, r3, r2, 24);
+	S0(r1, r4, r0, r3, r2);		LK(r0, r4, r3, r1, r2, 25);
+	S1(r0, r4, r3, r1, r2);		LK(r2, r3, r1, r0, r4, 26);
+	S2(r2, r3, r1, r0, r4);		LK(r4, r3, r2, r0, r1, 27);
+	S3(r4, r3, r2, r0, r1);		LK(r0, r1, r3, r4, r2, 28);
+	S4(r0, r1, r3, r4, r2);		LK(r1, r3, r4, r2, r0, 29);
+	S5(r1, r3, r4, r2, r0);		LK(r0, r1, r3, r2, r4, 30);
+	S6(r0, r1, r3, r2, r4);		LK(r3, r4, r1, r2, r0, 31);
+	S7(r3, r4, r1, r2, r0);		K(r0, r1, r2, r3, 32);
 
 	d[0] = cpu_to_le32(r0);
 	d[1] = cpu_to_le32(r1);
@@ -434,39 +519,39 @@ void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src)
 	r2 = le32_to_cpu(s[2]);
 	r3 = le32_to_cpu(s[3]);
 
-				K(r0,r1,r2,r3,32);
-	SI7(r0,r1,r2,r3,r4);	KL(r1,r3,r0,r4,r2,31);
-	SI6(r1,r3,r0,r4,r2);	KL(r0,r2,r4,r1,r3,30);
-	SI5(r0,r2,r4,r1,r3);	KL(r2,r3,r0,r4,r1,29);
-	SI4(r2,r3,r0,r4,r1);	KL(r2,r0,r1,r4,r3,28);
-	SI3(r2,r0,r1,r4,r3);	KL(r1,r2,r3,r4,r0,27);
-	SI2(r1,r2,r3,r4,r0);	KL(r2,r0,r4,r3,r1,26);
-	SI1(r2,r0,r4,r3,r1);	KL(r1,r0,r4,r3,r2,25);
-	SI0(r1,r0,r4,r3,r2);	KL(r4,r2,r0,r1,r3,24);
-	SI7(r4,r2,r0,r1,r3);	KL(r2,r1,r4,r3,r0,23);
-	SI6(r2,r1,r4,r3,r0);	KL(r4,r0,r3,r2,r1,22);
-	SI5(r4,r0,r3,r2,r1);	KL(r0,r1,r4,r3,r2,21);
-	SI4(r0,r1,r4,r3,r2);	KL(r0,r4,r2,r3,r1,20);
-	SI3(r0,r4,r2,r3,r1);	KL(r2,r0,r1,r3,r4,19);
-	SI2(r2,r0,r1,r3,r4);	KL(r0,r4,r3,r1,r2,18);
-	SI1(r0,r4,r3,r1,r2);	KL(r2,r4,r3,r1,r0,17);
-	SI0(r2,r4,r3,r1,r0);	KL(r3,r0,r4,r2,r1,16);
-	SI7(r3,r0,r4,r2,r1);	KL(r0,r2,r3,r1,r4,15);
-	SI6(r0,r2,r3,r1,r4);	KL(r3,r4,r1,r0,r2,14);
-	SI5(r3,r4,r1,r0,r2);	KL(r4,r2,r3,r1,r0,13);
-	SI4(r4,r2,r3,r1,r0);	KL(r4,r3,r0,r1,r2,12);
-	SI3(r4,r3,r0,r1,r2);	KL(r0,r4,r2,r1,r3,11);
-	SI2(r0,r4,r2,r1,r3);	KL(r4,r3,r1,r2,r0,10);
-	SI1(r4,r3,r1,r2,r0);	KL(r0,r3,r1,r2,r4,9);
-	SI0(r0,r3,r1,r2,r4);	KL(r1,r4,r3,r0,r2,8);
-	SI7(r1,r4,r3,r0,r2);	KL(r4,r0,r1,r2,r3,7);
-	SI6(r4,r0,r1,r2,r3);	KL(r1,r3,r2,r4,r0,6);
-	SI5(r1,r3,r2,r4,r0);	KL(r3,r0,r1,r2,r4,5);
-	SI4(r3,r0,r1,r2,r4);	KL(r3,r1,r4,r2,r0,4);
-	SI3(r3,r1,r4,r2,r0);	KL(r4,r3,r0,r2,r1,3);
-	SI2(r4,r3,r0,r2,r1);	KL(r3,r1,r2,r0,r4,2);
-	SI1(r3,r1,r2,r0,r4);	KL(r4,r1,r2,r0,r3,1);
-	SI0(r4,r1,r2,r0,r3);	K(r2,r3,r1,r4,0);
+					K(r0, r1, r2, r3, 32);
+	SI7(r0, r1, r2, r3, r4);	KL(r1, r3, r0, r4, r2, 31);
+	SI6(r1, r3, r0, r4, r2);	KL(r0, r2, r4, r1, r3, 30);
+	SI5(r0, r2, r4, r1, r3);	KL(r2, r3, r0, r4, r1, 29);
+	SI4(r2, r3, r0, r4, r1);	KL(r2, r0, r1, r4, r3, 28);
+	SI3(r2, r0, r1, r4, r3);	KL(r1, r2, r3, r4, r0, 27);
+	SI2(r1, r2, r3, r4, r0);	KL(r2, r0, r4, r3, r1, 26);
+	SI1(r2, r0, r4, r3, r1);	KL(r1, r0, r4, r3, r2, 25);
+	SI0(r1, r0, r4, r3, r2);	KL(r4, r2, r0, r1, r3, 24);
+	SI7(r4, r2, r0, r1, r3);	KL(r2, r1, r4, r3, r0, 23);
+	SI6(r2, r1, r4, r3, r0);	KL(r4, r0, r3, r2, r1, 22);
+	SI5(r4, r0, r3, r2, r1);	KL(r0, r1, r4, r3, r2, 21);
+	SI4(r0, r1, r4, r3, r2);	KL(r0, r4, r2, r3, r1, 20);
+	SI3(r0, r4, r2, r3, r1);	KL(r2, r0, r1, r3, r4, 19);
+	SI2(r2, r0, r1, r3, r4);	KL(r0, r4, r3, r1, r2, 18);
+	SI1(r0, r4, r3, r1, r2);	KL(r2, r4, r3, r1, r0, 17);
+	SI0(r2, r4, r3, r1, r0);	KL(r3, r0, r4, r2, r1, 16);
+	SI7(r3, r0, r4, r2, r1);	KL(r0, r2, r3, r1, r4, 15);
+	SI6(r0, r2, r3, r1, r4);	KL(r3, r4, r1, r0, r2, 14);
+	SI5(r3, r4, r1, r0, r2);	KL(r4, r2, r3, r1, r0, 13);
+	SI4(r4, r2, r3, r1, r0);	KL(r4, r3, r0, r1, r2, 12);
+	SI3(r4, r3, r0, r1, r2);	KL(r0, r4, r2, r1, r3, 11);
+	SI2(r0, r4, r2, r1, r3);	KL(r4, r3, r1, r2, r0, 10);
+	SI1(r4, r3, r1, r2, r0);	KL(r0, r3, r1, r2, r4, 9);
+	SI0(r0, r3, r1, r2, r4);	KL(r1, r4, r3, r0, r2, 8);
+	SI7(r1, r4, r3, r0, r2);	KL(r4, r0, r1, r2, r3, 7);
+	SI6(r4, r0, r1, r2, r3);	KL(r1, r3, r2, r4, r0, 6);
+	SI5(r1, r3, r2, r4, r0);	KL(r3, r0, r1, r2, r4, 5);
+	SI4(r3, r0, r1, r2, r4);	KL(r3, r1, r4, r2, r0, 4);
+	SI3(r3, r1, r4, r2, r0);	KL(r4, r3, r0, r2, r1, 3);
+	SI2(r4, r3, r0, r2, r1);	KL(r3, r1, r2, r0, r4, 2);
+	SI1(r3, r1, r2, r0, r4);	KL(r4, r1, r2, r0, r3, 1);
+	SI0(r4, r1, r2, r0, r3);	K(r2, r3, r1, r4, 0);
 
 	d[0] = cpu_to_le32(r2);
 	d[1] = cpu_to_le32(r3);
@@ -495,9 +580,9 @@ static struct crypto_alg serpent_alg = {
 	.cra_u			=	{ .cipher = {
 	.cia_min_keysize	=	SERPENT_MIN_KEY_SIZE,
 	.cia_max_keysize	=	SERPENT_MAX_KEY_SIZE,
-	.cia_setkey   		= 	serpent_setkey,
-	.cia_encrypt 		=	serpent_encrypt,
-	.cia_decrypt  		=	serpent_decrypt } }
+	.cia_setkey		=	serpent_setkey,
+	.cia_encrypt		=	serpent_encrypt,
+	.cia_decrypt		=	serpent_decrypt } }
 };
 
 static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key,
@@ -508,7 +593,7 @@ static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key,
 
 	for (i = 0; i < keylen; ++i)
 		rev_key[keylen - i - 1] = key[i];
- 
+
 	return serpent_setkey(tfm, rev_key, keylen);
 }
 
@@ -563,9 +648,9 @@ static struct crypto_alg tnepres_alg = {
 	.cra_u			=	{ .cipher = {
 	.cia_min_keysize	=	SERPENT_MIN_KEY_SIZE,
 	.cia_max_keysize	=	SERPENT_MAX_KEY_SIZE,
-	.cia_setkey   		= 	tnepres_setkey,
-	.cia_encrypt 		=	tnepres_encrypt,
-	.cia_decrypt  		=	tnepres_decrypt } }
+	.cia_setkey		=	tnepres_setkey,
+	.cia_encrypt		=	tnepres_encrypt,
+	.cia_decrypt		=	tnepres_decrypt } }
 };
 
 static int __init serpent_mod_init(void)
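
Besides whitespace, the cleanup wraps the multi-statement S-box and key-schedule macros in GCC statement expressions so that each invocation behaves as a single statement. A contrived sketch (illustration only, not code from this patch) of the classic hazard that bare multi-statement macros carry:

/* Illustration only: hypothetical macros, not from serpent.c. */

/* Old style: a bare multi-statement macro. */
#define swap_old(a, b, tmp)	tmp = (a); (a) = (b); (b) = tmp;

/* New style, as used above: a statement expression groups the body. */
#define swap_new(a, b, tmp)	({ tmp = (a); (a) = (b); (b) = tmp; })

static void demo(int cond)
{
	int a = 1, b = 2, tmp = 0;

	if (cond)
		swap_old(a, b, tmp);	/* only "tmp = (a);" is guarded by the if */

	if (cond)
		swap_new(a, b, tmp);	/* the whole swap is guarded */
}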

From 2deed786d99390d5abe06a2a300d0643305bffcb Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Wed, 23 Nov 2011 12:21:06 +0200
Subject: [PATCH 36/54] crypto: serpent - rename serpent.c to serpent_generic.c

Now that serpent.c has been cleaned of checkpatch warnings,
we can do a clean rename.

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/Makefile                         | 2 --
 crypto/{serpent.c => serpent_generic.c} | 0
 2 files changed, 2 deletions(-)
 rename crypto/{serpent.c => serpent_generic.c} (100%)

diff --git a/crypto/Makefile b/crypto/Makefile
index 0e1f2aa7218e..f638063f4ea9 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -65,8 +65,6 @@ obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o
 obj-$(CONFIG_CRYPTO_BLOWFISH_COMMON) += blowfish_common.o
 obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
 obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
-
-serpent_generic-y := serpent.o
 obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
 obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia.o
diff --git a/crypto/serpent.c b/crypto/serpent_generic.c
similarity index 100%
rename from crypto/serpent.c
rename to crypto/serpent_generic.c

From 341975bf3af8f492f8e6ffc3d0fdf6f9c43e1691 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Thu, 24 Nov 2011 08:37:41 +0200
Subject: [PATCH 37/54] crypto: serpent-sse2 - should select CRYPTO_CRYPTD

Since serpent_sse2_glue.c uses cryptd, CRYPTO_SERPENT_SSE2_X86_64 and
CRYPTO_SERPENT_SSE2_586 should select CRYPTO_CRYPTD.

Reported-by: Randy Dunlap <rdunlap@xenotime.net>
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Acked-by: Randy Dunlap <rdunlap@xenotime.net>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/Kconfig | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/crypto/Kconfig b/crypto/Kconfig
index 16e0c1304c3a..b8d1b10aedab 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -770,6 +770,7 @@ config CRYPTO_SERPENT_SSE2_X86_64
 	tristate "Serpent cipher algorithm (x86_64/SSE2)"
 	depends on X86 && 64BIT
 	select CRYPTO_ALGAPI
+	select CRYPTO_CRYPTD
 	select CRYPTO_SERPENT
 	help
 	  Serpent cipher algorithm, by Anderson, Biham & Knudsen.
@@ -787,6 +788,7 @@ config CRYPTO_SERPENT_SSE2_586
 	tristate "Serpent cipher algorithm (i586/SSE2)"
 	depends on X86 && !64BIT
 	select CRYPTO_ALGAPI
+	select CRYPTO_CRYPTD
 	select CRYPTO_SERPENT
 	help
 	  Serpent cipher algorithm, by Anderson, Biham & Knudsen.

From b21cb324f141d16833137ef0355f686efb9bd84f Mon Sep 17 00:00:00 2001
From: Axel Lin <axel.lin@gmail.com>
Date: Sat, 26 Nov 2011 21:11:06 +0800
Subject: [PATCH 38/54] char: hw_random: convert drivers/char/hw_random/* to
 use module_platform_driver()

This patch converts the drivers in drivers/char/hw_random/* to use the
module_platform_driver() macro, which makes the code smaller and a bit
simpler.

Cc: David S. Miller <davem@davemloft.net>
Cc: Josh Boyer <jwboyer@linux.vnet.ibm.com>
Cc: Matt Mackall <mpm@selenic.com>
Signed-off-by: Axel Lin <axel.lin@gmail.com>
Acked-by: Jamie Iles <jamie@jamieiles.com>
Acked-by: Alexander Clouter <alex@digriz.org.uk>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: David Daney <david.daney@cavium.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/char/hw_random/atmel-rng.c      | 12 +-----------
 drivers/char/hw_random/n2-drv.c         | 13 +------------
 drivers/char/hw_random/octeon-rng.c     | 13 +------------
 drivers/char/hw_random/pasemi-rng.c     | 12 +-----------
 drivers/char/hw_random/picoxcell-rng.c  | 12 +-----------
 drivers/char/hw_random/ppc4xx-rng.c     | 12 +-----------
 drivers/char/hw_random/timeriomem-rng.c | 13 +------------
 7 files changed, 7 insertions(+), 80 deletions(-)

diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 241df2e76aba..f518b99f53f5 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -141,17 +141,7 @@ static struct platform_driver atmel_trng_driver = {
 	},
 };
 
-static int __init atmel_trng_init(void)
-{
-	return platform_driver_register(&atmel_trng_driver);
-}
-module_init(atmel_trng_init);
-
-static void __exit atmel_trng_exit(void)
-{
-	platform_driver_unregister(&atmel_trng_driver);
-}
-module_exit(atmel_trng_exit);
+module_platform_driver(atmel_trng_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index c3de70de00d4..ebd48f0135da 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -770,15 +770,4 @@ static struct platform_driver n2rng_driver = {
 	.remove		= __devexit_p(n2rng_remove),
 };
 
-static int __init n2rng_init(void)
-{
-	return platform_driver_register(&n2rng_driver);
-}
-
-static void __exit n2rng_exit(void)
-{
-	platform_driver_unregister(&n2rng_driver);
-}
-
-module_init(n2rng_init);
-module_exit(n2rng_exit);
+module_platform_driver(n2rng_driver);
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index 9cd0feca318c..0943edc782a1 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -131,18 +131,7 @@ static struct platform_driver octeon_rng_driver = {
 	.remove		= __exit_p(octeon_rng_remove),
 };
 
-static int __init octeon_rng_mod_init(void)
-{
-	return platform_driver_register(&octeon_rng_driver);
-}
-
-static void __exit octeon_rng_mod_exit(void)
-{
-	platform_driver_unregister(&octeon_rng_driver);
-}
-
-module_init(octeon_rng_mod_init);
-module_exit(octeon_rng_mod_exit);
+module_platform_driver(octeon_rng_driver);
 
 MODULE_AUTHOR("David Daney");
 MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 1d504815e6db..3a632673aed5 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -148,17 +148,7 @@ static struct platform_driver rng_driver = {
 	.remove		= rng_remove,
 };
 
-static int __init rng_init(void)
-{
-	return platform_driver_register(&rng_driver);
-}
-module_init(rng_init);
-
-static void __exit rng_exit(void)
-{
-	platform_driver_unregister(&rng_driver);
-}
-module_exit(rng_exit);
+module_platform_driver(rng_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
diff --git a/drivers/char/hw_random/picoxcell-rng.c b/drivers/char/hw_random/picoxcell-rng.c
index 990d55a5e3e8..97bd891422c7 100644
--- a/drivers/char/hw_random/picoxcell-rng.c
+++ b/drivers/char/hw_random/picoxcell-rng.c
@@ -191,17 +191,7 @@ static struct platform_driver picoxcell_trng_driver = {
 	},
 };
 
-static int __init picoxcell_trng_init(void)
-{
-	return platform_driver_register(&picoxcell_trng_driver);
-}
-module_init(picoxcell_trng_init);
-
-static void __exit picoxcell_trng_exit(void)
-{
-	platform_driver_unregister(&picoxcell_trng_driver);
-}
-module_exit(picoxcell_trng_exit);
+module_platform_driver(picoxcell_trng_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jamie Iles");
diff --git a/drivers/char/hw_random/ppc4xx-rng.c b/drivers/char/hw_random/ppc4xx-rng.c
index b8afa6a4ff67..c51762c13031 100644
--- a/drivers/char/hw_random/ppc4xx-rng.c
+++ b/drivers/char/hw_random/ppc4xx-rng.c
@@ -139,17 +139,7 @@ static struct platform_driver ppc4xx_rng_driver = {
 	.remove = ppc4xx_rng_remove,
 };
 
-static int __init ppc4xx_rng_init(void)
-{
-	return platform_driver_register(&ppc4xx_rng_driver);
-}
-module_init(ppc4xx_rng_init);
-
-static void __exit ppc4xx_rng_exit(void)
-{
-	platform_driver_unregister(&ppc4xx_rng_driver);
-}
-module_exit(ppc4xx_rng_exit);
+module_platform_driver(ppc4xx_rng_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Josh Boyer <jwboyer@linux.vnet.ibm.com>");
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index a8428e6f64a9..f1a1618db1fb 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -149,18 +149,7 @@ static struct platform_driver timeriomem_rng_driver = {
 	.remove		= __devexit_p(timeriomem_rng_remove),
 };
 
-static int __init timeriomem_rng_init(void)
-{
-	return platform_driver_register(&timeriomem_rng_driver);
-}
-
-static void __exit timeriomem_rng_exit(void)
-{
-	platform_driver_unregister(&timeriomem_rng_driver);
-}
-
-module_init(timeriomem_rng_init);
-module_exit(timeriomem_rng_exit);
+module_platform_driver(timeriomem_rng_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
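
For reference, module_platform_driver() generates exactly the kind of init/exit boilerplate these hunks delete. Simplified, its expansion for the first driver above looks roughly like the sketch below (the real definition lives in <linux/platform_device.h>; details vary by kernel version):

/* Roughly what module_platform_driver(atmel_trng_driver) expands to --
 * equivalent to the hand-written registration functions removed above. */
static int __init atmel_trng_driver_init(void)
{
	return platform_driver_register(&atmel_trng_driver);
}
module_init(atmel_trng_driver_init);

static void __exit atmel_trng_driver_exit(void)
{
	platform_driver_unregister(&atmel_trng_driver);
}
module_exit(atmel_trng_driver_exit);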

From 741e8c2d8177eca656bc015ef83ab84d817edf8c Mon Sep 17 00:00:00 2001
From: Axel Lin <axel.lin@gmail.com>
Date: Sat, 26 Nov 2011 21:26:19 +0800
Subject: [PATCH 39/54] crypto: convert drivers/crypto/* to use
 module_platform_driver()

This patch converts the drivers in drivers/crypto/* to use the
module_platform_driver() macro, which makes the code smaller and a bit
simpler.

Cc: James Hsiao <jhsiao@amcc.com>
Cc: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
Cc: Kim Phillips <kim.phillips@freescale.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Axel Lin <axel.lin@gmail.com>
Acked-by: Vladimir Zapolskiy <vzapolskiy@gmail.com>
Acked-by: Jamie Iles <jamie@jamieiles.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/amcc/crypto4xx_core.c | 13 +------------
 drivers/crypto/caam/ctrl.c           | 13 +------------
 drivers/crypto/mv_cesa.c             | 12 +-----------
 drivers/crypto/picoxcell_crypto.c    | 12 +-----------
 drivers/crypto/s5p-sss.c             | 13 +------------
 drivers/crypto/talitos.c             | 12 +-----------
 6 files changed, 6 insertions(+), 69 deletions(-)

diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 1d103f997dc2..13f8e1a14988 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1292,18 +1292,7 @@ static struct platform_driver crypto4xx_driver = {
 	.remove		= crypto4xx_remove,
 };
 
-static int __init crypto4xx_init(void)
-{
-	return platform_driver_register(&crypto4xx_driver);
-}
-
-static void __exit crypto4xx_exit(void)
-{
-	platform_driver_unregister(&crypto4xx_driver);
-}
-
-module_init(crypto4xx_init);
-module_exit(crypto4xx_exit);
+module_platform_driver(crypto4xx_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 73988bb7322a..048bb2343231 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -253,18 +253,7 @@ static struct platform_driver caam_driver = {
 	.remove      = __devexit_p(caam_remove),
 };
 
-static int __init caam_base_init(void)
-{
-	return platform_driver_register(&caam_driver);
-}
-
-static void __exit caam_base_exit(void)
-{
-	return platform_driver_unregister(&caam_driver);
-}
-
-module_init(caam_base_init);
-module_exit(caam_base_exit);
+module_platform_driver(caam_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("FSL CAAM request backend");
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 5c6f56f21443..015c0fcea0bd 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1126,17 +1126,7 @@ static struct platform_driver marvell_crypto = {
 };
 MODULE_ALIAS("platform:mv_crypto");
 
-static int __init mv_crypto_init(void)
-{
-	return platform_driver_register(&marvell_crypto);
-}
-module_init(mv_crypto_init);
-
-static void __exit mv_crypto_exit(void)
-{
-	platform_driver_unregister(&marvell_crypto);
-}
-module_exit(mv_crypto_exit);
+module_platform_driver(marvell_crypto);
 
 MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
 MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index a2b553eabbdb..9b1571c2a5a4 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1854,17 +1854,7 @@ static struct platform_driver spacc_driver = {
 	.id_table	= spacc_id_table,
 };
 
-static int __init spacc_init(void)
-{
-	return platform_driver_register(&spacc_driver);
-}
-module_init(spacc_init);
-
-static void __exit spacc_exit(void)
-{
-	platform_driver_unregister(&spacc_driver);
-}
-module_exit(spacc_exit);
+module_platform_driver(spacc_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jamie Iles");
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 8115417a1c93..3376bca200fc 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -683,18 +683,7 @@ static struct platform_driver s5p_aes_crypto = {
 	},
 };
 
-static int __init s5p_aes_mod_init(void)
-{
-	return  platform_driver_register(&s5p_aes_crypto);
-}
-
-static void __exit s5p_aes_mod_exit(void)
-{
-	platform_driver_unregister(&s5p_aes_crypto);
-}
-
-module_init(s5p_aes_mod_init);
-module_exit(s5p_aes_mod_exit);
+module_platform_driver(s5p_aes_crypto);
 
 MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 92c0ca75400d..230509e9602b 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2894,17 +2894,7 @@ static struct platform_driver talitos_driver = {
 	.remove = talitos_remove,
 };
 
-static int __init talitos_init(void)
-{
-	return platform_driver_register(&talitos_driver);
-}
-module_init(talitos_init);
-
-static void __exit talitos_exit(void)
-{
-	platform_driver_unregister(&talitos_driver);
-}
-module_exit(talitos_exit);
+module_platform_driver(talitos_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");

From 0b2730d8d8b38e009607d5a094d48fcce73af547 Mon Sep 17 00:00:00 2001
From: Kim Phillips <kim.phillips@freescale.com>
Date: Mon, 12 Dec 2011 14:59:10 -0600
Subject: [PATCH 40/54] crypto: talitos - fix bad kfree

Fix a kfree of an invalid address, which causes an oops when running
on SEC v2.0 h/w (introduced in commit 702331b "crypto: talitos - add
hmac algorithms").

Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/talitos.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 230509e9602b..d376cc7c5e8f 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2626,8 +2626,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
 		alg = &t_alg->algt.alg.hash.halg.base;
 		alg->cra_init = talitos_cra_init_ahash;
 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
-		    !strncmp(alg->cra_name, "hmac", 4))
+		    !strncmp(alg->cra_name, "hmac", 4)) {
+			kfree(t_alg);
 			return ERR_PTR(-ENOTSUPP);
+		}
 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
 		    (!strcmp(alg->cra_name, "sha224") ||
 		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
@@ -2835,10 +2837,8 @@ static int talitos_probe(struct platform_device *ofdev)
 			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
 			if (IS_ERR(t_alg)) {
 				err = PTR_ERR(t_alg);
-				if (err == -ENOTSUPP) {
-					kfree(t_alg);
+				if (err == -ENOTSUPP)
 					continue;
-				}
 				goto err_out;
 			}
 

From 2cdba3cf6ffc1fbf880a6fbfa9e7bb757e3d6526 Mon Sep 17 00:00:00 2001
From: Kim Phillips <kim.phillips@freescale.com>
Date: Mon, 12 Dec 2011 14:59:11 -0600
Subject: [PATCH 41/54] crypto: talitos - remove NO_IRQ references

As prescribed by Linus:

https://lkml.org/lkml/2011/12/2/290

Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/talitos.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index d376cc7c5e8f..503d0d8a58f5 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -234,7 +234,7 @@ static int reset_device(struct device *dev)
 	       && --timeout)
 		cpu_relax();
 
-	if (priv->irq[1] != NO_IRQ) {
+	if (priv->irq[1]) {
 		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
 		setbits32(priv->reg + TALITOS_MCR, mcr);
 	}
@@ -2581,13 +2581,13 @@ static int talitos_remove(struct platform_device *ofdev)
 	kfree(priv->chan);
 
 	for (i = 0; i < 2; i++)
-		if (priv->irq[i] != NO_IRQ) {
+		if (priv->irq[i]) {
 			free_irq(priv->irq[i], dev);
 			irq_dispose_mapping(priv->irq[i]);
 		}
 
 	tasklet_kill(&priv->done_task[0]);
-	if (priv->irq[1] != NO_IRQ)
+	if (priv->irq[1])
 		tasklet_kill(&priv->done_task[1]);
 
 	iounmap(priv->reg);
@@ -2663,7 +2663,7 @@ static int talitos_probe_irq(struct platform_device *ofdev)
 	int err;
 
 	priv->irq[0] = irq_of_parse_and_map(np, 0);
-	if (priv->irq[0] == NO_IRQ) {
+	if (!priv->irq[0]) {
 		dev_err(dev, "failed to map irq\n");
 		return -EINVAL;
 	}
@@ -2671,7 +2671,7 @@ static int talitos_probe_irq(struct platform_device *ofdev)
 	priv->irq[1] = irq_of_parse_and_map(np, 1);
 
 	/* get the primary irq line */
-	if (priv->irq[1] == NO_IRQ) {
+	if (!priv->irq[1]) {
 		err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
 				  dev_driver_string(dev), dev);
 		goto primary_out;
@@ -2688,7 +2688,7 @@ static int talitos_probe_irq(struct platform_device *ofdev)
 	if (err) {
 		dev_err(dev, "failed to request secondary irq\n");
 		irq_dispose_mapping(priv->irq[1]);
-		priv->irq[1] = NO_IRQ;
+		priv->irq[1] = 0;
 	}
 
 	return err;
@@ -2697,7 +2697,7 @@ primary_out:
 	if (err) {
 		dev_err(dev, "failed to request primary irq\n");
 		irq_dispose_mapping(priv->irq[0]);
-		priv->irq[0] = NO_IRQ;
+		priv->irq[0] = 0;
 	}
 
 	return err;
@@ -2723,7 +2723,7 @@ static int talitos_probe(struct platform_device *ofdev)
 	if (err)
 		goto err_out;
 
-	if (priv->irq[1] == NO_IRQ) {
+	if (!priv->irq[1]) {
 		tasklet_init(&priv->done_task[0], talitos_done_4ch,
 			     (unsigned long)dev);
 	} else {
@@ -2784,7 +2784,7 @@ static int talitos_probe(struct platform_device *ofdev)
 
 	for (i = 0; i < priv->num_channels; i++) {
 		priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
-		if ((priv->irq[1] == NO_IRQ) || !(i & 1))
+		if (!priv->irq[1] || !(i & 1))
 			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
 	}
 

From 5e718a09e5cafc8922f3fe22206423449a2801c9 Mon Sep 17 00:00:00 2001
From: Kim Phillips <kim.phillips@freescale.com>
Date: Mon, 12 Dec 2011 14:59:12 -0600
Subject: [PATCH 42/54] crypto: talitos - convert talitos_error to struct
 device

SEC2/3 h/w doesn't have a dedicated interrupt for errors,
and the only callsite for talitos_error has already done
the type conversion, so simplify talitos_error to take a
pointer to a struct device.

Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/talitos.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
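
[Editor's note] The simplification is purely in the signature: the interrupt
macro already holds a typed struct device *, so passing it directly removes
the unsigned long round trip and the cast inside talitos_error(). Sketched
below with hypothetical names:

/* Before: device passed as unsigned long, cast back at the callee (sketch). */
static void report_error(unsigned long data, u32 isr)
{
	struct device *dev = (struct device *)data;

	dev_err(dev, "error: isr 0x%08x\n", isr);
}

/* After: the caller already has the typed pointer, so just pass it. */
static void report_error_dev(struct device *dev, u32 isr)
{
	dev_err(dev, "error: isr 0x%08x\n", isr);
}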

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 503d0d8a58f5..2d8c78901686 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -534,9 +534,8 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
 /*
  * recover from error interrupts
  */
-static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
+static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
 {
-	struct device *dev = (struct device *)data;
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	unsigned int timeout = TALITOS_TIMEOUT;
 	int ch, error, reset_dev = 0, reset_ch = 0;
@@ -628,7 +627,7 @@ static irqreturn_t talitos_interrupt_##name(int irq, void *data)	       \
 	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
 									       \
 	if (unlikely((isr & ~TALITOS_ISR_4CHDONE) & ch_err_mask || isr_lo))    \
-		talitos_error((unsigned long)data, isr, isr_lo);	       \
+		talitos_error(dev, isr, isr_lo);			       \
 	else								       \
 		if (likely(isr & ch_done_mask)) {			       \
 			/* mask further done interrupts. */		       \

From 1582fa814728825b56cc0a2b0fc0cc8862f83277 Mon Sep 17 00:00:00 2001
From: Kim Phillips <kim.phillips@freescale.com>
Date: Mon, 12 Dec 2011 14:59:13 -0600
Subject: [PATCH 43/54] crypto: caam - desc.h - convert spaces to tabs

This is the result of running unexpand -a on desc.h.

Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/caam/desc.h | 1930 ++++++++++++++++++------------------
 1 file changed, 965 insertions(+), 965 deletions(-)

diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 974a75842da9..090c91661f15 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -9,7 +9,7 @@
 #define DESC_H
 
 /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
-#define MAX_CAAM_DESCSIZE       64
+#define MAX_CAAM_DESCSIZE	64
 
 /* Block size of any entity covered/uncovered with a KEK/TKEK */
 #define KEK_BLOCKSIZE		16
@@ -18,38 +18,38 @@
  * Supported descriptor command types as they show up
  * inside a descriptor command word.
  */
-#define CMD_SHIFT               27
-#define CMD_MASK                0xf8000000
+#define CMD_SHIFT		27
+#define CMD_MASK		0xf8000000
 
-#define CMD_KEY                 (0x00 << CMD_SHIFT)
-#define CMD_SEQ_KEY             (0x01 << CMD_SHIFT)
-#define CMD_LOAD                (0x02 << CMD_SHIFT)
-#define CMD_SEQ_LOAD            (0x03 << CMD_SHIFT)
-#define CMD_FIFO_LOAD           (0x04 << CMD_SHIFT)
-#define CMD_SEQ_FIFO_LOAD       (0x05 << CMD_SHIFT)
-#define CMD_STORE               (0x0a << CMD_SHIFT)
-#define CMD_SEQ_STORE           (0x0b << CMD_SHIFT)
-#define CMD_FIFO_STORE          (0x0c << CMD_SHIFT)
-#define CMD_SEQ_FIFO_STORE      (0x0d << CMD_SHIFT)
-#define CMD_MOVE_LEN            (0x0e << CMD_SHIFT)
-#define CMD_MOVE                (0x0f << CMD_SHIFT)
-#define CMD_OPERATION           (0x10 << CMD_SHIFT)
-#define CMD_SIGNATURE           (0x12 << CMD_SHIFT)
-#define CMD_JUMP                (0x14 << CMD_SHIFT)
-#define CMD_MATH                (0x15 << CMD_SHIFT)
-#define CMD_DESC_HDR            (0x16 << CMD_SHIFT)
-#define CMD_SHARED_DESC_HDR     (0x17 << CMD_SHIFT)
-#define CMD_SEQ_IN_PTR          (0x1e << CMD_SHIFT)
-#define CMD_SEQ_OUT_PTR         (0x1f << CMD_SHIFT)
+#define CMD_KEY			(0x00 << CMD_SHIFT)
+#define CMD_SEQ_KEY		(0x01 << CMD_SHIFT)
+#define CMD_LOAD		(0x02 << CMD_SHIFT)
+#define CMD_SEQ_LOAD		(0x03 << CMD_SHIFT)
+#define CMD_FIFO_LOAD		(0x04 << CMD_SHIFT)
+#define CMD_SEQ_FIFO_LOAD	(0x05 << CMD_SHIFT)
+#define CMD_STORE		(0x0a << CMD_SHIFT)
+#define CMD_SEQ_STORE		(0x0b << CMD_SHIFT)
+#define CMD_FIFO_STORE		(0x0c << CMD_SHIFT)
+#define CMD_SEQ_FIFO_STORE	(0x0d << CMD_SHIFT)
+#define CMD_MOVE_LEN		(0x0e << CMD_SHIFT)
+#define CMD_MOVE		(0x0f << CMD_SHIFT)
+#define CMD_OPERATION		(0x10 << CMD_SHIFT)
+#define CMD_SIGNATURE		(0x12 << CMD_SHIFT)
+#define CMD_JUMP		(0x14 << CMD_SHIFT)
+#define CMD_MATH		(0x15 << CMD_SHIFT)
+#define CMD_DESC_HDR		(0x16 << CMD_SHIFT)
+#define CMD_SHARED_DESC_HDR	(0x17 << CMD_SHIFT)
+#define CMD_SEQ_IN_PTR		(0x1e << CMD_SHIFT)
+#define CMD_SEQ_OUT_PTR		(0x1f << CMD_SHIFT)
 
 /* General-purpose class selector for all commands */
-#define CLASS_SHIFT             25
-#define CLASS_MASK              (0x03 << CLASS_SHIFT)
+#define CLASS_SHIFT		25
+#define CLASS_MASK		(0x03 << CLASS_SHIFT)
 
-#define CLASS_NONE              (0x00 << CLASS_SHIFT)
-#define CLASS_1                 (0x01 << CLASS_SHIFT)
-#define CLASS_2                 (0x02 << CLASS_SHIFT)
-#define CLASS_BOTH              (0x03 << CLASS_SHIFT)
+#define CLASS_NONE		(0x00 << CLASS_SHIFT)
+#define CLASS_1			(0x01 << CLASS_SHIFT)
+#define CLASS_2			(0x02 << CLASS_SHIFT)
+#define CLASS_BOTH		(0x03 << CLASS_SHIFT)
 
 /*
  * Descriptor header command constructs
@@ -60,18 +60,18 @@
  * Do Not Run - marks a descriptor inexecutable if there was
  * a preceding error somewhere
  */
-#define HDR_DNR                 0x01000000
+#define HDR_DNR			0x01000000
 
 /*
  * ONE - should always be set. Combination of ONE (always
  * set) and ZRO (always clear) forms an endianness sanity check
  */
-#define HDR_ONE                 0x00800000
-#define HDR_ZRO                 0x00008000
+#define HDR_ONE			0x00800000
+#define HDR_ZRO			0x00008000
 
 /* Start Index or SharedDesc Length */
-#define HDR_START_IDX_MASK      0x3f
-#define HDR_START_IDX_SHIFT     16
+#define HDR_START_IDX_MASK	0x3f
+#define HDR_START_IDX_SHIFT	16
 
 /* If shared descriptor header, 6-bit length */
 #define HDR_DESCLEN_SHR_MASK  0x3f
@@ -80,62 +80,62 @@
 #define HDR_DESCLEN_MASK      0x7f
 
 /* This is a TrustedDesc (if not SharedDesc) */
-#define HDR_TRUSTED             0x00004000
+#define HDR_TRUSTED		0x00004000
 
 /* Make into TrustedDesc (if not SharedDesc) */
-#define HDR_MAKE_TRUSTED        0x00002000
+#define HDR_MAKE_TRUSTED	0x00002000
 
 /* Save context if self-shared (if SharedDesc) */
-#define HDR_SAVECTX             0x00001000
+#define HDR_SAVECTX		0x00001000
 
 /* Next item points to SharedDesc */
-#define HDR_SHARED              0x00001000
+#define HDR_SHARED		0x00001000
 
 /*
  * Reverse Execution Order - execute JobDesc first, then
  * execute SharedDesc (normally SharedDesc goes first).
  */
-#define HDR_REVERSE             0x00000800
+#define HDR_REVERSE		0x00000800
 
 /* Propogate DNR property to SharedDesc */
-#define HDR_PROP_DNR            0x00000800
+#define HDR_PROP_DNR		0x00000800
 
 /* JobDesc/SharedDesc share property */
-#define HDR_SD_SHARE_MASK       0x03
-#define HDR_SD_SHARE_SHIFT      8
-#define HDR_JD_SHARE_MASK       0x07
-#define HDR_JD_SHARE_SHIFT      8
+#define HDR_SD_SHARE_MASK	0x03
+#define HDR_SD_SHARE_SHIFT	8
+#define HDR_JD_SHARE_MASK	0x07
+#define HDR_JD_SHARE_SHIFT	8
 
-#define HDR_SHARE_NEVER         (0x00 << HDR_SD_SHARE_SHIFT)
-#define HDR_SHARE_WAIT          (0x01 << HDR_SD_SHARE_SHIFT)
-#define HDR_SHARE_SERIAL        (0x02 << HDR_SD_SHARE_SHIFT)
-#define HDR_SHARE_ALWAYS        (0x03 << HDR_SD_SHARE_SHIFT)
-#define HDR_SHARE_DEFER         (0x04 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_NEVER		(0x00 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_WAIT		(0x01 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_SERIAL	(0x02 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_ALWAYS	(0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_DEFER		(0x04 << HDR_SD_SHARE_SHIFT)
 
 /* JobDesc/SharedDesc descriptor length */
-#define HDR_JD_LENGTH_MASK      0x7f
-#define HDR_SD_LENGTH_MASK      0x3f
+#define HDR_JD_LENGTH_MASK	0x7f
+#define HDR_SD_LENGTH_MASK	0x3f
 
 /*
  * KEY/SEQ_KEY Command Constructs
  */
 
 /* Key Destination Class: 01 = Class 1, 02 - Class 2  */
-#define KEY_DEST_CLASS_SHIFT    25  /* use CLASS_1 or CLASS_2 */
-#define KEY_DEST_CLASS_MASK     (0x03 << KEY_DEST_CLASS_SHIFT)
+#define KEY_DEST_CLASS_SHIFT	25  /* use CLASS_1 or CLASS_2 */
+#define KEY_DEST_CLASS_MASK	(0x03 << KEY_DEST_CLASS_SHIFT)
 
 /* Scatter-Gather Table/Variable Length Field */
-#define KEY_SGF                 0x01000000
-#define KEY_VLF                 0x01000000
+#define KEY_SGF			0x01000000
+#define KEY_VLF			0x01000000
 
 /* Immediate - Key follows command in the descriptor */
-#define KEY_IMM                 0x00800000
+#define KEY_IMM			0x00800000
 
 /*
  * Encrypted - Key is encrypted either with the KEK, or
  * with the TDKEK if TK is set
  */
-#define KEY_ENC                 0x00400000
+#define KEY_ENC			0x00400000
 
 /*
  * No Write Back - Do not allow key to be FIFO STOREd
@@ -156,16 +156,16 @@
  * KDEST - Key Destination: 0 - class key register,
  * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split-key
  */
-#define KEY_DEST_SHIFT          16
-#define KEY_DEST_MASK           (0x03 << KEY_DEST_SHIFT)
+#define KEY_DEST_SHIFT		16
+#define KEY_DEST_MASK		(0x03 << KEY_DEST_SHIFT)
 
-#define KEY_DEST_CLASS_REG      (0x00 << KEY_DEST_SHIFT)
-#define KEY_DEST_PKHA_E         (0x01 << KEY_DEST_SHIFT)
-#define KEY_DEST_AFHA_SBOX      (0x02 << KEY_DEST_SHIFT)
-#define KEY_DEST_MDHA_SPLIT     (0x03 << KEY_DEST_SHIFT)
+#define KEY_DEST_CLASS_REG	(0x00 << KEY_DEST_SHIFT)
+#define KEY_DEST_PKHA_E		(0x01 << KEY_DEST_SHIFT)
+#define KEY_DEST_AFHA_SBOX	(0x02 << KEY_DEST_SHIFT)
+#define KEY_DEST_MDHA_SPLIT	(0x03 << KEY_DEST_SHIFT)
 
 /* Length in bytes */
-#define KEY_LENGTH_MASK         0x000003ff
+#define KEY_LENGTH_MASK		0x000003ff
 
 /*
  * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
@@ -175,25 +175,25 @@
  * Load/Store Destination: 0 = class independent CCB,
  * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
  */
-#define LDST_CLASS_SHIFT        25
-#define LDST_CLASS_MASK         (0x03 << LDST_CLASS_SHIFT)
-#define LDST_CLASS_IND_CCB      (0x00 << LDST_CLASS_SHIFT)
-#define LDST_CLASS_1_CCB        (0x01 << LDST_CLASS_SHIFT)
-#define LDST_CLASS_2_CCB        (0x02 << LDST_CLASS_SHIFT)
-#define LDST_CLASS_DECO         (0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_SHIFT	25
+#define LDST_CLASS_MASK		(0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_IND_CCB	(0x00 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_1_CCB	(0x01 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_2_CCB	(0x02 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_DECO		(0x03 << LDST_CLASS_SHIFT)
 
 /* Scatter-Gather Table/Variable Length Field */
-#define LDST_SGF                0x01000000
+#define LDST_SGF		0x01000000
 #define LDST_VLF		LDST_SGF
 
-/* Immediate - Key follows this command in descriptor    */
-#define LDST_IMM_MASK           1
-#define LDST_IMM_SHIFT          23
-#define LDST_IMM                (LDST_IMM_MASK << LDST_IMM_SHIFT)
+/* Immediate - Key follows this command in descriptor	 */
+#define LDST_IMM_MASK		1
+#define LDST_IMM_SHIFT		23
+#define LDST_IMM		(LDST_IMM_MASK << LDST_IMM_SHIFT)
 
 /* SRC/DST - Destination for LOAD, Source for STORE   */
-#define LDST_SRCDST_SHIFT       16
-#define LDST_SRCDST_MASK        (0x7f << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_SHIFT	16
+#define LDST_SRCDST_MASK	(0x7f << LDST_SRCDST_SHIFT)
 
 #define LDST_SRCDST_BYTE_CONTEXT	(0x20 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_BYTE_KEY		(0x40 << LDST_SRCDST_SHIFT)
@@ -205,60 +205,60 @@
 #define LDST_SRCDST_WORD_DATASZ_REG	(0x02 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_ICVSZ_REG	(0x03 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_CHACTRL	(0x06 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECOCTRL       (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECOCTRL	(0x06 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_IRQCTRL	(0x07 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_PCLOVRD   (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_PCLOVRD	(0x07 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_CLRW		(0x08 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_MATH0     (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH0	(0x08 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_STAT		(0x09 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_MATH1     (0x09 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_MATH2     (0x0a << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_AAD_SZ    (0x0b << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_MATH3     (0x0b << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_CLASS1_ICV_SZ  (0x0c << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_ALTDS_CLASS1   (0x0f << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_PKHA_A_SZ      (0x10 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_PKHA_B_SZ      (0x11 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_PKHA_N_SZ      (0x12 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_PKHA_E_SZ      (0x13 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DESCBUF        (0x40 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_INFO_FIFO      (0x7a << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH1	(0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH2	(0x0a << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_AAD_SZ	(0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH3	(0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_ICV_SZ	(0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ALTDS_CLASS1	(0x0f << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_A_SZ	(0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_B_SZ	(0x11 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_N_SZ	(0x12 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_E_SZ	(0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF	(0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO	(0x7a << LDST_SRCDST_SHIFT)
 
-/* Offset in source/destination                        */
-#define LDST_OFFSET_SHIFT       8
-#define LDST_OFFSET_MASK        (0xff << LDST_OFFSET_SHIFT)
+/* Offset in source/destination			       */
+#define LDST_OFFSET_SHIFT	8
+#define LDST_OFFSET_MASK	(0xff << LDST_OFFSET_SHIFT)
 
 /* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
 /* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
-#define LDOFF_CHG_SHARE_SHIFT        0
-#define LDOFF_CHG_SHARE_MASK         (0x3 << LDOFF_CHG_SHARE_SHIFT)
-#define LDOFF_CHG_SHARE_NEVER        (0x1 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_SHIFT	     0
+#define LDOFF_CHG_SHARE_MASK	     (0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_NEVER	     (0x1 << LDOFF_CHG_SHARE_SHIFT)
 #define LDOFF_CHG_SHARE_OK_NO_PROP   (0x2 << LDOFF_CHG_SHARE_SHIFT)
 #define LDOFF_CHG_SHARE_OK_PROP      (0x3 << LDOFF_CHG_SHARE_SHIFT)
 
-#define LDOFF_ENABLE_AUTO_NFIFO         (1 << 2)
-#define LDOFF_DISABLE_AUTO_NFIFO        (1 << 3)
+#define LDOFF_ENABLE_AUTO_NFIFO		(1 << 2)
+#define LDOFF_DISABLE_AUTO_NFIFO	(1 << 3)
 
-#define LDOFF_CHG_NONSEQLIODN_SHIFT     4
-#define LDOFF_CHG_NONSEQLIODN_MASK      (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
-#define LDOFF_CHG_NONSEQLIODN_SEQ       (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
-#define LDOFF_CHG_NONSEQLIODN_NON_SEQ   (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
-#define LDOFF_CHG_NONSEQLIODN_TRUSTED   (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_SHIFT	4
+#define LDOFF_CHG_NONSEQLIODN_MASK	(0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_SEQ	(0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_NON_SEQ	(0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_TRUSTED	(0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
 
 #define LDOFF_CHG_SEQLIODN_SHIFT     6
 #define LDOFF_CHG_SEQLIODN_MASK      (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
-#define LDOFF_CHG_SEQLIODN_SEQ       (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SEQ	     (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
 #define LDOFF_CHG_SEQLIODN_NON_SEQ   (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
 #define LDOFF_CHG_SEQLIODN_TRUSTED   (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
 
-/* Data length in bytes                                 */
-#define LDST_LEN_SHIFT          0
-#define LDST_LEN_MASK           (0xff << LDST_LEN_SHIFT)
+/* Data length in bytes					*/
+#define LDST_LEN_SHIFT		0
+#define LDST_LEN_MASK		(0xff << LDST_LEN_SHIFT)
 
 /* Special Length definitions when dst=deco-ctrl */
-#define LDLEN_ENABLE_OSL_COUNT      (1 << 7)
+#define LDLEN_ENABLE_OSL_COUNT	    (1 << 7)
 #define LDLEN_RST_CHA_OFIFO_PTR     (1 << 6)
-#define LDLEN_RST_OFIFO             (1 << 5)
+#define LDLEN_RST_OFIFO		    (1 << 5)
 #define LDLEN_SET_OFIFO_OFF_VALID   (1 << 4)
 #define LDLEN_SET_OFIFO_OFF_RSVD    (1 << 3)
 #define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
@@ -274,34 +274,34 @@
  * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
  * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
  */
-#define FIFOLD_CLASS_SHIFT      25
-#define FIFOLD_CLASS_MASK       (0x03 << FIFOLD_CLASS_SHIFT)
-#define FIFOLD_CLASS_SKIP       (0x00 << FIFOLD_CLASS_SHIFT)
-#define FIFOLD_CLASS_CLASS1     (0x01 << FIFOLD_CLASS_SHIFT)
-#define FIFOLD_CLASS_CLASS2     (0x02 << FIFOLD_CLASS_SHIFT)
-#define FIFOLD_CLASS_BOTH       (0x03 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_SHIFT	25
+#define FIFOLD_CLASS_MASK	(0x03 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_SKIP	(0x00 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS1	(0x01 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS2	(0x02 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_BOTH	(0x03 << FIFOLD_CLASS_SHIFT)
 
-#define FIFOST_CLASS_SHIFT      25
-#define FIFOST_CLASS_MASK       (0x03 << FIFOST_CLASS_SHIFT)
-#define FIFOST_CLASS_NORMAL     (0x00 << FIFOST_CLASS_SHIFT)
-#define FIFOST_CLASS_CLASS1KEY  (0x01 << FIFOST_CLASS_SHIFT)
-#define FIFOST_CLASS_CLASS2KEY  (0x02 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_SHIFT	25
+#define FIFOST_CLASS_MASK	(0x03 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_NORMAL	(0x00 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS1KEY	(0x01 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS2KEY	(0x02 << FIFOST_CLASS_SHIFT)
 
 /*
  * Scatter-Gather Table/Variable Length Field
  * If set for FIFO_LOAD, refers to a SG table. Within
  * SEQ_FIFO_LOAD, is variable input sequence
  */
-#define FIFOLDST_SGF_SHIFT      24
-#define FIFOLDST_SGF_MASK       (1 << FIFOLDST_SGF_SHIFT)
-#define FIFOLDST_VLF_MASK       (1 << FIFOLDST_SGF_SHIFT)
-#define FIFOLDST_SGF            (1 << FIFOLDST_SGF_SHIFT)
-#define FIFOLDST_VLF            (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF_SHIFT	24
+#define FIFOLDST_SGF_MASK	(1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF_MASK	(1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF		(1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF		(1 << FIFOLDST_SGF_SHIFT)
 
 /* Immediate - Data follows command in descriptor */
 #define FIFOLD_IMM_SHIFT      23
 #define FIFOLD_IMM_MASK       (1 << FIFOLD_IMM_SHIFT)
-#define FIFOLD_IMM            (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM	      (1 << FIFOLD_IMM_SHIFT)
 
 /* Continue - Not the last FIFO store to come */
 #define FIFOST_CONT_SHIFT     23
@@ -312,770 +312,770 @@
  * Extended Length - use 32-bit extended length that
  * follows the pointer field. Illegal with IMM set
  */
-#define FIFOLDST_EXT_SHIFT      22
-#define FIFOLDST_EXT_MASK       (1 << FIFOLDST_EXT_SHIFT)
-#define FIFOLDST_EXT            (1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT_SHIFT	22
+#define FIFOLDST_EXT_MASK	(1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT		(1 << FIFOLDST_EXT_SHIFT)
 
 /* Input data type.*/
-#define FIFOLD_TYPE_SHIFT       16
-#define FIFOLD_CONT_TYPE_SHIFT  19 /* shift past last-flush bits */
-#define FIFOLD_TYPE_MASK        (0x3f << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_SHIFT	16
+#define FIFOLD_CONT_TYPE_SHIFT	19 /* shift past last-flush bits */
+#define FIFOLD_TYPE_MASK	(0x3f << FIFOLD_TYPE_SHIFT)
 
 /* PK types */
-#define FIFOLD_TYPE_PK          (0x00 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_MASK     (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK		(0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_MASK	(0x30 << FIFOLD_TYPE_SHIFT)
 #define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A0       (0x00 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A1       (0x01 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A2       (0x02 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A3       (0x03 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B0       (0x04 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B1       (0x05 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B2       (0x06 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B3       (0x07 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_N        (0x08 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A        (0x0c << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B        (0x0d << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A0	(0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A1	(0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A2	(0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A3	(0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B0	(0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B1	(0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B2	(0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B3	(0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_N	(0x08 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A	(0x0c << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B	(0x0d << FIFOLD_TYPE_SHIFT)
 
 /* Other types. Need to OR in last/flush bits as desired */
-#define FIFOLD_TYPE_MSG_MASK    (0x38 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_MSG         (0x10 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_MSG1OUT2    (0x18 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_IV          (0x20 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_BITDATA     (0x28 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_AAD         (0x30 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_ICV         (0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG_MASK	(0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG		(0x10 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG1OUT2	(0x18 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IV		(0x20 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_BITDATA	(0x28 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_AAD		(0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ICV		(0x38 << FIFOLD_TYPE_SHIFT)
 
 /* Last/Flush bits for use with "other" types above */
-#define FIFOLD_TYPE_ACT_MASK    (0x07 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_NOACTION    (0x00 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_FLUSH1      (0x01 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LAST1       (0x02 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LAST2FLUSH  (0x03 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LAST2       (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ACT_MASK	(0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOACTION	(0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_FLUSH1	(0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST1	(0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH	(0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2	(0x04 << FIFOLD_TYPE_SHIFT)
 #define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LASTBOTH    (0x06 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LASTBOTHFL  (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTH	(0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTHFL	(0x07 << FIFOLD_TYPE_SHIFT)
 
-#define FIFOLDST_LEN_MASK       0xffff
-#define FIFOLDST_EXT_LEN_MASK   0xffffffff
+#define FIFOLDST_LEN_MASK	0xffff
+#define FIFOLDST_EXT_LEN_MASK	0xffffffff
 
 /* Output data types */
-#define FIFOST_TYPE_SHIFT       16
-#define FIFOST_TYPE_MASK        (0x3f << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SHIFT	16
+#define FIFOST_TYPE_MASK	(0x3f << FIFOST_TYPE_SHIFT)
 
-#define FIFOST_TYPE_PKHA_A0      (0x00 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_A1      (0x01 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_A2      (0x02 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_A3      (0x03 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B0      (0x04 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B1      (0x05 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B2      (0x06 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B3      (0x07 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_N       (0x08 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_A       (0x0c << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B       (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A0	 (0x00 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A1	 (0x01 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A2	 (0x02 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A3	 (0x03 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B0	 (0x04 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B1	 (0x05 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B2	 (0x06 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B3	 (0x07 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_N	 (0x08 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A	 (0x0c << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B	 (0x0d << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_PKHA_E_JKEK  (0x22 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_PKHA_E_TKEK  (0x23 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_KEY_KEK      (0x24 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_KEY_TKEK     (0x25 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_SPLIT_KEK    (0x26 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_SPLIT_TKEK   (0x27 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_KEK	 (0x24 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_TKEK	 (0x25 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_KEK	 (0x26 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_TKEK	 (0x27 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_OUTFIFO_KEK  (0x28 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_RNGSTORE     (0x34 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_RNGFIFO      (0x35 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_SKIP         (0x3f << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGSTORE	 (0x34 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGFIFO	 (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SKIP	 (0x3f << FIFOST_TYPE_SHIFT)
 
 /*
  * OPERATION Command Constructs
  */
 
 /* Operation type selectors - OP TYPE */
-#define OP_TYPE_SHIFT           24
-#define OP_TYPE_MASK            (0x07 << OP_TYPE_SHIFT)
+#define OP_TYPE_SHIFT		24
+#define OP_TYPE_MASK		(0x07 << OP_TYPE_SHIFT)
 
-#define OP_TYPE_UNI_PROTOCOL    (0x00 << OP_TYPE_SHIFT)
-#define OP_TYPE_PK              (0x01 << OP_TYPE_SHIFT)
-#define OP_TYPE_CLASS1_ALG      (0x02 << OP_TYPE_SHIFT)
-#define OP_TYPE_CLASS2_ALG      (0x04 << OP_TYPE_SHIFT)
-#define OP_TYPE_DECAP_PROTOCOL  (0x06 << OP_TYPE_SHIFT)
-#define OP_TYPE_ENCAP_PROTOCOL  (0x07 << OP_TYPE_SHIFT)
+#define OP_TYPE_UNI_PROTOCOL	(0x00 << OP_TYPE_SHIFT)
+#define OP_TYPE_PK		(0x01 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS1_ALG	(0x02 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS2_ALG	(0x04 << OP_TYPE_SHIFT)
+#define OP_TYPE_DECAP_PROTOCOL	(0x06 << OP_TYPE_SHIFT)
+#define OP_TYPE_ENCAP_PROTOCOL	(0x07 << OP_TYPE_SHIFT)
 
 /* ProtocolID selectors - PROTID */
-#define OP_PCLID_SHIFT          16
-#define OP_PCLID_MASK           (0xff << 16)
+#define OP_PCLID_SHIFT		16
+#define OP_PCLID_MASK		(0xff << 16)
 
 /* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
-#define OP_PCLID_IKEV1_PRF      (0x01 << OP_PCLID_SHIFT)
-#define OP_PCLID_IKEV2_PRF      (0x02 << OP_PCLID_SHIFT)
-#define OP_PCLID_SSL30_PRF      (0x08 << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS10_PRF      (0x09 << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS11_PRF      (0x0a << OP_PCLID_SHIFT)
-#define OP_PCLID_DTLS10_PRF     (0x0c << OP_PCLID_SHIFT)
-#define OP_PCLID_PRF            (0x06 << OP_PCLID_SHIFT)
-#define OP_PCLID_BLOB           (0x0d << OP_PCLID_SHIFT)
-#define OP_PCLID_SECRETKEY      (0x11 << OP_PCLID_SHIFT)
-#define OP_PCLID_PUBLICKEYPAIR  (0x14 << OP_PCLID_SHIFT)
-#define OP_PCLID_DSASIGN        (0x15 << OP_PCLID_SHIFT)
-#define OP_PCLID_DSAVERIFY      (0x16 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV1_PRF	(0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV2_PRF	(0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30_PRF	(0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10_PRF	(0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11_PRF	(0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10_PRF	(0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_PRF		(0x06 << OP_PCLID_SHIFT)
+#define OP_PCLID_BLOB		(0x0d << OP_PCLID_SHIFT)
+#define OP_PCLID_SECRETKEY	(0x11 << OP_PCLID_SHIFT)
+#define OP_PCLID_PUBLICKEYPAIR	(0x14 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSASIGN	(0x15 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSAVERIFY	(0x16 << OP_PCLID_SHIFT)
 
 /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
-#define OP_PCLID_IPSEC          (0x01 << OP_PCLID_SHIFT)
-#define OP_PCLID_SRTP           (0x02 << OP_PCLID_SHIFT)
-#define OP_PCLID_MACSEC         (0x03 << OP_PCLID_SHIFT)
-#define OP_PCLID_WIFI           (0x04 << OP_PCLID_SHIFT)
-#define OP_PCLID_WIMAX          (0x05 << OP_PCLID_SHIFT)
-#define OP_PCLID_SSL30          (0x08 << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS10          (0x09 << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS11          (0x0a << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS12          (0x0b << OP_PCLID_SHIFT)
-#define OP_PCLID_DTLS           (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_IPSEC		(0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_SRTP		(0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_MACSEC		(0x03 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIFI		(0x04 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIMAX		(0x05 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30		(0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10		(0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11		(0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12		(0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS		(0x0c << OP_PCLID_SHIFT)
 
 /*
  * ProtocolInfo selectors
  */
-#define OP_PCLINFO_MASK                          0xffff
+#define OP_PCLINFO_MASK				 0xffff
 
 /* for OP_PCLID_IPSEC */
-#define OP_PCL_IPSEC_CIPHER_MASK                 0xff00
-#define OP_PCL_IPSEC_AUTH_MASK                   0x00ff
+#define OP_PCL_IPSEC_CIPHER_MASK		 0xff00
+#define OP_PCL_IPSEC_AUTH_MASK			 0x00ff
 
-#define OP_PCL_IPSEC_DES_IV64                    0x0100
-#define OP_PCL_IPSEC_DES                         0x0200
-#define OP_PCL_IPSEC_3DES                        0x0300
-#define OP_PCL_IPSEC_AES_CBC                     0x0c00
-#define OP_PCL_IPSEC_AES_CTR                     0x0d00
-#define OP_PCL_IPSEC_AES_XTS                     0x1600
-#define OP_PCL_IPSEC_AES_CCM8                    0x0e00
-#define OP_PCL_IPSEC_AES_CCM12                   0x0f00
-#define OP_PCL_IPSEC_AES_CCM16                   0x1000
-#define OP_PCL_IPSEC_AES_GCM8                    0x1200
-#define OP_PCL_IPSEC_AES_GCM12                   0x1300
-#define OP_PCL_IPSEC_AES_GCM16                   0x1400
+#define OP_PCL_IPSEC_DES_IV64			 0x0100
+#define OP_PCL_IPSEC_DES			 0x0200
+#define OP_PCL_IPSEC_3DES			 0x0300
+#define OP_PCL_IPSEC_AES_CBC			 0x0c00
+#define OP_PCL_IPSEC_AES_CTR			 0x0d00
+#define OP_PCL_IPSEC_AES_XTS			 0x1600
+#define OP_PCL_IPSEC_AES_CCM8			 0x0e00
+#define OP_PCL_IPSEC_AES_CCM12			 0x0f00
+#define OP_PCL_IPSEC_AES_CCM16			 0x1000
+#define OP_PCL_IPSEC_AES_GCM8			 0x1200
+#define OP_PCL_IPSEC_AES_GCM12			 0x1300
+#define OP_PCL_IPSEC_AES_GCM16			 0x1400
 
-#define OP_PCL_IPSEC_HMAC_NULL                   0x0000
-#define OP_PCL_IPSEC_HMAC_MD5_96                 0x0001
-#define OP_PCL_IPSEC_HMAC_SHA1_96                0x0002
-#define OP_PCL_IPSEC_AES_XCBC_MAC_96             0x0005
-#define OP_PCL_IPSEC_HMAC_MD5_128                0x0006
-#define OP_PCL_IPSEC_HMAC_SHA1_160               0x0007
-#define OP_PCL_IPSEC_HMAC_SHA2_256_128           0x000c
-#define OP_PCL_IPSEC_HMAC_SHA2_384_192           0x000d
-#define OP_PCL_IPSEC_HMAC_SHA2_512_256           0x000e
+#define OP_PCL_IPSEC_HMAC_NULL			 0x0000
+#define OP_PCL_IPSEC_HMAC_MD5_96		 0x0001
+#define OP_PCL_IPSEC_HMAC_SHA1_96		 0x0002
+#define OP_PCL_IPSEC_AES_XCBC_MAC_96		 0x0005
+#define OP_PCL_IPSEC_HMAC_MD5_128		 0x0006
+#define OP_PCL_IPSEC_HMAC_SHA1_160		 0x0007
+#define OP_PCL_IPSEC_HMAC_SHA2_256_128		 0x000c
+#define OP_PCL_IPSEC_HMAC_SHA2_384_192		 0x000d
+#define OP_PCL_IPSEC_HMAC_SHA2_512_256		 0x000e
 
 /* For SRTP - OP_PCLID_SRTP */
-#define OP_PCL_SRTP_CIPHER_MASK                  0xff00
-#define OP_PCL_SRTP_AUTH_MASK                    0x00ff
+#define OP_PCL_SRTP_CIPHER_MASK			 0xff00
+#define OP_PCL_SRTP_AUTH_MASK			 0x00ff
 
-#define OP_PCL_SRTP_AES_CTR                      0x0d00
+#define OP_PCL_SRTP_AES_CTR			 0x0d00
 
-#define OP_PCL_SRTP_HMAC_SHA1_160                0x0007
+#define OP_PCL_SRTP_HMAC_SHA1_160		 0x0007
 
 /* For SSL 3.0 - OP_PCLID_SSL30 */
-#define OP_PCL_SSL30_AES_128_CBC_SHA             0x002f
-#define OP_PCL_SSL30_AES_128_CBC_SHA_2           0x0030
-#define OP_PCL_SSL30_AES_128_CBC_SHA_3           0x0031
-#define OP_PCL_SSL30_AES_128_CBC_SHA_4           0x0032
-#define OP_PCL_SSL30_AES_128_CBC_SHA_5           0x0033
-#define OP_PCL_SSL30_AES_128_CBC_SHA_6           0x0034
-#define OP_PCL_SSL30_AES_128_CBC_SHA_7           0x008c
-#define OP_PCL_SSL30_AES_128_CBC_SHA_8           0x0090
-#define OP_PCL_SSL30_AES_128_CBC_SHA_9           0x0094
-#define OP_PCL_SSL30_AES_128_CBC_SHA_10          0xc004
-#define OP_PCL_SSL30_AES_128_CBC_SHA_11          0xc009
-#define OP_PCL_SSL30_AES_128_CBC_SHA_12          0xc00e
-#define OP_PCL_SSL30_AES_128_CBC_SHA_13          0xc013
-#define OP_PCL_SSL30_AES_128_CBC_SHA_14          0xc018
-#define OP_PCL_SSL30_AES_128_CBC_SHA_15          0xc01d
-#define OP_PCL_SSL30_AES_128_CBC_SHA_16          0xc01e
-#define OP_PCL_SSL30_AES_128_CBC_SHA_17          0xc01f
+#define OP_PCL_SSL30_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_SSL30_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_SSL30_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_SSL30_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_SSL30_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_SSL30_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_SSL30_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_SSL30_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_SSL30_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_SSL30_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_SSL30_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_SSL30_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_SSL30_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_SSL30_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_SSL30_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_17		 0xc01f
 
-#define OP_PCL_SSL30_AES_256_CBC_SHA             0x0035
-#define OP_PCL_SSL30_AES_256_CBC_SHA_2           0x0036
-#define OP_PCL_SSL30_AES_256_CBC_SHA_3           0x0037
-#define OP_PCL_SSL30_AES_256_CBC_SHA_4           0x0038
-#define OP_PCL_SSL30_AES_256_CBC_SHA_5           0x0039
-#define OP_PCL_SSL30_AES_256_CBC_SHA_6           0x003a
-#define OP_PCL_SSL30_AES_256_CBC_SHA_7           0x008d
-#define OP_PCL_SSL30_AES_256_CBC_SHA_8           0x0091
-#define OP_PCL_SSL30_AES_256_CBC_SHA_9           0x0095
-#define OP_PCL_SSL30_AES_256_CBC_SHA_10          0xc005
-#define OP_PCL_SSL30_AES_256_CBC_SHA_11          0xc00a
-#define OP_PCL_SSL30_AES_256_CBC_SHA_12          0xc00f
-#define OP_PCL_SSL30_AES_256_CBC_SHA_13          0xc014
-#define OP_PCL_SSL30_AES_256_CBC_SHA_14          0xc019
-#define OP_PCL_SSL30_AES_256_CBC_SHA_15          0xc020
-#define OP_PCL_SSL30_AES_256_CBC_SHA_16          0xc021
-#define OP_PCL_SSL30_AES_256_CBC_SHA_17          0xc022
+#define OP_PCL_SSL30_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_SSL30_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_SSL30_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_SSL30_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_SSL30_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_SSL30_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_SSL30_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_SSL30_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_SSL30_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_SSL30_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_SSL30_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_SSL30_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_SSL30_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_SSL30_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_SSL30_AES_256_CBC_SHA_17		 0xc022
 
-#define OP_PCL_SSL30_3DES_EDE_CBC_MD5            0x0023
+#define OP_PCL_SSL30_3DES_EDE_CBC_MD5		 0x0023
 
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA            0x001f
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2          0x008b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3          0x008f
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4          0x0093
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5          0x000a
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6          0x000d
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7          0x0010
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8          0x0013
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9          0x0016
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10         0x001b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11         0xc003
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12         0xc008
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13         0xc00d
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14         0xc012
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15         0xc017
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16         0xc01a
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17         0xc01b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18         0xc01c
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10	 0x001b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11	 0xc003
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12	 0xc008
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13	 0xc00d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14	 0xc012
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15	 0xc017
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16	 0xc01a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17	 0xc01b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18	 0xc01c
 
-#define OP_PCL_SSL30_DES40_CBC_MD5               0x0029
+#define OP_PCL_SSL30_DES40_CBC_MD5		 0x0029
 
-#define OP_PCL_SSL30_DES_CBC_MD5                 0x0022
+#define OP_PCL_SSL30_DES_CBC_MD5		 0x0022
 
-#define OP_PCL_SSL30_DES40_CBC_SHA               0x0008
-#define OP_PCL_SSL30_DES40_CBC_SHA_2             0x000b
-#define OP_PCL_SSL30_DES40_CBC_SHA_3             0x000e
-#define OP_PCL_SSL30_DES40_CBC_SHA_4             0x0011
-#define OP_PCL_SSL30_DES40_CBC_SHA_5             0x0014
-#define OP_PCL_SSL30_DES40_CBC_SHA_6             0x0019
-#define OP_PCL_SSL30_DES40_CBC_SHA_7             0x0026
+#define OP_PCL_SSL30_DES40_CBC_SHA		 0x0008
+#define OP_PCL_SSL30_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_SSL30_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_SSL30_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_SSL30_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_SSL30_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_SSL30_DES40_CBC_SHA_7		 0x0026
 
-#define OP_PCL_SSL30_DES_CBC_SHA                 0x001e
-#define OP_PCL_SSL30_DES_CBC_SHA_2               0x0009
-#define OP_PCL_SSL30_DES_CBC_SHA_3               0x000c
-#define OP_PCL_SSL30_DES_CBC_SHA_4               0x000f
-#define OP_PCL_SSL30_DES_CBC_SHA_5               0x0012
-#define OP_PCL_SSL30_DES_CBC_SHA_6               0x0015
-#define OP_PCL_SSL30_DES_CBC_SHA_7               0x001a
+#define OP_PCL_SSL30_DES_CBC_SHA		 0x001e
+#define OP_PCL_SSL30_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_SSL30_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_SSL30_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_SSL30_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_SSL30_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_SSL30_DES_CBC_SHA_7		 0x001a
 
-#define OP_PCL_SSL30_RC4_128_MD5                 0x0024
-#define OP_PCL_SSL30_RC4_128_MD5_2               0x0004
-#define OP_PCL_SSL30_RC4_128_MD5_3               0x0018
+#define OP_PCL_SSL30_RC4_128_MD5		 0x0024
+#define OP_PCL_SSL30_RC4_128_MD5_2		 0x0004
+#define OP_PCL_SSL30_RC4_128_MD5_3		 0x0018
 
-#define OP_PCL_SSL30_RC4_40_MD5                  0x002b
-#define OP_PCL_SSL30_RC4_40_MD5_2                0x0003
-#define OP_PCL_SSL30_RC4_40_MD5_3                0x0017
+#define OP_PCL_SSL30_RC4_40_MD5			 0x002b
+#define OP_PCL_SSL30_RC4_40_MD5_2		 0x0003
+#define OP_PCL_SSL30_RC4_40_MD5_3		 0x0017
 
-#define OP_PCL_SSL30_RC4_128_SHA                 0x0020
-#define OP_PCL_SSL30_RC4_128_SHA_2               0x008a
-#define OP_PCL_SSL30_RC4_128_SHA_3               0x008e
-#define OP_PCL_SSL30_RC4_128_SHA_4               0x0092
-#define OP_PCL_SSL30_RC4_128_SHA_5               0x0005
-#define OP_PCL_SSL30_RC4_128_SHA_6               0xc002
-#define OP_PCL_SSL30_RC4_128_SHA_7               0xc007
-#define OP_PCL_SSL30_RC4_128_SHA_8               0xc00c
-#define OP_PCL_SSL30_RC4_128_SHA_9               0xc011
-#define OP_PCL_SSL30_RC4_128_SHA_10              0xc016
+#define OP_PCL_SSL30_RC4_128_SHA		 0x0020
+#define OP_PCL_SSL30_RC4_128_SHA_2		 0x008a
+#define OP_PCL_SSL30_RC4_128_SHA_3		 0x008e
+#define OP_PCL_SSL30_RC4_128_SHA_4		 0x0092
+#define OP_PCL_SSL30_RC4_128_SHA_5		 0x0005
+#define OP_PCL_SSL30_RC4_128_SHA_6		 0xc002
+#define OP_PCL_SSL30_RC4_128_SHA_7		 0xc007
+#define OP_PCL_SSL30_RC4_128_SHA_8		 0xc00c
+#define OP_PCL_SSL30_RC4_128_SHA_9		 0xc011
+#define OP_PCL_SSL30_RC4_128_SHA_10		 0xc016
 
-#define OP_PCL_SSL30_RC4_40_SHA                  0x0028
+#define OP_PCL_SSL30_RC4_40_SHA			 0x0028
 
 
 /* For TLS 1.0 - OP_PCLID_TLS10 */
-#define OP_PCL_TLS10_AES_128_CBC_SHA             0x002f
-#define OP_PCL_TLS10_AES_128_CBC_SHA_2           0x0030
-#define OP_PCL_TLS10_AES_128_CBC_SHA_3           0x0031
-#define OP_PCL_TLS10_AES_128_CBC_SHA_4           0x0032
-#define OP_PCL_TLS10_AES_128_CBC_SHA_5           0x0033
-#define OP_PCL_TLS10_AES_128_CBC_SHA_6           0x0034
-#define OP_PCL_TLS10_AES_128_CBC_SHA_7           0x008c
-#define OP_PCL_TLS10_AES_128_CBC_SHA_8           0x0090
-#define OP_PCL_TLS10_AES_128_CBC_SHA_9           0x0094
-#define OP_PCL_TLS10_AES_128_CBC_SHA_10          0xc004
-#define OP_PCL_TLS10_AES_128_CBC_SHA_11          0xc009
-#define OP_PCL_TLS10_AES_128_CBC_SHA_12          0xc00e
-#define OP_PCL_TLS10_AES_128_CBC_SHA_13          0xc013
-#define OP_PCL_TLS10_AES_128_CBC_SHA_14          0xc018
-#define OP_PCL_TLS10_AES_128_CBC_SHA_15          0xc01d
-#define OP_PCL_TLS10_AES_128_CBC_SHA_16          0xc01e
-#define OP_PCL_TLS10_AES_128_CBC_SHA_17          0xc01f
+#define OP_PCL_TLS10_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_TLS10_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_TLS10_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_TLS10_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_TLS10_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_TLS10_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_TLS10_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_TLS10_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_TLS10_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_TLS10_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_TLS10_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_TLS10_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_TLS10_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_TLS10_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_TLS10_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_17		 0xc01f
 
-#define OP_PCL_TLS10_AES_256_CBC_SHA             0x0035
-#define OP_PCL_TLS10_AES_256_CBC_SHA_2           0x0036
-#define OP_PCL_TLS10_AES_256_CBC_SHA_3           0x0037
-#define OP_PCL_TLS10_AES_256_CBC_SHA_4           0x0038
-#define OP_PCL_TLS10_AES_256_CBC_SHA_5           0x0039
-#define OP_PCL_TLS10_AES_256_CBC_SHA_6           0x003a
-#define OP_PCL_TLS10_AES_256_CBC_SHA_7           0x008d
-#define OP_PCL_TLS10_AES_256_CBC_SHA_8           0x0091
-#define OP_PCL_TLS10_AES_256_CBC_SHA_9           0x0095
-#define OP_PCL_TLS10_AES_256_CBC_SHA_10          0xc005
-#define OP_PCL_TLS10_AES_256_CBC_SHA_11          0xc00a
-#define OP_PCL_TLS10_AES_256_CBC_SHA_12          0xc00f
-#define OP_PCL_TLS10_AES_256_CBC_SHA_13          0xc014
-#define OP_PCL_TLS10_AES_256_CBC_SHA_14          0xc019
-#define OP_PCL_TLS10_AES_256_CBC_SHA_15          0xc020
-#define OP_PCL_TLS10_AES_256_CBC_SHA_16          0xc021
-#define OP_PCL_TLS10_AES_256_CBC_SHA_17          0xc022
+#define OP_PCL_TLS10_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_TLS10_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_TLS10_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_TLS10_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_TLS10_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_TLS10_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_TLS10_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_TLS10_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_TLS10_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_TLS10_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_TLS10_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_TLS10_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_TLS10_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_TLS10_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_TLS10_AES_256_CBC_SHA_17		 0xc022
 
-/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5            0x0023 */
+/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5	    0x0023 */
 
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA            0x001f
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2          0x008b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3          0x008f
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4          0x0093
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5          0x000a
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6          0x000d
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7          0x0010
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8          0x0013
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9          0x0016
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10         0x001b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11         0xc003
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12         0xc008
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13         0xc00d
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14         0xc012
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15         0xc017
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16         0xc01a
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17         0xc01b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18         0xc01c
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10	 0x001b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11	 0xc003
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12	 0xc008
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13	 0xc00d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14	 0xc012
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15	 0xc017
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16	 0xc01a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17	 0xc01b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18	 0xc01c
 
-#define OP_PCL_TLS10_DES40_CBC_MD5               0x0029
+#define OP_PCL_TLS10_DES40_CBC_MD5		 0x0029
 
-#define OP_PCL_TLS10_DES_CBC_MD5                 0x0022
+#define OP_PCL_TLS10_DES_CBC_MD5		 0x0022
 
-#define OP_PCL_TLS10_DES40_CBC_SHA               0x0008
-#define OP_PCL_TLS10_DES40_CBC_SHA_2             0x000b
-#define OP_PCL_TLS10_DES40_CBC_SHA_3             0x000e
-#define OP_PCL_TLS10_DES40_CBC_SHA_4             0x0011
-#define OP_PCL_TLS10_DES40_CBC_SHA_5             0x0014
-#define OP_PCL_TLS10_DES40_CBC_SHA_6             0x0019
-#define OP_PCL_TLS10_DES40_CBC_SHA_7             0x0026
+#define OP_PCL_TLS10_DES40_CBC_SHA		 0x0008
+#define OP_PCL_TLS10_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_TLS10_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_TLS10_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_TLS10_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_TLS10_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_TLS10_DES40_CBC_SHA_7		 0x0026
 
 
-#define OP_PCL_TLS10_DES_CBC_SHA                 0x001e
-#define OP_PCL_TLS10_DES_CBC_SHA_2               0x0009
-#define OP_PCL_TLS10_DES_CBC_SHA_3               0x000c
-#define OP_PCL_TLS10_DES_CBC_SHA_4               0x000f
-#define OP_PCL_TLS10_DES_CBC_SHA_5               0x0012
-#define OP_PCL_TLS10_DES_CBC_SHA_6               0x0015
-#define OP_PCL_TLS10_DES_CBC_SHA_7               0x001a
+#define OP_PCL_TLS10_DES_CBC_SHA		 0x001e
+#define OP_PCL_TLS10_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_TLS10_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_TLS10_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_TLS10_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_TLS10_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_TLS10_DES_CBC_SHA_7		 0x001a
 
-#define OP_PCL_TLS10_RC4_128_MD5                 0x0024
-#define OP_PCL_TLS10_RC4_128_MD5_2               0x0004
-#define OP_PCL_TLS10_RC4_128_MD5_3               0x0018
+#define OP_PCL_TLS10_RC4_128_MD5		 0x0024
+#define OP_PCL_TLS10_RC4_128_MD5_2		 0x0004
+#define OP_PCL_TLS10_RC4_128_MD5_3		 0x0018
 
-#define OP_PCL_TLS10_RC4_40_MD5                  0x002b
-#define OP_PCL_TLS10_RC4_40_MD5_2                0x0003
-#define OP_PCL_TLS10_RC4_40_MD5_3                0x0017
+#define OP_PCL_TLS10_RC4_40_MD5			 0x002b
+#define OP_PCL_TLS10_RC4_40_MD5_2		 0x0003
+#define OP_PCL_TLS10_RC4_40_MD5_3		 0x0017
 
-#define OP_PCL_TLS10_RC4_128_SHA                 0x0020
-#define OP_PCL_TLS10_RC4_128_SHA_2               0x008a
-#define OP_PCL_TLS10_RC4_128_SHA_3               0x008e
-#define OP_PCL_TLS10_RC4_128_SHA_4               0x0092
-#define OP_PCL_TLS10_RC4_128_SHA_5               0x0005
-#define OP_PCL_TLS10_RC4_128_SHA_6               0xc002
-#define OP_PCL_TLS10_RC4_128_SHA_7               0xc007
-#define OP_PCL_TLS10_RC4_128_SHA_8               0xc00c
-#define OP_PCL_TLS10_RC4_128_SHA_9               0xc011
-#define OP_PCL_TLS10_RC4_128_SHA_10              0xc016
+#define OP_PCL_TLS10_RC4_128_SHA		 0x0020
+#define OP_PCL_TLS10_RC4_128_SHA_2		 0x008a
+#define OP_PCL_TLS10_RC4_128_SHA_3		 0x008e
+#define OP_PCL_TLS10_RC4_128_SHA_4		 0x0092
+#define OP_PCL_TLS10_RC4_128_SHA_5		 0x0005
+#define OP_PCL_TLS10_RC4_128_SHA_6		 0xc002
+#define OP_PCL_TLS10_RC4_128_SHA_7		 0xc007
+#define OP_PCL_TLS10_RC4_128_SHA_8		 0xc00c
+#define OP_PCL_TLS10_RC4_128_SHA_9		 0xc011
+#define OP_PCL_TLS10_RC4_128_SHA_10		 0xc016
 
-#define OP_PCL_TLS10_RC4_40_SHA                  0x0028
+#define OP_PCL_TLS10_RC4_40_SHA			 0x0028
 
-#define OP_PCL_TLS10_3DES_EDE_CBC_MD5            0xff23
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160         0xff30
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224         0xff34
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256         0xff36
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384         0xff33
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512         0xff35
-#define OP_PCL_TLS10_AES_128_CBC_SHA160          0xff80
-#define OP_PCL_TLS10_AES_128_CBC_SHA224          0xff84
-#define OP_PCL_TLS10_AES_128_CBC_SHA256          0xff86
-#define OP_PCL_TLS10_AES_128_CBC_SHA384          0xff83
-#define OP_PCL_TLS10_AES_128_CBC_SHA512          0xff85
-#define OP_PCL_TLS10_AES_192_CBC_SHA160          0xff20
-#define OP_PCL_TLS10_AES_192_CBC_SHA224          0xff24
-#define OP_PCL_TLS10_AES_192_CBC_SHA256          0xff26
-#define OP_PCL_TLS10_AES_192_CBC_SHA384          0xff23
-#define OP_PCL_TLS10_AES_192_CBC_SHA512          0xff25
-#define OP_PCL_TLS10_AES_256_CBC_SHA160          0xff60
-#define OP_PCL_TLS10_AES_256_CBC_SHA224          0xff64
-#define OP_PCL_TLS10_AES_256_CBC_SHA256          0xff66
-#define OP_PCL_TLS10_AES_256_CBC_SHA384          0xff63
-#define OP_PCL_TLS10_AES_256_CBC_SHA512          0xff65
+#define OP_PCL_TLS10_3DES_EDE_CBC_MD5		 0xff23
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160	 0xff30
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224	 0xff34
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256	 0xff36
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384	 0xff33
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512	 0xff35
+#define OP_PCL_TLS10_AES_128_CBC_SHA160		 0xff80
+#define OP_PCL_TLS10_AES_128_CBC_SHA224		 0xff84
+#define OP_PCL_TLS10_AES_128_CBC_SHA256		 0xff86
+#define OP_PCL_TLS10_AES_128_CBC_SHA384		 0xff83
+#define OP_PCL_TLS10_AES_128_CBC_SHA512		 0xff85
+#define OP_PCL_TLS10_AES_192_CBC_SHA160		 0xff20
+#define OP_PCL_TLS10_AES_192_CBC_SHA224		 0xff24
+#define OP_PCL_TLS10_AES_192_CBC_SHA256		 0xff26
+#define OP_PCL_TLS10_AES_192_CBC_SHA384		 0xff23
+#define OP_PCL_TLS10_AES_192_CBC_SHA512		 0xff25
+#define OP_PCL_TLS10_AES_256_CBC_SHA160		 0xff60
+#define OP_PCL_TLS10_AES_256_CBC_SHA224		 0xff64
+#define OP_PCL_TLS10_AES_256_CBC_SHA256		 0xff66
+#define OP_PCL_TLS10_AES_256_CBC_SHA384		 0xff63
+#define OP_PCL_TLS10_AES_256_CBC_SHA512		 0xff65
 
 
 
 /* For TLS 1.1 - OP_PCLID_TLS11 */
-#define OP_PCL_TLS11_AES_128_CBC_SHA             0x002f
-#define OP_PCL_TLS11_AES_128_CBC_SHA_2           0x0030
-#define OP_PCL_TLS11_AES_128_CBC_SHA_3           0x0031
-#define OP_PCL_TLS11_AES_128_CBC_SHA_4           0x0032
-#define OP_PCL_TLS11_AES_128_CBC_SHA_5           0x0033
-#define OP_PCL_TLS11_AES_128_CBC_SHA_6           0x0034
-#define OP_PCL_TLS11_AES_128_CBC_SHA_7           0x008c
-#define OP_PCL_TLS11_AES_128_CBC_SHA_8           0x0090
-#define OP_PCL_TLS11_AES_128_CBC_SHA_9           0x0094
-#define OP_PCL_TLS11_AES_128_CBC_SHA_10          0xc004
-#define OP_PCL_TLS11_AES_128_CBC_SHA_11          0xc009
-#define OP_PCL_TLS11_AES_128_CBC_SHA_12          0xc00e
-#define OP_PCL_TLS11_AES_128_CBC_SHA_13          0xc013
-#define OP_PCL_TLS11_AES_128_CBC_SHA_14          0xc018
-#define OP_PCL_TLS11_AES_128_CBC_SHA_15          0xc01d
-#define OP_PCL_TLS11_AES_128_CBC_SHA_16          0xc01e
-#define OP_PCL_TLS11_AES_128_CBC_SHA_17          0xc01f
+#define OP_PCL_TLS11_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_TLS11_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_TLS11_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_TLS11_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_TLS11_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_TLS11_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_TLS11_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_TLS11_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_TLS11_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_TLS11_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_TLS11_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_TLS11_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_TLS11_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_TLS11_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_TLS11_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_17		 0xc01f
 
-#define OP_PCL_TLS11_AES_256_CBC_SHA             0x0035
-#define OP_PCL_TLS11_AES_256_CBC_SHA_2           0x0036
-#define OP_PCL_TLS11_AES_256_CBC_SHA_3           0x0037
-#define OP_PCL_TLS11_AES_256_CBC_SHA_4           0x0038
-#define OP_PCL_TLS11_AES_256_CBC_SHA_5           0x0039
-#define OP_PCL_TLS11_AES_256_CBC_SHA_6           0x003a
-#define OP_PCL_TLS11_AES_256_CBC_SHA_7           0x008d
-#define OP_PCL_TLS11_AES_256_CBC_SHA_8           0x0091
-#define OP_PCL_TLS11_AES_256_CBC_SHA_9           0x0095
-#define OP_PCL_TLS11_AES_256_CBC_SHA_10          0xc005
-#define OP_PCL_TLS11_AES_256_CBC_SHA_11          0xc00a
-#define OP_PCL_TLS11_AES_256_CBC_SHA_12          0xc00f
-#define OP_PCL_TLS11_AES_256_CBC_SHA_13          0xc014
-#define OP_PCL_TLS11_AES_256_CBC_SHA_14          0xc019
-#define OP_PCL_TLS11_AES_256_CBC_SHA_15          0xc020
-#define OP_PCL_TLS11_AES_256_CBC_SHA_16          0xc021
-#define OP_PCL_TLS11_AES_256_CBC_SHA_17          0xc022
+#define OP_PCL_TLS11_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_TLS11_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_TLS11_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_TLS11_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_TLS11_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_TLS11_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_TLS11_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_TLS11_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_TLS11_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_TLS11_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_TLS11_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_TLS11_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_TLS11_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_TLS11_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_TLS11_AES_256_CBC_SHA_17		 0xc022
 
-/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5            0x0023 */
+/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5	    0x0023 */
 
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA            0x001f
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2          0x008b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3          0x008f
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4          0x0093
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5          0x000a
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6          0x000d
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7          0x0010
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8          0x0013
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9          0x0016
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10         0x001b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11         0xc003
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12         0xc008
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13         0xc00d
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14         0xc012
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15         0xc017
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16         0xc01a
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17         0xc01b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18         0xc01c
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10	 0x001b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11	 0xc003
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12	 0xc008
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13	 0xc00d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14	 0xc012
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15	 0xc017
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16	 0xc01a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17	 0xc01b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18	 0xc01c
 
-#define OP_PCL_TLS11_DES40_CBC_MD5               0x0029
+#define OP_PCL_TLS11_DES40_CBC_MD5		 0x0029
 
-#define OP_PCL_TLS11_DES_CBC_MD5                 0x0022
+#define OP_PCL_TLS11_DES_CBC_MD5		 0x0022
 
-#define OP_PCL_TLS11_DES40_CBC_SHA               0x0008
-#define OP_PCL_TLS11_DES40_CBC_SHA_2             0x000b
-#define OP_PCL_TLS11_DES40_CBC_SHA_3             0x000e
-#define OP_PCL_TLS11_DES40_CBC_SHA_4             0x0011
-#define OP_PCL_TLS11_DES40_CBC_SHA_5             0x0014
-#define OP_PCL_TLS11_DES40_CBC_SHA_6             0x0019
-#define OP_PCL_TLS11_DES40_CBC_SHA_7             0x0026
+#define OP_PCL_TLS11_DES40_CBC_SHA		 0x0008
+#define OP_PCL_TLS11_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_TLS11_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_TLS11_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_TLS11_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_TLS11_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_TLS11_DES40_CBC_SHA_7		 0x0026
 
-#define OP_PCL_TLS11_DES_CBC_SHA                 0x001e
-#define OP_PCL_TLS11_DES_CBC_SHA_2               0x0009
-#define OP_PCL_TLS11_DES_CBC_SHA_3               0x000c
-#define OP_PCL_TLS11_DES_CBC_SHA_4               0x000f
-#define OP_PCL_TLS11_DES_CBC_SHA_5               0x0012
-#define OP_PCL_TLS11_DES_CBC_SHA_6               0x0015
-#define OP_PCL_TLS11_DES_CBC_SHA_7               0x001a
+#define OP_PCL_TLS11_DES_CBC_SHA		 0x001e
+#define OP_PCL_TLS11_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_TLS11_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_TLS11_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_TLS11_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_TLS11_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_TLS11_DES_CBC_SHA_7		 0x001a
 
-#define OP_PCL_TLS11_RC4_128_MD5                 0x0024
-#define OP_PCL_TLS11_RC4_128_MD5_2               0x0004
-#define OP_PCL_TLS11_RC4_128_MD5_3               0x0018
+#define OP_PCL_TLS11_RC4_128_MD5		 0x0024
+#define OP_PCL_TLS11_RC4_128_MD5_2		 0x0004
+#define OP_PCL_TLS11_RC4_128_MD5_3		 0x0018
 
-#define OP_PCL_TLS11_RC4_40_MD5                  0x002b
-#define OP_PCL_TLS11_RC4_40_MD5_2                0x0003
-#define OP_PCL_TLS11_RC4_40_MD5_3                0x0017
+#define OP_PCL_TLS11_RC4_40_MD5			 0x002b
+#define OP_PCL_TLS11_RC4_40_MD5_2		 0x0003
+#define OP_PCL_TLS11_RC4_40_MD5_3		 0x0017
 
-#define OP_PCL_TLS11_RC4_128_SHA                 0x0020
-#define OP_PCL_TLS11_RC4_128_SHA_2               0x008a
-#define OP_PCL_TLS11_RC4_128_SHA_3               0x008e
-#define OP_PCL_TLS11_RC4_128_SHA_4               0x0092
-#define OP_PCL_TLS11_RC4_128_SHA_5               0x0005
-#define OP_PCL_TLS11_RC4_128_SHA_6               0xc002
-#define OP_PCL_TLS11_RC4_128_SHA_7               0xc007
-#define OP_PCL_TLS11_RC4_128_SHA_8               0xc00c
-#define OP_PCL_TLS11_RC4_128_SHA_9               0xc011
-#define OP_PCL_TLS11_RC4_128_SHA_10              0xc016
+#define OP_PCL_TLS11_RC4_128_SHA		 0x0020
+#define OP_PCL_TLS11_RC4_128_SHA_2		 0x008a
+#define OP_PCL_TLS11_RC4_128_SHA_3		 0x008e
+#define OP_PCL_TLS11_RC4_128_SHA_4		 0x0092
+#define OP_PCL_TLS11_RC4_128_SHA_5		 0x0005
+#define OP_PCL_TLS11_RC4_128_SHA_6		 0xc002
+#define OP_PCL_TLS11_RC4_128_SHA_7		 0xc007
+#define OP_PCL_TLS11_RC4_128_SHA_8		 0xc00c
+#define OP_PCL_TLS11_RC4_128_SHA_9		 0xc011
+#define OP_PCL_TLS11_RC4_128_SHA_10		 0xc016
 
-#define OP_PCL_TLS11_RC4_40_SHA                  0x0028
+#define OP_PCL_TLS11_RC4_40_SHA			 0x0028
 
-#define OP_PCL_TLS11_3DES_EDE_CBC_MD5            0xff23
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160         0xff30
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224         0xff34
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256         0xff36
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384         0xff33
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512         0xff35
-#define OP_PCL_TLS11_AES_128_CBC_SHA160          0xff80
-#define OP_PCL_TLS11_AES_128_CBC_SHA224          0xff84
-#define OP_PCL_TLS11_AES_128_CBC_SHA256          0xff86
-#define OP_PCL_TLS11_AES_128_CBC_SHA384          0xff83
-#define OP_PCL_TLS11_AES_128_CBC_SHA512          0xff85
-#define OP_PCL_TLS11_AES_192_CBC_SHA160          0xff20
-#define OP_PCL_TLS11_AES_192_CBC_SHA224          0xff24
-#define OP_PCL_TLS11_AES_192_CBC_SHA256          0xff26
-#define OP_PCL_TLS11_AES_192_CBC_SHA384          0xff23
-#define OP_PCL_TLS11_AES_192_CBC_SHA512          0xff25
-#define OP_PCL_TLS11_AES_256_CBC_SHA160          0xff60
-#define OP_PCL_TLS11_AES_256_CBC_SHA224          0xff64
-#define OP_PCL_TLS11_AES_256_CBC_SHA256          0xff66
-#define OP_PCL_TLS11_AES_256_CBC_SHA384          0xff63
-#define OP_PCL_TLS11_AES_256_CBC_SHA512          0xff65
+#define OP_PCL_TLS11_3DES_EDE_CBC_MD5		 0xff23
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160	 0xff30
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224	 0xff34
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256	 0xff36
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384	 0xff33
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512	 0xff35
+#define OP_PCL_TLS11_AES_128_CBC_SHA160		 0xff80
+#define OP_PCL_TLS11_AES_128_CBC_SHA224		 0xff84
+#define OP_PCL_TLS11_AES_128_CBC_SHA256		 0xff86
+#define OP_PCL_TLS11_AES_128_CBC_SHA384		 0xff83
+#define OP_PCL_TLS11_AES_128_CBC_SHA512		 0xff85
+#define OP_PCL_TLS11_AES_192_CBC_SHA160		 0xff20
+#define OP_PCL_TLS11_AES_192_CBC_SHA224		 0xff24
+#define OP_PCL_TLS11_AES_192_CBC_SHA256		 0xff26
+#define OP_PCL_TLS11_AES_192_CBC_SHA384		 0xff23
+#define OP_PCL_TLS11_AES_192_CBC_SHA512		 0xff25
+#define OP_PCL_TLS11_AES_256_CBC_SHA160		 0xff60
+#define OP_PCL_TLS11_AES_256_CBC_SHA224		 0xff64
+#define OP_PCL_TLS11_AES_256_CBC_SHA256		 0xff66
+#define OP_PCL_TLS11_AES_256_CBC_SHA384		 0xff63
+#define OP_PCL_TLS11_AES_256_CBC_SHA512		 0xff65
 
 
 /* For TLS 1.2 - OP_PCLID_TLS12 */
-#define OP_PCL_TLS12_AES_128_CBC_SHA             0x002f
-#define OP_PCL_TLS12_AES_128_CBC_SHA_2           0x0030
-#define OP_PCL_TLS12_AES_128_CBC_SHA_3           0x0031
-#define OP_PCL_TLS12_AES_128_CBC_SHA_4           0x0032
-#define OP_PCL_TLS12_AES_128_CBC_SHA_5           0x0033
-#define OP_PCL_TLS12_AES_128_CBC_SHA_6           0x0034
-#define OP_PCL_TLS12_AES_128_CBC_SHA_7           0x008c
-#define OP_PCL_TLS12_AES_128_CBC_SHA_8           0x0090
-#define OP_PCL_TLS12_AES_128_CBC_SHA_9           0x0094
-#define OP_PCL_TLS12_AES_128_CBC_SHA_10          0xc004
-#define OP_PCL_TLS12_AES_128_CBC_SHA_11          0xc009
-#define OP_PCL_TLS12_AES_128_CBC_SHA_12          0xc00e
-#define OP_PCL_TLS12_AES_128_CBC_SHA_13          0xc013
-#define OP_PCL_TLS12_AES_128_CBC_SHA_14          0xc018
-#define OP_PCL_TLS12_AES_128_CBC_SHA_15          0xc01d
-#define OP_PCL_TLS12_AES_128_CBC_SHA_16          0xc01e
-#define OP_PCL_TLS12_AES_128_CBC_SHA_17          0xc01f
+#define OP_PCL_TLS12_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_TLS12_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_TLS12_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_TLS12_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_TLS12_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_TLS12_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_TLS12_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_TLS12_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_TLS12_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_TLS12_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_TLS12_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_TLS12_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_TLS12_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_TLS12_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_TLS12_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_17		 0xc01f
 
-#define OP_PCL_TLS12_AES_256_CBC_SHA             0x0035
-#define OP_PCL_TLS12_AES_256_CBC_SHA_2           0x0036
-#define OP_PCL_TLS12_AES_256_CBC_SHA_3           0x0037
-#define OP_PCL_TLS12_AES_256_CBC_SHA_4           0x0038
-#define OP_PCL_TLS12_AES_256_CBC_SHA_5           0x0039
-#define OP_PCL_TLS12_AES_256_CBC_SHA_6           0x003a
-#define OP_PCL_TLS12_AES_256_CBC_SHA_7           0x008d
-#define OP_PCL_TLS12_AES_256_CBC_SHA_8           0x0091
-#define OP_PCL_TLS12_AES_256_CBC_SHA_9           0x0095
-#define OP_PCL_TLS12_AES_256_CBC_SHA_10          0xc005
-#define OP_PCL_TLS12_AES_256_CBC_SHA_11          0xc00a
-#define OP_PCL_TLS12_AES_256_CBC_SHA_12          0xc00f
-#define OP_PCL_TLS12_AES_256_CBC_SHA_13          0xc014
-#define OP_PCL_TLS12_AES_256_CBC_SHA_14          0xc019
-#define OP_PCL_TLS12_AES_256_CBC_SHA_15          0xc020
-#define OP_PCL_TLS12_AES_256_CBC_SHA_16          0xc021
-#define OP_PCL_TLS12_AES_256_CBC_SHA_17          0xc022
+#define OP_PCL_TLS12_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_TLS12_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_TLS12_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_TLS12_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_TLS12_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_TLS12_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_TLS12_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_TLS12_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_TLS12_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_TLS12_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_TLS12_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_TLS12_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_TLS12_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_TLS12_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_TLS12_AES_256_CBC_SHA_17		 0xc022
 
-/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5            0x0023 */
+/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5	    0x0023 */
 
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA            0x001f
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2          0x008b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3          0x008f
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4          0x0093
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5          0x000a
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6          0x000d
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7          0x0010
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8          0x0013
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9          0x0016
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10         0x001b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11         0xc003
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12         0xc008
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13         0xc00d
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14         0xc012
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15         0xc017
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16         0xc01a
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17         0xc01b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18         0xc01c
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10	 0x001b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11	 0xc003
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12	 0xc008
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13	 0xc00d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14	 0xc012
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15	 0xc017
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16	 0xc01a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17	 0xc01b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18	 0xc01c
 
-#define OP_PCL_TLS12_DES40_CBC_MD5               0x0029
+#define OP_PCL_TLS12_DES40_CBC_MD5		 0x0029
 
-#define OP_PCL_TLS12_DES_CBC_MD5                 0x0022
+#define OP_PCL_TLS12_DES_CBC_MD5		 0x0022
 
-#define OP_PCL_TLS12_DES40_CBC_SHA               0x0008
-#define OP_PCL_TLS12_DES40_CBC_SHA_2             0x000b
-#define OP_PCL_TLS12_DES40_CBC_SHA_3             0x000e
-#define OP_PCL_TLS12_DES40_CBC_SHA_4             0x0011
-#define OP_PCL_TLS12_DES40_CBC_SHA_5             0x0014
-#define OP_PCL_TLS12_DES40_CBC_SHA_6             0x0019
-#define OP_PCL_TLS12_DES40_CBC_SHA_7             0x0026
+#define OP_PCL_TLS12_DES40_CBC_SHA		 0x0008
+#define OP_PCL_TLS12_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_TLS12_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_TLS12_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_TLS12_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_TLS12_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_TLS12_DES40_CBC_SHA_7		 0x0026
 
-#define OP_PCL_TLS12_DES_CBC_SHA                 0x001e
-#define OP_PCL_TLS12_DES_CBC_SHA_2               0x0009
-#define OP_PCL_TLS12_DES_CBC_SHA_3               0x000c
-#define OP_PCL_TLS12_DES_CBC_SHA_4               0x000f
-#define OP_PCL_TLS12_DES_CBC_SHA_5               0x0012
-#define OP_PCL_TLS12_DES_CBC_SHA_6               0x0015
-#define OP_PCL_TLS12_DES_CBC_SHA_7               0x001a
+#define OP_PCL_TLS12_DES_CBC_SHA		 0x001e
+#define OP_PCL_TLS12_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_TLS12_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_TLS12_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_TLS12_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_TLS12_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_TLS12_DES_CBC_SHA_7		 0x001a
 
-#define OP_PCL_TLS12_RC4_128_MD5                 0x0024
-#define OP_PCL_TLS12_RC4_128_MD5_2               0x0004
-#define OP_PCL_TLS12_RC4_128_MD5_3               0x0018
+#define OP_PCL_TLS12_RC4_128_MD5		 0x0024
+#define OP_PCL_TLS12_RC4_128_MD5_2		 0x0004
+#define OP_PCL_TLS12_RC4_128_MD5_3		 0x0018
 
-#define OP_PCL_TLS12_RC4_40_MD5                  0x002b
-#define OP_PCL_TLS12_RC4_40_MD5_2                0x0003
-#define OP_PCL_TLS12_RC4_40_MD5_3                0x0017
+#define OP_PCL_TLS12_RC4_40_MD5			 0x002b
+#define OP_PCL_TLS12_RC4_40_MD5_2		 0x0003
+#define OP_PCL_TLS12_RC4_40_MD5_3		 0x0017
 
-#define OP_PCL_TLS12_RC4_128_SHA                 0x0020
-#define OP_PCL_TLS12_RC4_128_SHA_2               0x008a
-#define OP_PCL_TLS12_RC4_128_SHA_3               0x008e
-#define OP_PCL_TLS12_RC4_128_SHA_4               0x0092
-#define OP_PCL_TLS12_RC4_128_SHA_5               0x0005
-#define OP_PCL_TLS12_RC4_128_SHA_6               0xc002
-#define OP_PCL_TLS12_RC4_128_SHA_7               0xc007
-#define OP_PCL_TLS12_RC4_128_SHA_8               0xc00c
-#define OP_PCL_TLS12_RC4_128_SHA_9               0xc011
-#define OP_PCL_TLS12_RC4_128_SHA_10              0xc016
+#define OP_PCL_TLS12_RC4_128_SHA		 0x0020
+#define OP_PCL_TLS12_RC4_128_SHA_2		 0x008a
+#define OP_PCL_TLS12_RC4_128_SHA_3		 0x008e
+#define OP_PCL_TLS12_RC4_128_SHA_4		 0x0092
+#define OP_PCL_TLS12_RC4_128_SHA_5		 0x0005
+#define OP_PCL_TLS12_RC4_128_SHA_6		 0xc002
+#define OP_PCL_TLS12_RC4_128_SHA_7		 0xc007
+#define OP_PCL_TLS12_RC4_128_SHA_8		 0xc00c
+#define OP_PCL_TLS12_RC4_128_SHA_9		 0xc011
+#define OP_PCL_TLS12_RC4_128_SHA_10		 0xc016
 
-#define OP_PCL_TLS12_RC4_40_SHA                  0x0028
+#define OP_PCL_TLS12_RC4_40_SHA			 0x0028
 
-/* #define OP_PCL_TLS12_AES_128_CBC_SHA256          0x003c */
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_2        0x003e
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_3        0x003f
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_4        0x0040
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_5        0x0067
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_6        0x006c
+/* #define OP_PCL_TLS12_AES_128_CBC_SHA256	    0x003c */
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_2	 0x003e
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_3	 0x003f
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_4	 0x0040
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_5	 0x0067
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_6	 0x006c
 
-/* #define OP_PCL_TLS12_AES_256_CBC_SHA256          0x003d */
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_2        0x0068
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_3        0x0069
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_4        0x006a
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_5        0x006b
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_6        0x006d
+/* #define OP_PCL_TLS12_AES_256_CBC_SHA256	    0x003d */
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_2	 0x0068
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_3	 0x0069
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_4	 0x006a
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_5	 0x006b
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_6	 0x006d
 
 /* AEAD_AES_xxx_CCM/GCM remain to be defined... */
 
-#define OP_PCL_TLS12_3DES_EDE_CBC_MD5            0xff23
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160         0xff30
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224         0xff34
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256         0xff36
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384         0xff33
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512         0xff35
-#define OP_PCL_TLS12_AES_128_CBC_SHA160          0xff80
-#define OP_PCL_TLS12_AES_128_CBC_SHA224          0xff84
-#define OP_PCL_TLS12_AES_128_CBC_SHA256          0xff86
-#define OP_PCL_TLS12_AES_128_CBC_SHA384          0xff83
-#define OP_PCL_TLS12_AES_128_CBC_SHA512          0xff85
-#define OP_PCL_TLS12_AES_192_CBC_SHA160          0xff20
-#define OP_PCL_TLS12_AES_192_CBC_SHA224          0xff24
-#define OP_PCL_TLS12_AES_192_CBC_SHA256          0xff26
-#define OP_PCL_TLS12_AES_192_CBC_SHA384          0xff23
-#define OP_PCL_TLS12_AES_192_CBC_SHA512          0xff25
-#define OP_PCL_TLS12_AES_256_CBC_SHA160          0xff60
-#define OP_PCL_TLS12_AES_256_CBC_SHA224          0xff64
-#define OP_PCL_TLS12_AES_256_CBC_SHA256          0xff66
-#define OP_PCL_TLS12_AES_256_CBC_SHA384          0xff63
-#define OP_PCL_TLS12_AES_256_CBC_SHA512          0xff65
+#define OP_PCL_TLS12_3DES_EDE_CBC_MD5		 0xff23
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160	 0xff30
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224	 0xff34
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256	 0xff36
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384	 0xff33
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512	 0xff35
+#define OP_PCL_TLS12_AES_128_CBC_SHA160		 0xff80
+#define OP_PCL_TLS12_AES_128_CBC_SHA224		 0xff84
+#define OP_PCL_TLS12_AES_128_CBC_SHA256		 0xff86
+#define OP_PCL_TLS12_AES_128_CBC_SHA384		 0xff83
+#define OP_PCL_TLS12_AES_128_CBC_SHA512		 0xff85
+#define OP_PCL_TLS12_AES_192_CBC_SHA160		 0xff20
+#define OP_PCL_TLS12_AES_192_CBC_SHA224		 0xff24
+#define OP_PCL_TLS12_AES_192_CBC_SHA256		 0xff26
+#define OP_PCL_TLS12_AES_192_CBC_SHA384		 0xff23
+#define OP_PCL_TLS12_AES_192_CBC_SHA512		 0xff25
+#define OP_PCL_TLS12_AES_256_CBC_SHA160		 0xff60
+#define OP_PCL_TLS12_AES_256_CBC_SHA224		 0xff64
+#define OP_PCL_TLS12_AES_256_CBC_SHA256		 0xff66
+#define OP_PCL_TLS12_AES_256_CBC_SHA384		 0xff63
+#define OP_PCL_TLS12_AES_256_CBC_SHA512		 0xff65
 
 /* For DTLS - OP_PCLID_DTLS */
 
-#define OP_PCL_DTLS_AES_128_CBC_SHA              0x002f
-#define OP_PCL_DTLS_AES_128_CBC_SHA_2            0x0030
-#define OP_PCL_DTLS_AES_128_CBC_SHA_3            0x0031
-#define OP_PCL_DTLS_AES_128_CBC_SHA_4            0x0032
-#define OP_PCL_DTLS_AES_128_CBC_SHA_5            0x0033
-#define OP_PCL_DTLS_AES_128_CBC_SHA_6            0x0034
-#define OP_PCL_DTLS_AES_128_CBC_SHA_7            0x008c
-#define OP_PCL_DTLS_AES_128_CBC_SHA_8            0x0090
-#define OP_PCL_DTLS_AES_128_CBC_SHA_9            0x0094
-#define OP_PCL_DTLS_AES_128_CBC_SHA_10           0xc004
-#define OP_PCL_DTLS_AES_128_CBC_SHA_11           0xc009
-#define OP_PCL_DTLS_AES_128_CBC_SHA_12           0xc00e
-#define OP_PCL_DTLS_AES_128_CBC_SHA_13           0xc013
-#define OP_PCL_DTLS_AES_128_CBC_SHA_14           0xc018
-#define OP_PCL_DTLS_AES_128_CBC_SHA_15           0xc01d
-#define OP_PCL_DTLS_AES_128_CBC_SHA_16           0xc01e
-#define OP_PCL_DTLS_AES_128_CBC_SHA_17           0xc01f
+#define OP_PCL_DTLS_AES_128_CBC_SHA		 0x002f
+#define OP_PCL_DTLS_AES_128_CBC_SHA_2		 0x0030
+#define OP_PCL_DTLS_AES_128_CBC_SHA_3		 0x0031
+#define OP_PCL_DTLS_AES_128_CBC_SHA_4		 0x0032
+#define OP_PCL_DTLS_AES_128_CBC_SHA_5		 0x0033
+#define OP_PCL_DTLS_AES_128_CBC_SHA_6		 0x0034
+#define OP_PCL_DTLS_AES_128_CBC_SHA_7		 0x008c
+#define OP_PCL_DTLS_AES_128_CBC_SHA_8		 0x0090
+#define OP_PCL_DTLS_AES_128_CBC_SHA_9		 0x0094
+#define OP_PCL_DTLS_AES_128_CBC_SHA_10		 0xc004
+#define OP_PCL_DTLS_AES_128_CBC_SHA_11		 0xc009
+#define OP_PCL_DTLS_AES_128_CBC_SHA_12		 0xc00e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_13		 0xc013
+#define OP_PCL_DTLS_AES_128_CBC_SHA_14		 0xc018
+#define OP_PCL_DTLS_AES_128_CBC_SHA_15		 0xc01d
+#define OP_PCL_DTLS_AES_128_CBC_SHA_16		 0xc01e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_17		 0xc01f
 
-#define OP_PCL_DTLS_AES_256_CBC_SHA              0x0035
-#define OP_PCL_DTLS_AES_256_CBC_SHA_2            0x0036
-#define OP_PCL_DTLS_AES_256_CBC_SHA_3            0x0037
-#define OP_PCL_DTLS_AES_256_CBC_SHA_4            0x0038
-#define OP_PCL_DTLS_AES_256_CBC_SHA_5            0x0039
-#define OP_PCL_DTLS_AES_256_CBC_SHA_6            0x003a
-#define OP_PCL_DTLS_AES_256_CBC_SHA_7            0x008d
-#define OP_PCL_DTLS_AES_256_CBC_SHA_8            0x0091
-#define OP_PCL_DTLS_AES_256_CBC_SHA_9            0x0095
-#define OP_PCL_DTLS_AES_256_CBC_SHA_10           0xc005
-#define OP_PCL_DTLS_AES_256_CBC_SHA_11           0xc00a
-#define OP_PCL_DTLS_AES_256_CBC_SHA_12           0xc00f
-#define OP_PCL_DTLS_AES_256_CBC_SHA_13           0xc014
-#define OP_PCL_DTLS_AES_256_CBC_SHA_14           0xc019
-#define OP_PCL_DTLS_AES_256_CBC_SHA_15           0xc020
-#define OP_PCL_DTLS_AES_256_CBC_SHA_16           0xc021
-#define OP_PCL_DTLS_AES_256_CBC_SHA_17           0xc022
+#define OP_PCL_DTLS_AES_256_CBC_SHA		 0x0035
+#define OP_PCL_DTLS_AES_256_CBC_SHA_2		 0x0036
+#define OP_PCL_DTLS_AES_256_CBC_SHA_3		 0x0037
+#define OP_PCL_DTLS_AES_256_CBC_SHA_4		 0x0038
+#define OP_PCL_DTLS_AES_256_CBC_SHA_5		 0x0039
+#define OP_PCL_DTLS_AES_256_CBC_SHA_6		 0x003a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_7		 0x008d
+#define OP_PCL_DTLS_AES_256_CBC_SHA_8		 0x0091
+#define OP_PCL_DTLS_AES_256_CBC_SHA_9		 0x0095
+#define OP_PCL_DTLS_AES_256_CBC_SHA_10		 0xc005
+#define OP_PCL_DTLS_AES_256_CBC_SHA_11		 0xc00a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_12		 0xc00f
+#define OP_PCL_DTLS_AES_256_CBC_SHA_13		 0xc014
+#define OP_PCL_DTLS_AES_256_CBC_SHA_14		 0xc019
+#define OP_PCL_DTLS_AES_256_CBC_SHA_15		 0xc020
+#define OP_PCL_DTLS_AES_256_CBC_SHA_16		 0xc021
+#define OP_PCL_DTLS_AES_256_CBC_SHA_17		 0xc022
 
-/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5             0x0023 */
+/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5		    0x0023 */
 
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA             0x001f
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2           0x008b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3           0x008f
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4           0x0093
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5           0x000a
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6           0x000d
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7           0x0010
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8           0x0013
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9           0x0016
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10          0x001b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11          0xc003
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12          0xc008
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13          0xc00d
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14          0xc012
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15          0xc017
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16          0xc01a
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17          0xc01b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18          0xc01c
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA		 0x001f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2		 0x008b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3		 0x008f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4		 0x0093
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5		 0x000a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6		 0x000d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7		 0x0010
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8		 0x0013
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9		 0x0016
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10		 0x001b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11		 0xc003
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12		 0xc008
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13		 0xc00d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14		 0xc012
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15		 0xc017
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16		 0xc01a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17		 0xc01b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18		 0xc01c
 
-#define OP_PCL_DTLS_DES40_CBC_MD5                0x0029
+#define OP_PCL_DTLS_DES40_CBC_MD5		 0x0029
 
-#define OP_PCL_DTLS_DES_CBC_MD5                  0x0022
+#define OP_PCL_DTLS_DES_CBC_MD5			 0x0022
 
-#define OP_PCL_DTLS_DES40_CBC_SHA                0x0008
-#define OP_PCL_DTLS_DES40_CBC_SHA_2              0x000b
-#define OP_PCL_DTLS_DES40_CBC_SHA_3              0x000e
-#define OP_PCL_DTLS_DES40_CBC_SHA_4              0x0011
-#define OP_PCL_DTLS_DES40_CBC_SHA_5              0x0014
-#define OP_PCL_DTLS_DES40_CBC_SHA_6              0x0019
-#define OP_PCL_DTLS_DES40_CBC_SHA_7              0x0026
+#define OP_PCL_DTLS_DES40_CBC_SHA		 0x0008
+#define OP_PCL_DTLS_DES40_CBC_SHA_2		 0x000b
+#define OP_PCL_DTLS_DES40_CBC_SHA_3		 0x000e
+#define OP_PCL_DTLS_DES40_CBC_SHA_4		 0x0011
+#define OP_PCL_DTLS_DES40_CBC_SHA_5		 0x0014
+#define OP_PCL_DTLS_DES40_CBC_SHA_6		 0x0019
+#define OP_PCL_DTLS_DES40_CBC_SHA_7		 0x0026
 
 
-#define OP_PCL_DTLS_DES_CBC_SHA                  0x001e
-#define OP_PCL_DTLS_DES_CBC_SHA_2                0x0009
-#define OP_PCL_DTLS_DES_CBC_SHA_3                0x000c
-#define OP_PCL_DTLS_DES_CBC_SHA_4                0x000f
-#define OP_PCL_DTLS_DES_CBC_SHA_5                0x0012
-#define OP_PCL_DTLS_DES_CBC_SHA_6                0x0015
-#define OP_PCL_DTLS_DES_CBC_SHA_7                0x001a
+#define OP_PCL_DTLS_DES_CBC_SHA			 0x001e
+#define OP_PCL_DTLS_DES_CBC_SHA_2		 0x0009
+#define OP_PCL_DTLS_DES_CBC_SHA_3		 0x000c
+#define OP_PCL_DTLS_DES_CBC_SHA_4		 0x000f
+#define OP_PCL_DTLS_DES_CBC_SHA_5		 0x0012
+#define OP_PCL_DTLS_DES_CBC_SHA_6		 0x0015
+#define OP_PCL_DTLS_DES_CBC_SHA_7		 0x001a
 
 
-#define OP_PCL_DTLS_3DES_EDE_CBC_MD5             0xff23
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160          0xff30
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224          0xff34
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256          0xff36
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384          0xff33
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512          0xff35
-#define OP_PCL_DTLS_AES_128_CBC_SHA160           0xff80
-#define OP_PCL_DTLS_AES_128_CBC_SHA224           0xff84
-#define OP_PCL_DTLS_AES_128_CBC_SHA256           0xff86
-#define OP_PCL_DTLS_AES_128_CBC_SHA384           0xff83
-#define OP_PCL_DTLS_AES_128_CBC_SHA512           0xff85
-#define OP_PCL_DTLS_AES_192_CBC_SHA160           0xff20
-#define OP_PCL_DTLS_AES_192_CBC_SHA224           0xff24
-#define OP_PCL_DTLS_AES_192_CBC_SHA256           0xff26
-#define OP_PCL_DTLS_AES_192_CBC_SHA384           0xff23
-#define OP_PCL_DTLS_AES_192_CBC_SHA512           0xff25
-#define OP_PCL_DTLS_AES_256_CBC_SHA160           0xff60
-#define OP_PCL_DTLS_AES_256_CBC_SHA224           0xff64
-#define OP_PCL_DTLS_AES_256_CBC_SHA256           0xff66
-#define OP_PCL_DTLS_AES_256_CBC_SHA384           0xff63
-#define OP_PCL_DTLS_AES_256_CBC_SHA512           0xff65
+#define OP_PCL_DTLS_3DES_EDE_CBC_MD5		 0xff23
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160		 0xff30
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224		 0xff34
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256		 0xff36
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384		 0xff33
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512		 0xff35
+#define OP_PCL_DTLS_AES_128_CBC_SHA160		 0xff80
+#define OP_PCL_DTLS_AES_128_CBC_SHA224		 0xff84
+#define OP_PCL_DTLS_AES_128_CBC_SHA256		 0xff86
+#define OP_PCL_DTLS_AES_128_CBC_SHA384		 0xff83
+#define OP_PCL_DTLS_AES_128_CBC_SHA512		 0xff85
+#define OP_PCL_DTLS_AES_192_CBC_SHA160		 0xff20
+#define OP_PCL_DTLS_AES_192_CBC_SHA224		 0xff24
+#define OP_PCL_DTLS_AES_192_CBC_SHA256		 0xff26
+#define OP_PCL_DTLS_AES_192_CBC_SHA384		 0xff23
+#define OP_PCL_DTLS_AES_192_CBC_SHA512		 0xff25
+#define OP_PCL_DTLS_AES_256_CBC_SHA160		 0xff60
+#define OP_PCL_DTLS_AES_256_CBC_SHA224		 0xff64
+#define OP_PCL_DTLS_AES_256_CBC_SHA256		 0xff66
+#define OP_PCL_DTLS_AES_256_CBC_SHA384		 0xff63
+#define OP_PCL_DTLS_AES_256_CBC_SHA512		 0xff65
 
 /* 802.16 WiMAX protinfos */
-#define OP_PCL_WIMAX_OFDM                        0x0201
-#define OP_PCL_WIMAX_OFDMA                       0x0231
+#define OP_PCL_WIMAX_OFDM			 0x0201
+#define OP_PCL_WIMAX_OFDMA			 0x0231
 
 /* 802.11 WiFi protinfos */
-#define OP_PCL_WIFI                              0xac04
+#define OP_PCL_WIFI				 0xac04
 
 /* MacSec protinfos */
-#define OP_PCL_MACSEC                            0x0001
+#define OP_PCL_MACSEC				 0x0001
 
 /* PKI unidirectional protocol protinfo bits */
-#define OP_PCL_PKPROT_TEST                       0x0008
-#define OP_PCL_PKPROT_DECRYPT                    0x0004
-#define OP_PCL_PKPROT_ECC                        0x0002
-#define OP_PCL_PKPROT_F2M                        0x0001
+#define OP_PCL_PKPROT_TEST			 0x0008
+#define OP_PCL_PKPROT_DECRYPT			 0x0004
+#define OP_PCL_PKPROT_ECC			 0x0002
+#define OP_PCL_PKPROT_F2M			 0x0001
 
 /* For non-protocol/alg-only op commands */
 #define OP_ALG_TYPE_SHIFT	24
@@ -1181,38 +1181,38 @@
 #define OP_ALG_ENCRYPT		1
 
 /* PKHA algorithm type set */
-#define OP_ALG_PK                    0x00800000
-#define OP_ALG_PK_FUN_MASK           0x3f /* clrmem, modmath, or cpymem */
+#define OP_ALG_PK		     0x00800000
+#define OP_ALG_PK_FUN_MASK	     0x3f /* clrmem, modmath, or cpymem */
 
 /* PKHA mode clear memory functions */
-#define OP_ALG_PKMODE_A_RAM          0x80000
-#define OP_ALG_PKMODE_B_RAM          0x40000
-#define OP_ALG_PKMODE_E_RAM          0x20000
-#define OP_ALG_PKMODE_N_RAM          0x10000
-#define OP_ALG_PKMODE_CLEARMEM       0x00001
+#define OP_ALG_PKMODE_A_RAM	     0x80000
+#define OP_ALG_PKMODE_B_RAM	     0x40000
+#define OP_ALG_PKMODE_E_RAM	     0x20000
+#define OP_ALG_PKMODE_N_RAM	     0x10000
+#define OP_ALG_PKMODE_CLEARMEM	     0x00001
 
 /* PKHA mode modular-arithmetic functions */
 #define OP_ALG_PKMODE_MOD_IN_MONTY   0x80000
 #define OP_ALG_PKMODE_MOD_OUT_MONTY  0x40000
-#define OP_ALG_PKMODE_MOD_F2M        0x20000
+#define OP_ALG_PKMODE_MOD_F2M	     0x20000
 #define OP_ALG_PKMODE_MOD_R2_IN      0x10000
-#define OP_ALG_PKMODE_PRJECTV        0x00800
-#define OP_ALG_PKMODE_TIME_EQ        0x400
-#define OP_ALG_PKMODE_OUT_B          0x000
-#define OP_ALG_PKMODE_OUT_A          0x100
-#define OP_ALG_PKMODE_MOD_ADD        0x002
+#define OP_ALG_PKMODE_PRJECTV	     0x00800
+#define OP_ALG_PKMODE_TIME_EQ	     0x400
+#define OP_ALG_PKMODE_OUT_B	     0x000
+#define OP_ALG_PKMODE_OUT_A	     0x100
+#define OP_ALG_PKMODE_MOD_ADD	     0x002
 #define OP_ALG_PKMODE_MOD_SUB_AB     0x003
 #define OP_ALG_PKMODE_MOD_SUB_BA     0x004
-#define OP_ALG_PKMODE_MOD_MULT       0x005
-#define OP_ALG_PKMODE_MOD_EXPO       0x006
+#define OP_ALG_PKMODE_MOD_MULT	     0x005
+#define OP_ALG_PKMODE_MOD_EXPO	     0x006
 #define OP_ALG_PKMODE_MOD_REDUCT     0x007
-#define OP_ALG_PKMODE_MOD_INV        0x008
+#define OP_ALG_PKMODE_MOD_INV	     0x008
 #define OP_ALG_PKMODE_MOD_ECC_ADD    0x009
 #define OP_ALG_PKMODE_MOD_ECC_DBL    0x00a
 #define OP_ALG_PKMODE_MOD_ECC_MULT   0x00b
 #define OP_ALG_PKMODE_MOD_MONT_CNST  0x00c
 #define OP_ALG_PKMODE_MOD_CRT_CNST   0x00d
-#define OP_ALG_PKMODE_MOD_GCD        0x00e
+#define OP_ALG_PKMODE_MOD_GCD	     0x00e
 #define OP_ALG_PKMODE_MOD_PRIMALITY  0x00f
 
 /* PKHA mode copy-memory functions */
@@ -1248,47 +1248,47 @@
  */
 
 /* Release Buffers */
-#define SQIN_RBS               0x04000000
+#define SQIN_RBS	       0x04000000
 
 /* Sequence pointer is really a descriptor */
-#define SQIN_INL               0x02000000
+#define SQIN_INL	       0x02000000
 
 /* Sequence pointer is a scatter-gather table */
-#define SQIN_SGF               0x01000000
+#define SQIN_SGF	       0x01000000
 
 /* Appends to a previous pointer */
-#define SQIN_PRE               0x00800000
+#define SQIN_PRE	       0x00800000
 
 /* Use extended length following pointer */
-#define SQIN_EXT               0x00400000
+#define SQIN_EXT	       0x00400000
 
 /* Restore sequence with pointer/length */
-#define SQIN_RTO               0x00200000
+#define SQIN_RTO	       0x00200000
 
 /* Replace job descriptor */
-#define SQIN_RJD               0x00100000
+#define SQIN_RJD	       0x00100000
 
-#define SQIN_LEN_SHIFT           0
-#define SQIN_LEN_MASK           (0xffff << SQIN_LEN_SHIFT)
+#define SQIN_LEN_SHIFT		 0
+#define SQIN_LEN_MASK		(0xffff << SQIN_LEN_SHIFT)
 
 /*
  * SEQ_OUT_PTR Command Constructs
  */
 
 /* Sequence pointer is a scatter-gather table */
-#define SQOUT_SGF              0x01000000
+#define SQOUT_SGF	       0x01000000
 
 /* Appends to a previous pointer */
-#define SQOUT_PRE              0x00800000
+#define SQOUT_PRE	       0x00800000
 
 /* Restore sequence with pointer/length */
-#define SQOUT_RTO              0x00200000
+#define SQOUT_RTO	       0x00200000
 
 /* Use extended length following pointer */
-#define SQOUT_EXT              0x00400000
+#define SQOUT_EXT	       0x00400000
 
-#define SQOUT_LEN_SHIFT           0
-#define SQOUT_LEN_MASK           (0xffff << SQOUT_LEN_SHIFT)
+#define SQOUT_LEN_SHIFT		  0
+#define SQOUT_LEN_MASK		 (0xffff << SQOUT_LEN_SHIFT)
 
 
 /*
@@ -1296,196 +1296,196 @@
  */
 
 /* TYPE field is all that's relevant */
-#define SIGN_TYPE_SHIFT         16
-#define SIGN_TYPE_MASK          (0x0f << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_SHIFT		16
+#define SIGN_TYPE_MASK		(0x0f << SIGN_TYPE_SHIFT)
 
-#define SIGN_TYPE_FINAL         (0x00 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL		(0x00 << SIGN_TYPE_SHIFT)
 #define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
 #define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
-#define SIGN_TYPE_IMM_2         (0x0a << SIGN_TYPE_SHIFT)
-#define SIGN_TYPE_IMM_3         (0x0b << SIGN_TYPE_SHIFT)
-#define SIGN_TYPE_IMM_4         (0x0c << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_2		(0x0a << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_3		(0x0b << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_4		(0x0c << SIGN_TYPE_SHIFT)
 
 /*
  * MOVE Command Constructs
  */
 
-#define MOVE_AUX_SHIFT          25
-#define MOVE_AUX_MASK           (3 << MOVE_AUX_SHIFT)
-#define MOVE_AUX_MS             (2 << MOVE_AUX_SHIFT)
-#define MOVE_AUX_LS             (1 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_SHIFT		25
+#define MOVE_AUX_MASK		(3 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_MS		(2 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_LS		(1 << MOVE_AUX_SHIFT)
 
-#define MOVE_WAITCOMP_SHIFT     24
-#define MOVE_WAITCOMP_MASK      (1 << MOVE_WAITCOMP_SHIFT)
-#define MOVE_WAITCOMP           (1 << MOVE_WAITCOMP_SHIFT)
+#define MOVE_WAITCOMP_SHIFT	24
+#define MOVE_WAITCOMP_MASK	(1 << MOVE_WAITCOMP_SHIFT)
+#define MOVE_WAITCOMP		(1 << MOVE_WAITCOMP_SHIFT)
 
-#define MOVE_SRC_SHIFT          20
-#define MOVE_SRC_MASK           (0x0f << MOVE_SRC_SHIFT)
-#define MOVE_SRC_CLASS1CTX      (0x00 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_CLASS2CTX      (0x01 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_OUTFIFO        (0x02 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_DESCBUF        (0x03 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_MATH0          (0x04 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_MATH1          (0x05 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_MATH2          (0x06 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_MATH3          (0x07 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_INFIFO         (0x08 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_INFIFO_CL      (0x09 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_SHIFT		20
+#define MOVE_SRC_MASK		(0x0f << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS1CTX	(0x00 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS2CTX	(0x01 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_OUTFIFO	(0x02 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_DESCBUF	(0x03 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH0		(0x04 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH1		(0x05 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH2		(0x06 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH3		(0x07 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO		(0x08 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_CL	(0x09 << MOVE_SRC_SHIFT)
 
-#define MOVE_DEST_SHIFT         16
-#define MOVE_DEST_MASK          (0x0f << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS1CTX     (0x00 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS2CTX     (0x01 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_OUTFIFO       (0x02 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_DESCBUF       (0x03 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_MATH0         (0x04 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_MATH1         (0x05 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_MATH2         (0x06 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_MATH3         (0x07 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS1INFIFO  (0x08 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS2INFIFO  (0x09 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_PK_A          (0x0c << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS1KEY     (0x0d << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS2KEY     (0x0e << MOVE_DEST_SHIFT)
+#define MOVE_DEST_SHIFT		16
+#define MOVE_DEST_MASK		(0x0f << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1CTX	(0x00 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2CTX	(0x01 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_OUTFIFO	(0x02 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_DESCBUF	(0x03 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH0		(0x04 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH1		(0x05 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH2		(0x06 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH3		(0x07 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1INFIFO	(0x08 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2INFIFO	(0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_PK_A		(0x0c << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1KEY	(0x0d << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2KEY	(0x0e << MOVE_DEST_SHIFT)
 
-#define MOVE_OFFSET_SHIFT       8
-#define MOVE_OFFSET_MASK        (0xff << MOVE_OFFSET_SHIFT)
+#define MOVE_OFFSET_SHIFT	8
+#define MOVE_OFFSET_MASK	(0xff << MOVE_OFFSET_SHIFT)
 
-#define MOVE_LEN_SHIFT          0
-#define MOVE_LEN_MASK           (0xff << MOVE_LEN_SHIFT)
+#define MOVE_LEN_SHIFT		0
+#define MOVE_LEN_MASK		(0xff << MOVE_LEN_SHIFT)
 
-#define MOVELEN_MRSEL_SHIFT     0
-#define MOVELEN_MRSEL_MASK      (0x3 << MOVE_LEN_SHIFT)
+#define MOVELEN_MRSEL_SHIFT	0
+#define MOVELEN_MRSEL_MASK	(0x3 << MOVE_LEN_SHIFT)
 
 /*
  * MATH Command Constructs
  */
 
-#define MATH_IFB_SHIFT          26
-#define MATH_IFB_MASK           (1 << MATH_IFB_SHIFT)
-#define MATH_IFB                (1 << MATH_IFB_SHIFT)
+#define MATH_IFB_SHIFT		26
+#define MATH_IFB_MASK		(1 << MATH_IFB_SHIFT)
+#define MATH_IFB		(1 << MATH_IFB_SHIFT)
 
-#define MATH_NFU_SHIFT          25
-#define MATH_NFU_MASK           (1 << MATH_NFU_SHIFT)
-#define MATH_NFU                (1 << MATH_NFU_SHIFT)
+#define MATH_NFU_SHIFT		25
+#define MATH_NFU_MASK		(1 << MATH_NFU_SHIFT)
+#define MATH_NFU		(1 << MATH_NFU_SHIFT)
 
-#define MATH_STL_SHIFT          24
-#define MATH_STL_MASK           (1 << MATH_STL_SHIFT)
-#define MATH_STL                (1 << MATH_STL_SHIFT)
+#define MATH_STL_SHIFT		24
+#define MATH_STL_MASK		(1 << MATH_STL_SHIFT)
+#define MATH_STL		(1 << MATH_STL_SHIFT)
 
 /* Function selectors */
-#define MATH_FUN_SHIFT          20
-#define MATH_FUN_MASK           (0x0f << MATH_FUN_SHIFT)
-#define MATH_FUN_ADD            (0x00 << MATH_FUN_SHIFT)
-#define MATH_FUN_ADDC           (0x01 << MATH_FUN_SHIFT)
-#define MATH_FUN_SUB            (0x02 << MATH_FUN_SHIFT)
-#define MATH_FUN_SUBB           (0x03 << MATH_FUN_SHIFT)
-#define MATH_FUN_OR             (0x04 << MATH_FUN_SHIFT)
-#define MATH_FUN_AND            (0x05 << MATH_FUN_SHIFT)
-#define MATH_FUN_XOR            (0x06 << MATH_FUN_SHIFT)
-#define MATH_FUN_LSHIFT         (0x07 << MATH_FUN_SHIFT)
-#define MATH_FUN_RSHIFT         (0x08 << MATH_FUN_SHIFT)
-#define MATH_FUN_SHLD           (0x09 << MATH_FUN_SHIFT)
-#define MATH_FUN_ZBYT           (0x0a << MATH_FUN_SHIFT)
+#define MATH_FUN_SHIFT		20
+#define MATH_FUN_MASK		(0x0f << MATH_FUN_SHIFT)
+#define MATH_FUN_ADD		(0x00 << MATH_FUN_SHIFT)
+#define MATH_FUN_ADDC		(0x01 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUB		(0x02 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUBB		(0x03 << MATH_FUN_SHIFT)
+#define MATH_FUN_OR		(0x04 << MATH_FUN_SHIFT)
+#define MATH_FUN_AND		(0x05 << MATH_FUN_SHIFT)
+#define MATH_FUN_XOR		(0x06 << MATH_FUN_SHIFT)
+#define MATH_FUN_LSHIFT		(0x07 << MATH_FUN_SHIFT)
+#define MATH_FUN_RSHIFT		(0x08 << MATH_FUN_SHIFT)
+#define MATH_FUN_SHLD		(0x09 << MATH_FUN_SHIFT)
+#define MATH_FUN_ZBYT		(0x0a << MATH_FUN_SHIFT)
 
 /* Source 0 selectors */
-#define MATH_SRC0_SHIFT         16
-#define MATH_SRC0_MASK          (0x0f << MATH_SRC0_SHIFT)
-#define MATH_SRC0_REG0          (0x00 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_REG1          (0x01 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_REG2          (0x02 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_REG3          (0x03 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_IMM           (0x04 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_SEQINLEN      (0x08 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_SEQOUTLEN     (0x09 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_VARSEQINLEN   (0x0a << MATH_SRC0_SHIFT)
-#define MATH_SRC0_VARSEQOUTLEN  (0x0b << MATH_SRC0_SHIFT)
-#define MATH_SRC0_ZERO          (0x0c << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SHIFT		16
+#define MATH_SRC0_MASK		(0x0f << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG0		(0x00 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG1		(0x01 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG2		(0x02 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG3		(0x03 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_IMM		(0x04 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQINLEN	(0x08 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQOUTLEN	(0x09 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQINLEN	(0x0a << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQOUTLEN	(0x0b << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ZERO		(0x0c << MATH_SRC0_SHIFT)
 
 /* Source 1 selectors */
-#define MATH_SRC1_SHIFT         12
-#define MATH_SRC1_MASK          (0x0f << MATH_SRC1_SHIFT)
-#define MATH_SRC1_REG0          (0x00 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_REG1          (0x01 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_REG2          (0x02 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_REG3          (0x03 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_IMM           (0x04 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_INFIFO        (0x0a << MATH_SRC1_SHIFT)
-#define MATH_SRC1_OUTFIFO       (0x0b << MATH_SRC1_SHIFT)
-#define MATH_SRC1_ONE           (0x0c << MATH_SRC1_SHIFT)
+#define MATH_SRC1_SHIFT		12
+#define MATH_SRC1_MASK		(0x0f << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG0		(0x00 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG1		(0x01 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG2		(0x02 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG3		(0x03 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_IMM		(0x04 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_INFIFO	(0x0a << MATH_SRC1_SHIFT)
+#define MATH_SRC1_OUTFIFO	(0x0b << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ONE		(0x0c << MATH_SRC1_SHIFT)
 
 /* Destination selectors */
-#define MATH_DEST_SHIFT         8
-#define MATH_DEST_MASK          (0x0f << MATH_DEST_SHIFT)
-#define MATH_DEST_REG0          (0x00 << MATH_DEST_SHIFT)
-#define MATH_DEST_REG1          (0x01 << MATH_DEST_SHIFT)
-#define MATH_DEST_REG2          (0x02 << MATH_DEST_SHIFT)
-#define MATH_DEST_REG3          (0x03 << MATH_DEST_SHIFT)
-#define MATH_DEST_SEQINLEN      (0x08 << MATH_DEST_SHIFT)
-#define MATH_DEST_SEQOUTLEN     (0x09 << MATH_DEST_SHIFT)
-#define MATH_DEST_VARSEQINLEN   (0x0a << MATH_DEST_SHIFT)
-#define MATH_DEST_VARSEQOUTLEN  (0x0b << MATH_DEST_SHIFT)
-#define MATH_DEST_NONE          (0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_SHIFT		8
+#define MATH_DEST_MASK		(0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_REG0		(0x00 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG1		(0x01 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG2		(0x02 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG3		(0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQINLEN	(0x08 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQOUTLEN	(0x09 << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQINLEN	(0x0a << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQOUTLEN	(0x0b << MATH_DEST_SHIFT)
+#define MATH_DEST_NONE		(0x0f << MATH_DEST_SHIFT)
 
 /* Length selectors */
-#define MATH_LEN_SHIFT          0
-#define MATH_LEN_MASK           (0x0f << MATH_LEN_SHIFT)
-#define MATH_LEN_1BYTE          0x01
-#define MATH_LEN_2BYTE          0x02
-#define MATH_LEN_4BYTE          0x04
-#define MATH_LEN_8BYTE          0x08
+#define MATH_LEN_SHIFT		0
+#define MATH_LEN_MASK		(0x0f << MATH_LEN_SHIFT)
+#define MATH_LEN_1BYTE		0x01
+#define MATH_LEN_2BYTE		0x02
+#define MATH_LEN_4BYTE		0x04
+#define MATH_LEN_8BYTE		0x08
 
 /*
  * JUMP Command Constructs
  */
 
-#define JUMP_CLASS_SHIFT        25
+#define JUMP_CLASS_SHIFT	25
 #define JUMP_CLASS_MASK		(3 << JUMP_CLASS_SHIFT)
 #define JUMP_CLASS_NONE		0
 #define JUMP_CLASS_CLASS1	(1 << JUMP_CLASS_SHIFT)
 #define JUMP_CLASS_CLASS2	(2 << JUMP_CLASS_SHIFT)
 #define JUMP_CLASS_BOTH		(3 << JUMP_CLASS_SHIFT)
 
-#define JUMP_JSL_SHIFT          24
-#define JUMP_JSL_MASK           (1 << JUMP_JSL_SHIFT)
-#define JUMP_JSL                (1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL_SHIFT		24
+#define JUMP_JSL_MASK		(1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL		(1 << JUMP_JSL_SHIFT)
 
-#define JUMP_TYPE_SHIFT         22
-#define JUMP_TYPE_MASK          (0x03 << JUMP_TYPE_SHIFT)
-#define JUMP_TYPE_LOCAL         (0x00 << JUMP_TYPE_SHIFT)
-#define JUMP_TYPE_NONLOCAL      (0x01 << JUMP_TYPE_SHIFT)
-#define JUMP_TYPE_HALT          (0x02 << JUMP_TYPE_SHIFT)
-#define JUMP_TYPE_HALT_USER     (0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_SHIFT		22
+#define JUMP_TYPE_MASK		(0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL		(0x00 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_NONLOCAL	(0x01 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT		(0x02 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT_USER	(0x03 << JUMP_TYPE_SHIFT)
 
-#define JUMP_TEST_SHIFT         16
-#define JUMP_TEST_MASK          (0x03 << JUMP_TEST_SHIFT)
-#define JUMP_TEST_ALL           (0x00 << JUMP_TEST_SHIFT)
-#define JUMP_TEST_INVALL        (0x01 << JUMP_TEST_SHIFT)
-#define JUMP_TEST_ANY           (0x02 << JUMP_TEST_SHIFT)
-#define JUMP_TEST_INVANY        (0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_SHIFT		16
+#define JUMP_TEST_MASK		(0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ALL		(0x00 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVALL	(0x01 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ANY		(0x02 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVANY	(0x03 << JUMP_TEST_SHIFT)
 
 /* Condition codes. JSL bit is factored in */
-#define JUMP_COND_SHIFT         8
-#define JUMP_COND_MASK          (0x100ff << JUMP_COND_SHIFT)
-#define JUMP_COND_PK_0          (0x80 << JUMP_COND_SHIFT)
-#define JUMP_COND_PK_GCD_1      (0x40 << JUMP_COND_SHIFT)
-#define JUMP_COND_PK_PRIME      (0x20 << JUMP_COND_SHIFT)
-#define JUMP_COND_MATH_N        (0x08 << JUMP_COND_SHIFT)
-#define JUMP_COND_MATH_Z        (0x04 << JUMP_COND_SHIFT)
-#define JUMP_COND_MATH_C        (0x02 << JUMP_COND_SHIFT)
-#define JUMP_COND_MATH_NV       (0x01 << JUMP_COND_SHIFT)
+#define JUMP_COND_SHIFT		8
+#define JUMP_COND_MASK		(0x100ff << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_0		(0x80 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_GCD_1	(0x40 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_PRIME	(0x20 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_N	(0x08 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_Z	(0x04 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_C	(0x02 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_NV	(0x01 << JUMP_COND_SHIFT)
 
-#define JUMP_COND_JRP           ((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_SHRD          ((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_SELF          ((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_CALM          ((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_NIP           ((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_NIFP          ((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_NOP           ((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_NCP           ((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_JRP		((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SHRD		((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SELF		((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_CALM		((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIP		((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIFP		((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NOP		((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NCP		((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
 
-#define JUMP_OFFSET_SHIFT       0
-#define JUMP_OFFSET_MASK        (0xff << JUMP_OFFSET_SHIFT)
+#define JUMP_OFFSET_SHIFT	0
+#define JUMP_OFFSET_MASK	(0xff << JUMP_OFFSET_SHIFT)
 
 /*
  * NFIFO ENTRY
@@ -1525,26 +1525,26 @@
 #define NFIFOENTRY_DTYPE_SHIFT	20
 #define NFIFOENTRY_DTYPE_MASK	(0xF << NFIFOENTRY_DTYPE_SHIFT)
 
-#define NFIFOENTRY_DTYPE_SBOX      (0x0  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_AAD       (0x1  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_IV        (0x2  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_SAD       (0x3  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_ICV       (0xA  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_SKIP      (0xE  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_MSG       (0xF  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SBOX	   (0x0  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_AAD	   (0x1  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_IV	   (0x2  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SAD	   (0x3  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_ICV	   (0xA  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SKIP	   (0xE  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_MSG	   (0xF  << NFIFOENTRY_DTYPE_SHIFT)
 
-#define NFIFOENTRY_DTYPE_PK_A0     (0x0  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A1     (0x1  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A2     (0x2  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A3     (0x3  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B0     (0x4  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B1     (0x5  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B2     (0x6  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B3     (0x7  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_N      (0x8  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_E      (0x9  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A      (0xC  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B      (0xD  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A0	   (0x0  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A1	   (0x1  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A2	   (0x2  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A3	   (0x3  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B0	   (0x4  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B1	   (0x5  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B2	   (0x6  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B3	   (0x7  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_N	   (0x8  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_E	   (0x9  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A	   (0xC  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B	   (0xD  << NFIFOENTRY_DTYPE_SHIFT)
 
 
 #define NFIFOENTRY_BND_SHIFT	19
@@ -1554,13 +1554,13 @@
 #define NFIFOENTRY_PTYPE_SHIFT	16
 #define NFIFOENTRY_PTYPE_MASK	(0x7 << NFIFOENTRY_PTYPE_SHIFT)
 
-#define NFIFOENTRY_PTYPE_ZEROS         (0x0  << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS	       (0x0  << NFIFOENTRY_PTYPE_SHIFT)
 #define NFIFOENTRY_PTYPE_RND_NOZEROS   (0x1  << NFIFOENTRY_PTYPE_SHIFT)
 #define NFIFOENTRY_PTYPE_INCREMENT     (0x2  << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND           (0x3  << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND	       (0x3  << NFIFOENTRY_PTYPE_SHIFT)
 #define NFIFOENTRY_PTYPE_ZEROS_NZ      (0x4  << NFIFOENTRY_PTYPE_SHIFT)
 #define NFIFOENTRY_PTYPE_RND_NZ_LZ     (0x5  << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_N             (0x6  << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_N	       (0x6  << NFIFOENTRY_PTYPE_SHIFT)
 #define NFIFOENTRY_PTYPE_RND_NZ_N      (0x7  << NFIFOENTRY_PTYPE_SHIFT)
 
 #define NFIFOENTRY_OC_SHIFT		15
@@ -1591,15 +1591,15 @@
  */
 
 /* IPSec ESP CBC Encap/Decap Options */
-#define PDBOPTS_ESPCBC_ARSNONE  0x00   /* no antireplay window              */
-#define PDBOPTS_ESPCBC_ARS32    0x40   /* 32-entry antireplay window        */
-#define PDBOPTS_ESPCBC_ARS64    0xc0   /* 64-entry antireplay window        */
-#define PDBOPTS_ESPCBC_IVSRC    0x20   /* IV comes from internal random gen */
-#define PDBOPTS_ESPCBC_ESN      0x10   /* extended sequence included        */
-#define PDBOPTS_ESPCBC_OUTFMT   0x08   /* output only decapsulation (decap) */
+#define PDBOPTS_ESPCBC_ARSNONE	0x00   /* no antireplay window		    */
+#define PDBOPTS_ESPCBC_ARS32	0x40   /* 32-entry antireplay window	    */
+#define PDBOPTS_ESPCBC_ARS64	0xc0   /* 64-entry antireplay window	    */
+#define PDBOPTS_ESPCBC_IVSRC	0x20   /* IV comes from internal random gen */
+#define PDBOPTS_ESPCBC_ESN	0x10   /* extended sequence included	    */
+#define PDBOPTS_ESPCBC_OUTFMT	0x08   /* output only decapsulation (decap) */
 #define PDBOPTS_ESPCBC_IPHDRSRC 0x08   /* IP header comes from PDB (encap)  */
 #define PDBOPTS_ESPCBC_INCIPHDR 0x04   /* Prepend IP header to output frame */
-#define PDBOPTS_ESPCBC_IPVSN    0x02   /* process IPv6 header               */
-#define PDBOPTS_ESPCBC_TUNNEL   0x01   /* tunnel mode next-header byte      */
+#define PDBOPTS_ESPCBC_IPVSN	0x02   /* process IPv6 header		    */
+#define PDBOPTS_ESPCBC_TUNNEL	0x01   /* tunnel mode next-header byte	    */
 
 #endif /* DESC_H */

From b028b546a6a36aa70ec504c00d3d94fae32acf86 Mon Sep 17 00:00:00 2001
From: Kim Phillips <kim.phillips@freescale.com>
Date: Mon, 12 Dec 2011 14:59:14 -0600
Subject: [PATCH 44/54] crypto: caam - more desc.h cleanups

Manual removal of double spaces - no non-whitespace changes.

Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/caam/desc.h | 331 ++++++++++++++++++-------------------
 1 file changed, 165 insertions(+), 166 deletions(-)

diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 090c91661f15..6984cc564932 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -74,10 +74,10 @@
 #define HDR_START_IDX_SHIFT	16
 
 /* If shared descriptor header, 6-bit length */
-#define HDR_DESCLEN_SHR_MASK  0x3f
+#define HDR_DESCLEN_SHR_MASK	0x3f
 
 /* If non-shared header, 7-bit length */
-#define HDR_DESCLEN_MASK      0x7f
+#define HDR_DESCLEN_MASK	0x7f
 
 /* This is a TrustedDesc (if not SharedDesc) */
 #define HDR_TRUSTED		0x00004000
@@ -120,8 +120,8 @@
  * KEY/SEQ_KEY Command Constructs
  */
 
-/* Key Destination Class: 01 = Class 1, 02 - Class 2  */
-#define KEY_DEST_CLASS_SHIFT	25  /* use CLASS_1 or CLASS_2 */
+/* Key Destination Class: 01 = Class 1, 02 - Class 2 */
+#define KEY_DEST_CLASS_SHIFT	25	/* use CLASS_1 or CLASS_2 */
 #define KEY_DEST_CLASS_MASK	(0x03 << KEY_DEST_CLASS_SHIFT)
 
 /* Scatter-Gather Table/Variable Length Field */
@@ -186,12 +186,12 @@
 #define LDST_SGF		0x01000000
 #define LDST_VLF		LDST_SGF
 
-/* Immediate - Key follows this command in descriptor	 */
+/* Immediate - Key follows this command in descriptor */
 #define LDST_IMM_MASK		1
 #define LDST_IMM_SHIFT		23
 #define LDST_IMM		(LDST_IMM_MASK << LDST_IMM_SHIFT)
 
-/* SRC/DST - Destination for LOAD, Source for STORE   */
+/* SRC/DST - Destination for LOAD, Source for STORE */
 #define LDST_SRCDST_SHIFT	16
 #define LDST_SRCDST_MASK	(0x7f << LDST_SRCDST_SHIFT)
 
@@ -224,17 +224,17 @@
 #define LDST_SRCDST_WORD_DESCBUF	(0x40 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_INFO_FIFO	(0x7a << LDST_SRCDST_SHIFT)
 
-/* Offset in source/destination			       */
+/* Offset in source/destination */
 #define LDST_OFFSET_SHIFT	8
 #define LDST_OFFSET_MASK	(0xff << LDST_OFFSET_SHIFT)
 
 /* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
 /* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
-#define LDOFF_CHG_SHARE_SHIFT	     0
-#define LDOFF_CHG_SHARE_MASK	     (0x3 << LDOFF_CHG_SHARE_SHIFT)
-#define LDOFF_CHG_SHARE_NEVER	     (0x1 << LDOFF_CHG_SHARE_SHIFT)
-#define LDOFF_CHG_SHARE_OK_NO_PROP   (0x2 << LDOFF_CHG_SHARE_SHIFT)
-#define LDOFF_CHG_SHARE_OK_PROP      (0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_SHIFT		0
+#define LDOFF_CHG_SHARE_MASK		(0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_NEVER		(0x1 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_NO_PROP	(0x2 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_PROP		(0x3 << LDOFF_CHG_SHARE_SHIFT)
 
 #define LDOFF_ENABLE_AUTO_NFIFO		(1 << 2)
 #define LDOFF_DISABLE_AUTO_NFIFO	(1 << 3)
@@ -245,24 +245,24 @@
 #define LDOFF_CHG_NONSEQLIODN_NON_SEQ	(0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
 #define LDOFF_CHG_NONSEQLIODN_TRUSTED	(0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
 
-#define LDOFF_CHG_SEQLIODN_SHIFT     6
-#define LDOFF_CHG_SEQLIODN_MASK      (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
-#define LDOFF_CHG_SEQLIODN_SEQ	     (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
-#define LDOFF_CHG_SEQLIODN_NON_SEQ   (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
-#define LDOFF_CHG_SEQLIODN_TRUSTED   (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SHIFT	6
+#define LDOFF_CHG_SEQLIODN_MASK		(0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SEQ		(0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_NON_SEQ	(0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_TRUSTED	(0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
 
-/* Data length in bytes					*/
+/* Data length in bytes	*/
 #define LDST_LEN_SHIFT		0
 #define LDST_LEN_MASK		(0xff << LDST_LEN_SHIFT)
 
 /* Special Length definitions when dst=deco-ctrl */
-#define LDLEN_ENABLE_OSL_COUNT	    (1 << 7)
-#define LDLEN_RST_CHA_OFIFO_PTR     (1 << 6)
-#define LDLEN_RST_OFIFO		    (1 << 5)
-#define LDLEN_SET_OFIFO_OFF_VALID   (1 << 4)
-#define LDLEN_SET_OFIFO_OFF_RSVD    (1 << 3)
-#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
-#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+#define LDLEN_ENABLE_OSL_COUNT		(1 << 7)
+#define LDLEN_RST_CHA_OFIFO_PTR		(1 << 6)
+#define LDLEN_RST_OFIFO			(1 << 5)
+#define LDLEN_SET_OFIFO_OFF_VALID	(1 << 4)
+#define LDLEN_SET_OFIFO_OFF_RSVD	(1 << 3)
+#define LDLEN_SET_OFIFO_OFFSET_SHIFT	0
+#define LDLEN_SET_OFIFO_OFFSET_MASK	(3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
 
 /*
  * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
@@ -299,14 +299,14 @@
 #define FIFOLDST_VLF		(1 << FIFOLDST_SGF_SHIFT)
 
 /* Immediate - Data follows command in descriptor */
-#define FIFOLD_IMM_SHIFT      23
-#define FIFOLD_IMM_MASK       (1 << FIFOLD_IMM_SHIFT)
-#define FIFOLD_IMM	      (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM_SHIFT	23
+#define FIFOLD_IMM_MASK		(1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM		(1 << FIFOLD_IMM_SHIFT)
 
 /* Continue - Not the last FIFO store to come */
-#define FIFOST_CONT_SHIFT     23
-#define FIFOST_CONT_MASK      (1 << FIFOST_CONT_SHIFT)
-#define FIFOST_CONT_MASK      (1 << FIFOST_CONT_SHIFT)
+#define FIFOST_CONT_SHIFT	23
+#define FIFOST_CONT_MASK	(1 << FIFOST_CONT_SHIFT)
+#define FIFOST_CONT_MASK	(1 << FIFOST_CONT_SHIFT)
 
 /*
  * Extended Length - use 32-bit extended length that
@@ -377,13 +377,13 @@
 #define FIFOST_TYPE_PKHA_B	 (0x0d << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_E_JKEK  (0x22 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_E_TKEK  (0x23 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_JKEK	 (0x22 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_TKEK	 (0x23 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_KEY_KEK	 (0x24 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_KEY_TKEK	 (0x25 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_SPLIT_KEK	 (0x26 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_SPLIT_TKEK	 (0x27 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_OUTFIFO_KEK  (0x28 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_KEK	 (0x28 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_RNGSTORE	 (0x34 << FIFOST_TYPE_SHIFT)
@@ -612,7 +612,7 @@
 #define OP_PCL_TLS10_AES_256_CBC_SHA_16		 0xc021
 #define OP_PCL_TLS10_AES_256_CBC_SHA_17		 0xc022
 
-/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5	    0x0023 */
+/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5	0x0023 */
 
 #define OP_PCL_TLS10_3DES_EDE_CBC_SHA		 0x001f
 #define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2		 0x008b
@@ -736,7 +736,7 @@
 #define OP_PCL_TLS11_AES_256_CBC_SHA_16		 0xc021
 #define OP_PCL_TLS11_AES_256_CBC_SHA_17		 0xc022
 
-/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5	    0x0023 */
+/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5	0x0023 */
 
 #define OP_PCL_TLS11_3DES_EDE_CBC_SHA		 0x001f
 #define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2		 0x008b
@@ -858,7 +858,7 @@
 #define OP_PCL_TLS12_AES_256_CBC_SHA_16		 0xc021
 #define OP_PCL_TLS12_AES_256_CBC_SHA_17		 0xc022
 
-/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5	    0x0023 */
+/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5	0x0023 */
 
 #define OP_PCL_TLS12_3DES_EDE_CBC_SHA		 0x001f
 #define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2		 0x008b
@@ -920,14 +920,14 @@
 
 #define OP_PCL_TLS12_RC4_40_SHA			 0x0028
 
-/* #define OP_PCL_TLS12_AES_128_CBC_SHA256	    0x003c */
+/* #define OP_PCL_TLS12_AES_128_CBC_SHA256	0x003c */
 #define OP_PCL_TLS12_AES_128_CBC_SHA256_2	 0x003e
 #define OP_PCL_TLS12_AES_128_CBC_SHA256_3	 0x003f
 #define OP_PCL_TLS12_AES_128_CBC_SHA256_4	 0x0040
 #define OP_PCL_TLS12_AES_128_CBC_SHA256_5	 0x0067
 #define OP_PCL_TLS12_AES_128_CBC_SHA256_6	 0x006c
 
-/* #define OP_PCL_TLS12_AES_256_CBC_SHA256	    0x003d */
+/* #define OP_PCL_TLS12_AES_256_CBC_SHA256	0x003d */
 #define OP_PCL_TLS12_AES_256_CBC_SHA256_2	 0x0068
 #define OP_PCL_TLS12_AES_256_CBC_SHA256_3	 0x0069
 #define OP_PCL_TLS12_AES_256_CBC_SHA256_4	 0x006a
@@ -996,7 +996,7 @@
 #define OP_PCL_DTLS_AES_256_CBC_SHA_16		 0xc021
 #define OP_PCL_DTLS_AES_256_CBC_SHA_17		 0xc022
 
-/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5		    0x0023 */
+/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5		0x0023 */
 
 #define OP_PCL_DTLS_3DES_EDE_CBC_SHA		 0x001f
 #define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2		 0x008b
@@ -1181,92 +1181,92 @@
 #define OP_ALG_ENCRYPT		1
 
 /* PKHA algorithm type set */
-#define OP_ALG_PK		     0x00800000
-#define OP_ALG_PK_FUN_MASK	     0x3f /* clrmem, modmath, or cpymem */
+#define OP_ALG_PK		0x00800000
+#define OP_ALG_PK_FUN_MASK	0x3f /* clrmem, modmath, or cpymem */
 
 /* PKHA mode clear memory functions */
-#define OP_ALG_PKMODE_A_RAM	     0x80000
-#define OP_ALG_PKMODE_B_RAM	     0x40000
-#define OP_ALG_PKMODE_E_RAM	     0x20000
-#define OP_ALG_PKMODE_N_RAM	     0x10000
-#define OP_ALG_PKMODE_CLEARMEM	     0x00001
+#define OP_ALG_PKMODE_A_RAM	0x80000
+#define OP_ALG_PKMODE_B_RAM	0x40000
+#define OP_ALG_PKMODE_E_RAM	0x20000
+#define OP_ALG_PKMODE_N_RAM	0x10000
+#define OP_ALG_PKMODE_CLEARMEM	0x00001
 
 /* PKHA mode modular-arithmetic functions */
-#define OP_ALG_PKMODE_MOD_IN_MONTY   0x80000
-#define OP_ALG_PKMODE_MOD_OUT_MONTY  0x40000
-#define OP_ALG_PKMODE_MOD_F2M	     0x20000
-#define OP_ALG_PKMODE_MOD_R2_IN      0x10000
-#define OP_ALG_PKMODE_PRJECTV	     0x00800
-#define OP_ALG_PKMODE_TIME_EQ	     0x400
-#define OP_ALG_PKMODE_OUT_B	     0x000
-#define OP_ALG_PKMODE_OUT_A	     0x100
-#define OP_ALG_PKMODE_MOD_ADD	     0x002
-#define OP_ALG_PKMODE_MOD_SUB_AB     0x003
-#define OP_ALG_PKMODE_MOD_SUB_BA     0x004
-#define OP_ALG_PKMODE_MOD_MULT	     0x005
-#define OP_ALG_PKMODE_MOD_EXPO	     0x006
-#define OP_ALG_PKMODE_MOD_REDUCT     0x007
-#define OP_ALG_PKMODE_MOD_INV	     0x008
-#define OP_ALG_PKMODE_MOD_ECC_ADD    0x009
-#define OP_ALG_PKMODE_MOD_ECC_DBL    0x00a
-#define OP_ALG_PKMODE_MOD_ECC_MULT   0x00b
-#define OP_ALG_PKMODE_MOD_MONT_CNST  0x00c
-#define OP_ALG_PKMODE_MOD_CRT_CNST   0x00d
-#define OP_ALG_PKMODE_MOD_GCD	     0x00e
-#define OP_ALG_PKMODE_MOD_PRIMALITY  0x00f
+#define OP_ALG_PKMODE_MOD_IN_MONTY	0x80000
+#define OP_ALG_PKMODE_MOD_OUT_MONTY	0x40000
+#define OP_ALG_PKMODE_MOD_F2M		0x20000
+#define OP_ALG_PKMODE_MOD_R2_IN		0x10000
+#define OP_ALG_PKMODE_PRJECTV		0x00800
+#define OP_ALG_PKMODE_TIME_EQ		0x400
+#define OP_ALG_PKMODE_OUT_B		0x000
+#define OP_ALG_PKMODE_OUT_A		0x100
+#define OP_ALG_PKMODE_MOD_ADD		0x002
+#define OP_ALG_PKMODE_MOD_SUB_AB	0x003
+#define OP_ALG_PKMODE_MOD_SUB_BA	0x004
+#define OP_ALG_PKMODE_MOD_MULT		0x005
+#define OP_ALG_PKMODE_MOD_EXPO		0x006
+#define OP_ALG_PKMODE_MOD_REDUCT	0x007
+#define OP_ALG_PKMODE_MOD_INV		0x008
+#define OP_ALG_PKMODE_MOD_ECC_ADD	0x009
+#define OP_ALG_PKMODE_MOD_ECC_DBL	0x00a
+#define OP_ALG_PKMODE_MOD_ECC_MULT	0x00b
+#define OP_ALG_PKMODE_MOD_MONT_CNST	0x00c
+#define OP_ALG_PKMODE_MOD_CRT_CNST	0x00d
+#define OP_ALG_PKMODE_MOD_GCD		0x00e
+#define OP_ALG_PKMODE_MOD_PRIMALITY	0x00f
 
 /* PKHA mode copy-memory functions */
-#define OP_ALG_PKMODE_SRC_REG_SHIFT  13
-#define OP_ALG_PKMODE_SRC_REG_MASK   (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_SHIFT  10
-#define OP_ALG_PKMODE_DST_REG_MASK   (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_SHIFT  8
-#define OP_ALG_PKMODE_SRC_SEG_MASK   (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_SHIFT  6
-#define OP_ALG_PKMODE_DST_SEG_MASK   (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_SHIFT	13
+#define OP_ALG_PKMODE_SRC_REG_MASK	(7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_SHIFT	10
+#define OP_ALG_PKMODE_DST_REG_MASK	(7 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_SHIFT	8
+#define OP_ALG_PKMODE_SRC_SEG_MASK	(3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_SHIFT	6
+#define OP_ALG_PKMODE_DST_SEG_MASK	(3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
 
-#define OP_ALG_PKMODE_SRC_REG_A      (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
-#define OP_ALG_PKMODE_SRC_REG_B      (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
-#define OP_ALG_PKMODE_SRC_REG_N      (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_A      (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_B      (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_E      (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_N      (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_0      (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_1      (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_2      (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_3      (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_0      (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_1      (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_2      (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_3      (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-#define OP_ALG_PKMODE_CPYMEM_N_SZ    0x80
-#define OP_ALG_PKMODE_CPYMEM_SRC_SZ  0x81
+#define OP_ALG_PKMODE_SRC_REG_A		(0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_B		(1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_N		(3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_A		(0 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_B		(1 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_E		(2 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_N		(3 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_0		(0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_1		(1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_2		(2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_3		(3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_0		(0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_1		(1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_2		(2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_3		(3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_CPYMEM_N_SZ	0x80
+#define OP_ALG_PKMODE_CPYMEM_SRC_SZ	0x81
 
 /*
  * SEQ_IN_PTR Command Constructs
  */
 
 /* Release Buffers */
-#define SQIN_RBS	       0x04000000
+#define SQIN_RBS	0x04000000
 
 /* Sequence pointer is really a descriptor */
-#define SQIN_INL	       0x02000000
+#define SQIN_INL	0x02000000
 
 /* Sequence pointer is a scatter-gather table */
-#define SQIN_SGF	       0x01000000
+#define SQIN_SGF	0x01000000
 
 /* Appends to a previous pointer */
-#define SQIN_PRE	       0x00800000
+#define SQIN_PRE	0x00800000
 
 /* Use extended length following pointer */
-#define SQIN_EXT	       0x00400000
+#define SQIN_EXT	0x00400000
 
 /* Restore sequence with pointer/length */
-#define SQIN_RTO	       0x00200000
+#define SQIN_RTO	0x00200000
 
 /* Replace job descriptor */
-#define SQIN_RJD	       0x00100000
+#define SQIN_RJD	0x00100000
 
 #define SQIN_LEN_SHIFT		 0
 #define SQIN_LEN_MASK		(0xffff << SQIN_LEN_SHIFT)
@@ -1276,19 +1276,19 @@
  */
 
 /* Sequence pointer is a scatter-gather table */
-#define SQOUT_SGF	       0x01000000
+#define SQOUT_SGF	0x01000000
 
 /* Appends to a previous pointer */
-#define SQOUT_PRE	       0x00800000
+#define SQOUT_PRE	0x00800000
 
 /* Restore sequence with pointer/length */
-#define SQOUT_RTO	       0x00200000
+#define SQOUT_RTO	0x00200000
 
 /* Use extended length following pointer */
-#define SQOUT_EXT	       0x00400000
+#define SQOUT_EXT	0x00400000
 
-#define SQOUT_LEN_SHIFT		  0
-#define SQOUT_LEN_MASK		 (0xffff << SQOUT_LEN_SHIFT)
+#define SQOUT_LEN_SHIFT		0
+#define SQOUT_LEN_MASK		(0xffff << SQOUT_LEN_SHIFT)
 
 
 /*
@@ -1500,20 +1500,20 @@
 #define NFIFOENTRY_DEST_BOTH	(3 << NFIFOENTRY_DEST_SHIFT)
 
 #define NFIFOENTRY_LC2_SHIFT	29
-#define NFIFOENTRY_LC2_MASK		(1 << NFIFOENTRY_LC2_SHIFT)
-#define NFIFOENTRY_LC2			(1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2_MASK	(1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2		(1 << NFIFOENTRY_LC2_SHIFT)
 
 #define NFIFOENTRY_LC1_SHIFT	28
-#define NFIFOENTRY_LC1_MASK		(1 << NFIFOENTRY_LC1_SHIFT)
-#define NFIFOENTRY_LC1			(1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1_MASK	(1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1		(1 << NFIFOENTRY_LC1_SHIFT)
 
 #define NFIFOENTRY_FC2_SHIFT	27
-#define NFIFOENTRY_FC2_MASK		(1 << NFIFOENTRY_FC2_SHIFT)
-#define NFIFOENTRY_FC2			(1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2_MASK	(1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2		(1 << NFIFOENTRY_FC2_SHIFT)
 
 #define NFIFOENTRY_FC1_SHIFT	26
-#define NFIFOENTRY_FC1_MASK		(1 << NFIFOENTRY_FC1_SHIFT)
-#define NFIFOENTRY_FC1			(1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1_MASK	(1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1		(1 << NFIFOENTRY_FC1_SHIFT)
 
 #define NFIFOENTRY_STYPE_SHIFT	24
 #define NFIFOENTRY_STYPE_MASK	(3 << NFIFOENTRY_STYPE_SHIFT)
@@ -1525,60 +1525,59 @@
 #define NFIFOENTRY_DTYPE_SHIFT	20
 #define NFIFOENTRY_DTYPE_MASK	(0xF << NFIFOENTRY_DTYPE_SHIFT)
 
-#define NFIFOENTRY_DTYPE_SBOX	   (0x0  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_AAD	   (0x1  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_IV	   (0x2  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_SAD	   (0x3  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_ICV	   (0xA  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_SKIP	   (0xE  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_MSG	   (0xF  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SBOX	(0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_AAD	(0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_IV	(0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SAD	(0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_ICV	(0xA << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SKIP	(0xE << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_MSG	(0xF << NFIFOENTRY_DTYPE_SHIFT)
 
-#define NFIFOENTRY_DTYPE_PK_A0	   (0x0  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A1	   (0x1  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A2	   (0x2  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A3	   (0x3  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B0	   (0x4  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B1	   (0x5  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B2	   (0x6  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B3	   (0x7  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_N	   (0x8  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_E	   (0x9  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A	   (0xC  << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B	   (0xD  << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A0	(0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A1	(0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A2	(0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A3	(0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B0	(0x4 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B1	(0x5 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B2	(0x6 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B3	(0x7 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_N	(0x8 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_E	(0x9 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A	(0xC << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B	(0xD << NFIFOENTRY_DTYPE_SHIFT)
 
 
 #define NFIFOENTRY_BND_SHIFT	19
-#define NFIFOENTRY_BND_MASK		(1 << NFIFOENTRY_BND_SHIFT)
-#define NFIFOENTRY_BND			(1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND_MASK	(1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND		(1 << NFIFOENTRY_BND_SHIFT)
 
 #define NFIFOENTRY_PTYPE_SHIFT	16
 #define NFIFOENTRY_PTYPE_MASK	(0x7 << NFIFOENTRY_PTYPE_SHIFT)
 
-#define NFIFOENTRY_PTYPE_ZEROS	       (0x0  << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND_NOZEROS   (0x1  << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_INCREMENT     (0x2  << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND	       (0x3  << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_ZEROS_NZ      (0x4  << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND_NZ_LZ     (0x5  << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_N	       (0x6  << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND_NZ_N      (0x7  << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS		(0x0 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NOZEROS	(0x1 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_INCREMENT	(0x2 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND		(0x3 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS_NZ	(0x4 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_LZ	(0x5 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_N		(0x6 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_N	(0x7 << NFIFOENTRY_PTYPE_SHIFT)
 
-#define NFIFOENTRY_OC_SHIFT		15
-#define NFIFOENTRY_OC_MASK		(1 << NFIFOENTRY_OC_SHIFT)
-#define NFIFOENTRY_OC			(1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC_SHIFT	15
+#define NFIFOENTRY_OC_MASK	(1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC		(1 << NFIFOENTRY_OC_SHIFT)
 
 #define NFIFOENTRY_AST_SHIFT	14
-#define NFIFOENTRY_AST_MASK		(1 << NFIFOENTRY_OC_SHIFT)
-#define NFIFOENTRY_AST			(1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_AST_MASK	(1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_AST		(1 << NFIFOENTRY_OC_SHIFT)
 
-#define NFIFOENTRY_BM_SHIFT		11
-#define NFIFOENTRY_BM_MASK		(1 << NFIFOENTRY_BM_SHIFT)
-#define NFIFOENTRY_BM			(1 << NFIFOENTRY_BM_SHIFT)
-
-#define NFIFOENTRY_PS_SHIFT		10
-#define NFIFOENTRY_PS_MASK		(1 << NFIFOENTRY_PS_SHIFT)
-#define NFIFOENTRY_PS			(1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_BM_SHIFT	11
+#define NFIFOENTRY_BM_MASK	(1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_BM		(1 << NFIFOENTRY_BM_SHIFT)
 
+#define NFIFOENTRY_PS_SHIFT	10
+#define NFIFOENTRY_PS_MASK	(1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_PS		(1 << NFIFOENTRY_PS_SHIFT)
 
 #define NFIFOENTRY_DLEN_SHIFT	0
 #define NFIFOENTRY_DLEN_MASK	(0xFFF << NFIFOENTRY_DLEN_SHIFT)
@@ -1591,15 +1590,15 @@
  */
 
 /* IPSec ESP CBC Encap/Decap Options */
-#define PDBOPTS_ESPCBC_ARSNONE	0x00   /* no antireplay window		    */
-#define PDBOPTS_ESPCBC_ARS32	0x40   /* 32-entry antireplay window	    */
-#define PDBOPTS_ESPCBC_ARS64	0xc0   /* 64-entry antireplay window	    */
-#define PDBOPTS_ESPCBC_IVSRC	0x20   /* IV comes from internal random gen */
-#define PDBOPTS_ESPCBC_ESN	0x10   /* extended sequence included	    */
-#define PDBOPTS_ESPCBC_OUTFMT	0x08   /* output only decapsulation (decap) */
-#define PDBOPTS_ESPCBC_IPHDRSRC 0x08   /* IP header comes from PDB (encap)  */
-#define PDBOPTS_ESPCBC_INCIPHDR 0x04   /* Prepend IP header to output frame */
-#define PDBOPTS_ESPCBC_IPVSN	0x02   /* process IPv6 header		    */
-#define PDBOPTS_ESPCBC_TUNNEL	0x01   /* tunnel mode next-header byte	    */
+#define PDBOPTS_ESPCBC_ARSNONE	0x00	/* no antireplay window	*/
+#define PDBOPTS_ESPCBC_ARS32	0x40	/* 32-entry antireplay window */
+#define PDBOPTS_ESPCBC_ARS64	0xc0	/* 64-entry antireplay window */
+#define PDBOPTS_ESPCBC_IVSRC	0x20	/* IV comes from internal random gen */
+#define PDBOPTS_ESPCBC_ESN	0x10	/* extended sequence included */
+#define PDBOPTS_ESPCBC_OUTFMT	0x08	/* output only decapsulation (decap) */
+#define PDBOPTS_ESPCBC_IPHDRSRC 0x08	/* IP header comes from PDB (encap) */
+#define PDBOPTS_ESPCBC_INCIPHDR 0x04	/* Prepend IP header to output frame */
+#define PDBOPTS_ESPCBC_IPVSN	0x02	/* process IPv6 header */
+#define PDBOPTS_ESPCBC_TUNNEL	0x01	/* tunnel mode next-header byte */
 
 #endif /* DESC_H */

From a2ecb155a30fea933cc539fc2064fef825fd9511 Mon Sep 17 00:00:00 2001
From: Kim Phillips <kim.phillips@freescale.com>
Date: Mon, 12 Dec 2011 14:59:15 -0600
Subject: [PATCH 45/54] crypto: caam - fix polarity of "propagate error" logic

The polarity of the definition for error propagation was reversed
in the initial desc.h.  Fix desc.h and its users.
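
Concretely, the two "change share" encodings in desc.h trade places; a
minimal before/after sketch of the desc.h hunk below (only these two
defines are shown, with LDOFF_CHG_SHARE_SHIFT as already defined there):

/* Before: the "propagate" and "no propagate" encodings were swapped. */
/* #define LDOFF_CHG_SHARE_OK_NO_PROP	(0x2 << LDOFF_CHG_SHARE_SHIFT) */
/* #define LDOFF_CHG_SHARE_OK_PROP	(0x3 << LDOFF_CHG_SHARE_SHIFT)  */

/* After: 0x2 means "OK, propagate"; 0x3 means "OK, do not propagate". */
#define LDOFF_CHG_SHARE_OK_PROP		(0x2 << LDOFF_CHG_SHARE_SHIFT)
#define LDOFF_CHG_SHARE_OK_NO_PROP	(0x3 << LDOFF_CHG_SHARE_SHIFT)

The desc_constr.h and caamalg.c hunks follow the rename, so the offset
value that ends up in the descriptor (0x3) stays the same; only the
macro name now matches its meaning.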

Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/caam/caamalg.c     | 10 +++++-----
 drivers/crypto/caam/desc.h        |  4 ++--
 drivers/crypto/caam/desc_constr.h |  7 ++++---
 3 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index a5bc706f57bf..e73cf2e8110a 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -113,7 +113,7 @@ static inline void append_dec_shr_done(u32 *desc)
 
 	jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
 	set_jump_tgt_here(desc, jump_cmd);
-	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
 }
 
 /*
@@ -213,7 +213,7 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
 	set_jump_tgt_here(desc, key_jump_cmd);
 
 	/* Propagate errors from shared to job descriptor */
-	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
 }
 
 static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -310,7 +310,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	/* Only propagate error immediately if shared */
 	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
 	set_jump_tgt_here(desc, key_jump_cmd);
-	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
 	set_jump_tgt_here(desc, jump_cmd);
 
 	/* Class 2 operation */
@@ -683,7 +683,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	set_jump_tgt_here(desc, key_jump_cmd);
 
 	/* Propagate errors from shared to job descriptor */
-	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
 
 	/* Load iv */
 	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
@@ -724,7 +724,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	/* For aead, only propagate error immediately if shared */
 	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
 	set_jump_tgt_here(desc, key_jump_cmd);
-	append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
 	set_jump_tgt_here(desc, jump_cmd);
 
 	/* load IV */
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 6984cc564932..a17c2958dab1 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -233,8 +233,8 @@
 #define LDOFF_CHG_SHARE_SHIFT		0
 #define LDOFF_CHG_SHARE_MASK		(0x3 << LDOFF_CHG_SHARE_SHIFT)
 #define LDOFF_CHG_SHARE_NEVER		(0x1 << LDOFF_CHG_SHARE_SHIFT)
-#define LDOFF_CHG_SHARE_OK_NO_PROP	(0x2 << LDOFF_CHG_SHARE_SHIFT)
-#define LDOFF_CHG_SHARE_OK_PROP		(0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_PROP		(0x2 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_NO_PROP	(0x3 << LDOFF_CHG_SHARE_SHIFT)
 
 #define LDOFF_ENABLE_AUTO_NFIFO		(1 << 2)
 #define LDOFF_DISABLE_AUTO_NFIFO	(1 << 3)
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 0991323cf3fd..348b882275f0 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -18,9 +18,10 @@
 #define PRINT_POS
 #endif
 
-#define SET_OK_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
-			    LDST_SRCDST_WORD_DECOCTRL | \
-			    (LDOFF_CHG_SHARE_OK_PROP << LDST_OFFSET_SHIFT))
+#define SET_OK_NO_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
+			       LDST_SRCDST_WORD_DECOCTRL | \
+			       (LDOFF_CHG_SHARE_OK_NO_PROP << \
+				LDST_OFFSET_SHIFT))
 #define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
 				LDST_SRCDST_WORD_DECOCTRL | \
 				(LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))

From a1a38c4c6a0f73e8a9dc217c0e4e0ac483a89f25 Mon Sep 17 00:00:00 2001
From: Kim Phillips <kim.phillips@freescale.com>
Date: Mon, 12 Dec 2011 14:59:16 -0600
Subject: [PATCH 46/54] crypto: caam - remove DECO access initialization code

Access to the SEC4 DECOs (DEscriptor COntrollers) for debug purposes
isn't supported or used, and the register access initialization code
erroneously makes illegal I/O accesses that show up as errors when
run under simulation.  Remove it until proper support (via DECORR)
is added.

Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/caam/ctrl.c | 13 -------------
 drivers/crypto/caam/regs.h |  1 -
 2 files changed, 14 deletions(-)

diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 048bb2343231..8ae3ba2a160d 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -52,8 +52,6 @@ static int caam_probe(struct platform_device *pdev)
 	struct caam_ctrl __iomem *ctrl;
 	struct caam_full __iomem *topregs;
 	struct caam_drv_private *ctrlpriv;
-	struct caam_deco **deco;
-	u32 deconum;
 #ifdef CONFIG_DEBUG_FS
 	struct caam_perfmon *perfmon;
 #endif
@@ -92,17 +90,6 @@ static int caam_probe(struct platform_device *pdev)
 	if (sizeof(dma_addr_t) == sizeof(u64))
 		dma_set_mask(dev, DMA_BIT_MASK(36));
 
-	/* Find out how many DECOs are present */
-	deconum = (rd_reg64(&topregs->ctrl.perfmon.cha_num) &
-		   CHA_NUM_DECONUM_MASK) >> CHA_NUM_DECONUM_SHIFT;
-
-	ctrlpriv->deco = kmalloc(deconum * sizeof(struct caam_deco *),
-				 GFP_KERNEL);
-
-	deco = (struct caam_deco __force **)&topregs->deco;
-	for (d = 0; d < deconum; d++)
-		ctrlpriv->deco[d] = deco[d];
-
 	/*
 	 * Detect and enable JobRs
 	 * First, find out how many ring spec'ed, allocate references
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index aee394e39056..e9f7a70cdd5e 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -657,7 +657,6 @@ struct caam_full {
 	u64 rsvd[512];
 	struct caam_assurance assure;
 	struct caam_queue_if qi;
-	struct caam_deco *deco;
 };
 
 #endif /* REGS_H */

From a9c57a9c6afb41951aa08317d0cd65a66aed38df Mon Sep 17 00:00:00 2001
From: Jamie Iles <jamie@jamieiles.com>
Date: Tue, 13 Dec 2011 09:54:06 +0000
Subject: [PATCH 47/54] crypto: picoxcell - fix boolean and / or confusion

The AES engine only supports 128- and 256-bit keys, so we should test
for that correctly.
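
As a standalone sketch of why the original condition was wrong (the key
sizes below match the kernel's AES_KEYSIZE_* byte values; the rest is
illustrative and not driver code):

#include <stdio.h>

#define AES_KEYSIZE_128	16	/* bytes */
#define AES_KEYSIZE_256	32	/* bytes */

int main(void)
{
	unsigned int lens[] = { 16, 24, 32 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int len = lens[i];
		/* Buggy: no len equals both sizes, so this is always true. */
		int buggy = (len != AES_KEYSIZE_128 || len != AES_KEYSIZE_256);
		/* Fixed: true only for unsupported sizes such as 24 (192 bits). */
		int fixed = (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256);

		printf("len=%u buggy=%d fixed=%d\n", len, buggy, fixed);
	}
	return 0;
}

With the corrected test, the software fallback path is taken only for
the 192-bit case, matching the comment above the check in the driver.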

Cc: Herbert Xu <herbert@gondor.apana.org.au>
Reported-by: Joe Perches <joe@perches.com>
Signed-off-by: Jamie Iles <jamie@jamieiles.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/picoxcell_crypto.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 9b1571c2a5a4..58480d009324 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -873,7 +873,7 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	 * request for any other size (192 bits) then we need to do a software
 	 * fallback.
 	 */
-	if ((len != AES_KEYSIZE_128 || len != AES_KEYSIZE_256) &&
+	if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
 	    ctx->sw_cipher) {
 		/*
 		 * Set the fallback transform to use the same request flags as
@@ -886,7 +886,7 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 		err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
 		if (err)
 			goto sw_setkey_failed;
-	} else if ((len != AES_KEYSIZE_128 || len != AES_KEYSIZE_256) &&
+	} else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
 		   !ctx->sw_cipher)
 		err = -EINVAL;
 

From 2470a2b2c33455440d0452c8c0248d113e8502a5 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 13 Dec 2011 12:52:51 +0200
Subject: [PATCH 48/54] crypto: lrw - remove dependency on EXPERIMENTAL

LRW has been EXPERIMENTAL since it was introduced in 2006. I'd say by now
it has seen enough testing to justify removal of the EXPERIMENTAL tag.

CC: Rik Snel <rsnel@cube.dyndns.org>
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/Kconfig | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/crypto/Kconfig b/crypto/Kconfig
index b8d1b10aedab..3cc7511c810a 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -241,8 +241,7 @@ config CRYPTO_ECB
 	  the input block by block.
 
 config CRYPTO_LRW
-	tristate "LRW support (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
+	tristate "LRW support"
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_MANAGER
 	select CRYPTO_GF128MUL

From 5bcf8e6dd49fec57b5dd96a643c53a048272b625 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 13 Dec 2011 12:52:56 +0200
Subject: [PATCH 49/54] crypto: xts - remove dependency on EXPERIMENTAL

XTS has been EXPERIMENTAL since it was introduced in 2007. I'd say by now
it has seen enough testing to justify removal of the EXPERIMENTAL tag.

CC: Rik Snel <rsnel@cube.dyndns.org>
Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/Kconfig | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/crypto/Kconfig b/crypto/Kconfig
index 3cc7511c810a..a77cd8eee884 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -261,8 +261,7 @@ config CRYPTO_PCBC
 	  This block cipher algorithm is required for RxRPC.
 
 config CRYPTO_XTS
-	tristate "XTS support (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
+	tristate "XTS support"
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_MANAGER
 	select CRYPTO_GF128MUL

From e7cda5d27ed3febf277fe410687c977ae1a31a25 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 13 Dec 2011 12:53:01 +0200
Subject: [PATCH 50/54] crypto: twofish-x86_64-3way - select LRW and XTS

twofish-x86_64-3way uses functions from the LRW and XTS modules, so selecting
them appears to be a better option than using #ifdefs in twofish_glue_3way.c
to enable/disable the LRW and XTS features.

This also fixes a build problem when twofish-x86_64-3way is built into the
kernel but XTS/LRW are built as modules.
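
A minimal sketch of the failure mode, using the HAS_XTS guard that the
next patch removes (the configuration combination is an assumed example):

/*
 * Assume CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=y and CONFIG_CRYPTO_XTS=m.
 * The old guard in twofish_glue_3way.c is still satisfied, because the
 * =m case defines CONFIG_CRYPTO_XTS_MODULE:
 */
#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
#define HAS_XTS
#endif
/*
 * With HAS_XTS set, the built-in glue code compiles its XTS paths and
 * references symbols that only exist in xts.ko, so the final kernel
 * link fails.  "select CRYPTO_XTS" forces XTS to be built in whenever
 * the glue code is built in, which is why the select is preferred over
 * the #ifdef.  The same reasoning applies to LRW.
 */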

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/Kconfig | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/crypto/Kconfig b/crypto/Kconfig
index a77cd8eee884..3cd303b70218 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -876,6 +876,8 @@ config CRYPTO_TWOFISH_X86_64_3WAY
 	select CRYPTO_ALGAPI
 	select CRYPTO_TWOFISH_COMMON
 	select CRYPTO_TWOFISH_X86_64
+	select CRYPTO_LRW
+	select CRYPTO_XTS
 	help
 	  Twofish cipher algorithm (x86_64, 3-way parallel).
 

From 88715b9ade718564fd8b1318735826370481366b Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 13 Dec 2011 12:53:07 +0200
Subject: [PATCH 51/54] crypto: twofish-x86_64-3way - remove unneeded LRW/XTS
 #ifdefs

Since LRW & XTS are selected by twofish-x86_64-3way, we don't need these
#ifdefs anymore.

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 arch/x86/crypto/twofish_glue_3way.c | 32 -----------------------------
 1 file changed, 32 deletions(-)

diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index 954f59eeb7b4..7fee8c152f93 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -35,14 +35,6 @@
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
 
-#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
-#define HAS_LRW
-#endif
-
-#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
-#define HAS_XTS
-#endif
-
 /* regular block cipher functions from twofish_x86_64 module */
 asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
 				const u8 *src);
@@ -442,8 +434,6 @@ static struct crypto_alg blk_ctr_alg = {
 	},
 };
 
-#if defined(HAS_LRW) || defined(HAS_XTS)
-
 static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
 {
 	const unsigned int bsize = TF_BLOCK_SIZE;
@@ -474,10 +464,6 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
 		twofish_dec_blk(ctx, srcdst, srcdst);
 }
 
-#endif
-
-#ifdef HAS_LRW
-
 struct twofish_lrw_ctx {
 	struct lrw_table_ctx lrw_table;
 	struct twofish_ctx twofish_ctx;
@@ -562,10 +548,6 @@ static struct crypto_alg blk_lrw_alg = {
 	},
 };
 
-#endif
-
-#ifdef HAS_XTS
-
 struct twofish_xts_ctx {
 	struct twofish_ctx tweak_ctx;
 	struct twofish_ctx crypt_ctx;
@@ -655,8 +637,6 @@ static struct crypto_alg blk_xts_alg = {
 	},
 };
 
-#endif
-
 int __init init(void)
 {
 	int err;
@@ -670,27 +650,19 @@ int __init init(void)
 	err = crypto_register_alg(&blk_ctr_alg);
 	if (err)
 		goto ctr_err;
-#ifdef HAS_LRW
 	err = crypto_register_alg(&blk_lrw_alg);
 	if (err)
 		goto blk_lrw_err;
-#endif
-#ifdef HAS_XTS
 	err = crypto_register_alg(&blk_xts_alg);
 	if (err)
 		goto blk_xts_err;
-#endif
 
 	return 0;
 
-#ifdef HAS_XTS
 	crypto_unregister_alg(&blk_xts_alg);
 blk_xts_err:
-#endif
-#ifdef HAS_LRW
 	crypto_unregister_alg(&blk_lrw_alg);
 blk_lrw_err:
-#endif
 	crypto_unregister_alg(&blk_ctr_alg);
 ctr_err:
 	crypto_unregister_alg(&blk_cbc_alg);
@@ -702,12 +674,8 @@ ecb_err:
 
 void __exit fini(void)
 {
-#ifdef HAS_XTS
 	crypto_unregister_alg(&blk_xts_alg);
-#endif
-#ifdef HAS_LRW
 	crypto_unregister_alg(&blk_lrw_alg);
-#endif
 	crypto_unregister_alg(&blk_ctr_alg);
 	crypto_unregister_alg(&blk_cbc_alg);
 	crypto_unregister_alg(&blk_ecb_alg);

From feaf0cfc263ec778fa166e96ac6a9ef37854fec9 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 13 Dec 2011 12:53:12 +0200
Subject: [PATCH 52/54] crypto: serpent-sse2 - select LRW and XTS

serpent-sse2 uses functions from the LRW and XTS modules, so selecting them
appears to be a better option than using #ifdefs in serpent_sse2_glue.c to
enable/disable the LRW and XTS features.

This also fixes a build problem when serpent-sse2 is built into the kernel
but XTS/LRW are built as modules.

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/Kconfig | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/crypto/Kconfig b/crypto/Kconfig
index 3cd303b70218..d7d4d4e0ee18 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -770,6 +770,8 @@ config CRYPTO_SERPENT_SSE2_X86_64
 	select CRYPTO_ALGAPI
 	select CRYPTO_CRYPTD
 	select CRYPTO_SERPENT
+	select CRYPTO_LRW
+	select CRYPTO_XTS
 	help
 	  Serpent cipher algorithm, by Anderson, Biham & Knudsen.
 
@@ -788,6 +790,8 @@ config CRYPTO_SERPENT_SSE2_586
 	select CRYPTO_ALGAPI
 	select CRYPTO_CRYPTD
 	select CRYPTO_SERPENT
+	select CRYPTO_LRW
+	select CRYPTO_XTS
 	help
 	  Serpent cipher algorithm, by Anderson, Biham & Knudsen.
 

From 7ba8babf84fa4e9b648e247223043785f596dd23 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 13 Dec 2011 12:53:17 +0200
Subject: [PATCH 53/54] crypto: serpent-sse2 - remove unneeded LRW/XTS #ifdefs

Since LRW & XTS are selected by serpent-sse2, we don't need these #ifdefs
anymore.

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 arch/x86/crypto/serpent_sse2_glue.c | 40 -----------------------------
 1 file changed, 40 deletions(-)

diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 2f5c304653f4..7955a9b76b91 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -47,14 +47,6 @@
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
 
-#if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
-#define HAS_LRW
-#endif
-
-#if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
-#define HAS_XTS
-#endif
-
 struct async_serpent_ctx {
 	struct cryptd_ablkcipher *cryptd_tfm;
 };
@@ -470,8 +462,6 @@ static struct crypto_alg blk_ctr_alg = {
 	},
 };
 
-#if defined(HAS_LRW) || defined(HAS_XTS)
-
 struct crypt_priv {
 	struct serpent_ctx *ctx;
 	bool fpu_enabled;
@@ -511,10 +501,6 @@ static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
 		__serpent_decrypt(ctx->ctx, srcdst, srcdst);
 }
 
-#endif
-
-#ifdef HAS_LRW
-
 struct serpent_lrw_ctx {
 	struct lrw_table_ctx lrw_table;
 	struct serpent_ctx serpent_ctx;
@@ -620,10 +606,6 @@ static struct crypto_alg blk_lrw_alg = {
 	},
 };
 
-#endif
-
-#ifdef HAS_XTS
-
 struct serpent_xts_ctx {
 	struct serpent_ctx tweak_ctx;
 	struct serpent_ctx crypt_ctx;
@@ -730,8 +712,6 @@ static struct crypto_alg blk_xts_alg = {
 	},
 };
 
-#endif
-
 static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
 			unsigned int key_len)
 {
@@ -930,8 +910,6 @@ static struct crypto_alg ablk_ctr_alg = {
 	},
 };
 
-#ifdef HAS_LRW
-
 static int ablk_lrw_init(struct crypto_tfm *tfm)
 {
 	struct cryptd_ablkcipher *cryptd_tfm;
@@ -970,10 +948,6 @@ static struct crypto_alg ablk_lrw_alg = {
 	},
 };
 
-#endif
-
-#ifdef HAS_XTS
-
 static int ablk_xts_init(struct crypto_tfm *tfm)
 {
 	struct cryptd_ablkcipher *cryptd_tfm;
@@ -1010,8 +984,6 @@ static struct crypto_alg ablk_xts_alg = {
 	},
 };
 
-#endif
-
 static int __init serpent_sse2_init(void)
 {
 	int err;
@@ -1039,36 +1011,28 @@ static int __init serpent_sse2_init(void)
 	err = crypto_register_alg(&ablk_ctr_alg);
 	if (err)
 		goto ablk_ctr_err;
-#ifdef HAS_LRW
 	err = crypto_register_alg(&blk_lrw_alg);
 	if (err)
 		goto blk_lrw_err;
 	err = crypto_register_alg(&ablk_lrw_alg);
 	if (err)
 		goto ablk_lrw_err;
-#endif
-#ifdef HAS_XTS
 	err = crypto_register_alg(&blk_xts_alg);
 	if (err)
 		goto blk_xts_err;
 	err = crypto_register_alg(&ablk_xts_alg);
 	if (err)
 		goto ablk_xts_err;
-#endif
 	return err;
 
-#ifdef HAS_XTS
 	crypto_unregister_alg(&ablk_xts_alg);
 ablk_xts_err:
 	crypto_unregister_alg(&blk_xts_alg);
 blk_xts_err:
-#endif
-#ifdef HAS_LRW
 	crypto_unregister_alg(&ablk_lrw_alg);
 ablk_lrw_err:
 	crypto_unregister_alg(&blk_lrw_alg);
 blk_lrw_err:
-#endif
 	crypto_unregister_alg(&ablk_ctr_alg);
 ablk_ctr_err:
 	crypto_unregister_alg(&ablk_cbc_alg);
@@ -1086,14 +1050,10 @@ blk_ecb_err:
 
 static void __exit serpent_sse2_exit(void)
 {
-#ifdef HAS_XTS
 	crypto_unregister_alg(&ablk_xts_alg);
 	crypto_unregister_alg(&blk_xts_alg);
-#endif
-#ifdef HAS_LRW
 	crypto_unregister_alg(&ablk_lrw_alg);
 	crypto_unregister_alg(&blk_lrw_alg);
-#endif
 	crypto_unregister_alg(&ablk_ctr_alg);
 	crypto_unregister_alg(&ablk_cbc_alg);
 	crypto_unregister_alg(&ablk_ecb_alg);

From 08c70fc3a239475122e20b7a21dfae4c264c24f7 Mon Sep 17 00:00:00 2001
From: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Date: Tue, 13 Dec 2011 12:53:22 +0200
Subject: [PATCH 54/54] crypto: gf128mul - remove leftover "(EXPERIMENTAL)" in
 Kconfig

CRYPTO_GF128MUL does not select EXPERIMENTAL anymore, so remove the
"(EXPERIMENTAL)" from its name.

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crypto/Kconfig b/crypto/Kconfig
index d7d4d4e0ee18..7ae0d0f6d753 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -117,7 +117,7 @@ config CRYPTO_MANAGER_DISABLE_TESTS
 	  algorithm registration.
 
 config CRYPTO_GF128MUL
-	tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
+	tristate "GF(2^128) multiplication functions"
 	help
 	  Efficient table driven implementation of multiplications in the
 	  field GF(2^128).  This is needed by some cypher modes. This