bk://herbert.bkbits.net/cryptodev-2.6
herbert@gondor.apana.org.au|ChangeSet|20050322111924|17506 herbert

# This is a BitKeeper generated diff -Nru style patch.
#
# ChangeSet
#   2005/03/22 22:19:24+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Remap when walk_out crosses page in crypt()
#   
#   This is needed so that we can keep the in_place assignment outside the inner loop.
#   Without this, in pathological situations walk_out can start out different
#   from walk_in, yet converge with walk_in once walk_out crosses a page.
#   
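#   Sketch of the hazard (illustrative; the actual hunk is in the diff below):
#
#       /* computed once per pair of mappings, outside the inner loop */
#       in_place = scatterwalk_samebuf(&walk_in, &walk_out);
#
#   in_place describes the current mappings only.  If walk_out crosses into
#   walk_in's page mid-walk, the cached value goes stale, so the page must
#   be remapped rather than carried over.
#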
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/cipher.c
#   2005/03/22 22:18:51+11:00 herbert@gondor.apana.org.au +3 -1
#   [CRYPTO]: Remap when walk_out crosses page in crypt()
# 
# ChangeSet
#   2005/03/22 21:56:53+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Split cbc_process into encrypt/decrypt
#   
#   Rather than taking a branch on the fast path, we might as well split cbc_process
#   into encrypt and decrypt variants, since the two paths have nothing in common.
#   
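#   For reference, with E/D the block cipher and iv carrying the previous
#   ciphertext block, the two directions compute:
#
#       encrypt:  iv ^= P[i];  C[i] = E(iv);  iv = C[i]
#       decrypt:  P[i] = D(C[i]) ^ iv;        iv = C[i]
#
#   which is exactly what cbc_process_encrypt/cbc_process_decrypt in the
#   diff below do; no step is shared between the two directions.
#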
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/cipher.c
#   2005/03/22 21:56:21+11:00 herbert@gondor.apana.org.au +25 -21
#   [CRYPTO]: Split cbc_process into encrypt/decrypt
# 
# ChangeSet
#   2005/03/22 21:34:04+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Kill obsolete iv check in cbc_process()
#   
#   We have long since stopped using a null cit_iv as a means of doing null encryption.
#   In fact it doesn't work here anyway, since null encryption requires copying
#   src into dst.
#   
#   No user of cbc_encrypt_iv/cbc_decrypt_iv does this either, so let's just get
#   rid of this check, which sits in the fast path.
#   
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/cipher.c
#   2005/03/22 21:33:25+11:00 herbert@gondor.apana.org.au +1 -5
#   [CRYPTO]: Kill obsolete iv check in cbc_process()
# 
# ChangeSet
#   2005/03/22 20:23:48+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Advance walk->data per block in crypt()
#   
#   Now that crypt() no longer calls scatterwalk_done for each block, we need
#   another way to ensure that walk->data gets updated per block.  Without this,
#   we'll keep reading/writing the same block over and over until we move to the
#   next page.
#   
#   The solution is to update walk->data in scatterwalk_advance.
#   
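#   The resulting helper keeps all four cursors in step per block (this is
#   scatterwalk_advance as it appears in crypto/scatterwalk.h below):
#
#       static inline void scatterwalk_advance(struct scatter_walk *walk,
#                                              unsigned int nbytes)
#       {
#               walk->data += nbytes;            /* the line this adds */
#               walk->offset += nbytes;
#               walk->len_this_page -= nbytes;
#               walk->len_this_segment -= nbytes;
#       }
#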
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/scatterwalk.h
#   2005/03/22 20:23:10+11:00 herbert@gondor.apana.org.au +1 -0
#   [CRYPTO]: Advance walk->data per block in crypt()
# 
# crypto/scatterwalk.c
#   2005/03/22 20:23:10+11:00 herbert@gondor.apana.org.au +11 -4
#   [CRYPTO]: Advance walk->data per block in crypt()
#   
#   As walk->data is advanced after each operation, it may now point to the first byte of
#   the next page.  So we need to take that into account when using it to unmap the page.
#   
#   Check sg->length to make sure that we can transfer at least one byte.
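#
#   Hence the unmap backs up one byte (excerpt from the new
#   scatterwalk_unmap below):
#
#       /* walk->data - 1 is guaranteed to lie within the mapped page */
#       crypto_kunmap(walk->data - 1, out);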
# 
# ChangeSet
#   2005/03/21 18:42:12+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Optimise kmap calls in crypt()
#   
#   Perform kmap once (or twice if the buffer is not aligned correctly) per page
#   in crypt(), instead of once per block as the current code does.  Consequently
#   crypt() will yield once per page instead of once per block.
#   
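#   crypt() now brackets a whole page's worth of blocks with a single
#   map/unmap pair (condensed from crypto/cipher.c below):
#
#       scatterwalk_map(&walk_in, 0);
#       scatterwalk_map(&walk_out, 1);
#       do {
#               /* ... process one block ... */
#       } while (nbytes &&
#                !scatterwalk_across_pages(&walk_in, bsize) &&
#                !scatterwalk_across_pages(&walk_out, bsize));
#       scatterwalk_done(&walk_in, 0, nbytes);   /* kunmap; may yield */
#       scatterwalk_done(&walk_out, 1, nbytes);
#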
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/cipher.c
#   2005/03/21 18:41:41+11:00 herbert@gondor.apana.org.au +11 -7
#   [CRYPTO]: Optimise kmap calls in crypt()
#   
#   Perform kmap once (or twice if the buffer is not aligned correctly) per page
#   in crypt(), instead of once per block as the current code does.  Consequently
#   crypt() will yield once per page instead of once per block.
# 
# ChangeSet
#   2005/03/20 22:19:30+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Eliminate most calls to scatterwalk_copychunks from crypt()
#   
#   Only call scatterwalk_copychunks when the block straddles a page boundary.  This
#   allows crypt() to skip the out-of-line call most of the time.
#   
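#   The per-block fast path can then use the mapped page directly, falling
#   back to a bounce buffer only on a page straddle (excerpt from
#   prepare_src below):
#
#       void *src = walk->data;
#       int n = bsize;
#
#       if (unlikely(scatterwalk_across_pages(walk, bsize))) {
#               src = tmp;                       /* slow path: bounce copy */
#               n = scatterwalk_copychunks(src, walk, bsize, 0);
#       }
#       scatterwalk_advance(walk, n);
#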
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/scatterwalk.h
#   2005/03/20 22:18:58+11:00 herbert@gondor.apana.org.au +8 -0
#   [CRYPTO]: Eliminate most calls to scatterwalk_copychunks from crypt()
#   
#   Add scatterwalk_advance.
# 
# crypto/scatterwalk.c
#   2005/03/20 22:18:58+11:00 herbert@gondor.apana.org.au +10 -16
#   [CRYPTO]: Eliminate most calls to scatterwalk_copychunks from crypt()
#   
#   Only call scatterwalk_copychunks when the block straddles a page boundary.  Also let
#   the caller do the final walk update.
# 
# crypto/cipher.c
#   2005/03/20 22:18:58+11:00 herbert@gondor.apana.org.au +13 -3
#   [CRYPTO]: Eliminate most calls to scatterwalk_copychunks from crypt()
#   
#   Only call scatterwalk_copychunks from crypt() when the block straddles a page
#   boundary.
# 
# ChangeSet
#   2005/03/20 22:06:18+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Split src/dst handling out from crypt()
#   
#   Move src/dst handling from crypt() into the helpers prepare_src, prepare_dst,
#   complete_src and complete_dst.  complete_src doesn't actually do anything at the
#   moment but is included for completeness.
#   
#   This sets the stage for further optimisations down the track without polluting
#   crypt() itself.
#   
#   These helpers don't belong in scatterwalk.[ch] since they only help the particular
#   way that crypt() is walking the scatter lists.
#   
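#   Per block, crypt()'s inner loop now reduces to (excerpt from below):
#
#       src_p = prepare_src(&walk_in, bsize, tmp_src, in_place);
#       dst_p = prepare_dst(&walk_out, bsize, tmp_dst, in_place);
#
#       prfn(tfm, dst_p, src_p, crfn, info);
#
#       complete_src(&walk_in, bsize, src_p, in_place);
#       complete_dst(&walk_out, bsize, dst_p, in_place);
#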
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/cipher.c
#   2005/03/20 22:05:46+11:00 herbert@gondor.apana.org.au +35 -11
#   [CRYPTO]: Split src/dst handling out from crypt()
# 
# ChangeSet
#   2005/03/20 21:21:56+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Handle in_place flag in crypt()
#   
#   Move the handling of in_place into crypt() itself.  This means that we only need two
#   temporary buffers instead of three.  It also allows us to simplify the check in
#   scatterwalk_samebuf.
#   
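#   An in-place operation now just runs the cipher into tmp_dst and copies
#   the result back over the source block (as it ends up in complete_dst in
#   the cumulative diff below):
#
#       int n = bsize;
#
#       if (unlikely(scatterwalk_across_pages(walk, bsize)))
#               n = scatterwalk_copychunks(dst, walk, bsize, 1);
#       else if (in_place)
#               memcpy(walk->data, dst, bsize);
#       scatterwalk_advance(walk, n);
#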
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/scatterwalk.h
#   2005/03/20 21:21:26+11:00 herbert@gondor.apana.org.au +2 -4
#   [CRYPTO]: Handle in_place flag in crypt()
#   
#   Since in_place is now handled together with the page boundary check, it is no longer
#   necessary to optimise for the page boundary case in scatterwalk_samebuf.
# 
# crypto/cipher.c
#   2005/03/20 21:21:26+11:00 herbert@gondor.apana.org.au +10 -15
#   [CRYPTO]: Handle in_place flag in crypt()
#   
#   Move the handling of in_place into crypt() itself.
# 
# ChangeSet
#   2005/03/20 21:18:42+11:00 herbert@gondor.apana.org.au 
#   [CRYPTO]: Do scatterwalk_whichbuf inline
#   
#   scatterwalk_whichbuf is called once for each block, which could be as small
#   as 8/16 bytes.  So it makes sense to do that work inline.
#   
#   It's also a bit inflexible since we may want to use the temporary buffer even if the
#   block doesn't cross page boundaries.  In particular, we want to do that when the
#   source and destination are the same.
#   
#   So let's replace it with scatterwalk_across_pages.
#   
#   I've also simplified the check in scatterwalk_across_pages.  It is sufficient to only
#   check len_this_page.
#   
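#   The replacement test is a single comparison (from crypto/scatterwalk.h
#   below); len_this_page is already clamped to the page boundary when the
#   walk starts, so it alone decides whether a block straddles pages:
#
#       static inline int scatterwalk_across_pages(struct scatter_walk *walk,
#                                                  unsigned int nbytes)
#       {
#               return nbytes > walk->len_this_page;
#       }
#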
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/scatterwalk.h
#   2005/03/20 21:18:09+11:00 herbert@gondor.apana.org.au +6 -1
#   [CRYPTO]: Do scatterwalk_whichbuf inline
#   
#   Remove scatterwalk_whichbuf and add scatterwalk_across_pages.
# 
# crypto/scatterwalk.c
#   2005/03/20 21:18:09+11:00 herbert@gondor.apana.org.au +0 -10
#   [CRYPTO]: Do scatterwalk_whichbuf inline
#   
#   Remove scatterwalk_whichbuf.
# 
# crypto/cipher.c
#   2005/03/20 21:18:09+11:00 herbert@gondor.apana.org.au +10 -2
#   [CRYPTO]: Do scatterwalk_whichbuf inline
#   
#   Use scatterwalk_across_pages instead of scatterwalk_whichbuf for better performance
#   and flexibility.
# 
# ChangeSet
#   2005/03/20 20:36:54+11:00 domen@coderock.org 
#   [CRYPTO]: Fix sparse warnings in tea
#   
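#   This and the other three sparse fixes below share one pattern: annotate
#   raw big/little-endian words with sparse's bitwise types (__le32, __be32,
#   __be64) and convert to native order only at the point of use, e.g. in
#   the tea hunk:
#
#       #define u32_in(x) le32_to_cpu(*(const __le32 *)(x))
#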
#   Signed-off-by: Domen Puncer <domen@coderock.org>
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/tea.c
#   2005/03/20 20:36:21+11:00 domen@coderock.org +2 -2
#   [CRYPTO]: Fix sparse warnings in tea
# 
# ChangeSet
#   2005/03/20 20:30:42+11:00 domen@coderock.org 
#   [CRYPTO]: Fix sparse warnings in blowfish
#   
#   Signed-off-by: Domen Puncer <domen@coderock.org>
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/blowfish.c
#   2005/03/20 20:30:13+11:00 domen@coderock.org +4 -4
#   [CRYPTO]: Fix sparse warnings in blowfish
# 
# ChangeSet
#   2005/03/20 20:24:16+11:00 domen@coderock.org 
#   [CRYPTO]: Fix sparse warning in sha512
#   
#   Signed-off-by: Domen Puncer <domen@coderock.org>
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/sha512.c
#   2005/03/20 20:23:42+11:00 domen@coderock.org +1 -1
#   [CRYPTO]: Fix sparse warning in sha512
# 
# ChangeSet
#   2005/03/20 20:21:42+11:00 domen@coderock.org 
#   [CRYPTO]: Fix sparse warning in sha256
#   
#   Signed-off-by: Domen Puncer <domen@coderock.org>
#   Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
# 
# crypto/sha256.c
#   2005/03/20 20:21:10+11:00 domen@coderock.org +1 -1
#   [CRYPTO]: Fix sparse warning in sha256
# 
diff -Nru a/crypto/blowfish.c b/crypto/blowfish.c
--- a/crypto/blowfish.c	2005-03-23 19:34:18 -08:00
+++ b/crypto/blowfish.c	2005-03-23 19:34:18 -08:00
@@ -349,8 +349,8 @@
 
 static void bf_encrypt(void *ctx, u8 *dst, const u8 *src)
 {
-	const u32 *in_blk = (const u32 *)src;
-	u32 *const out_blk = (u32 *)dst;
+	const __be32 *in_blk = (const __be32 *)src;
+	__be32 *const out_blk = (__be32 *)dst;
 	u32 in32[2], out32[2];
 
 	in32[0] = be32_to_cpu(in_blk[0]);
@@ -362,8 +362,8 @@
 
 static void bf_decrypt(void *ctx, u8 *dst, const u8 *src)
 {
-	const u32 *in_blk = (const u32 *)src;
-	u32 *const out_blk = (u32 *)dst;
+	const __be32 *in_blk = (const __be32 *)src;
+	__be32 *const out_blk = (__be32 *)dst;
 	const u32 *P = ((struct bf_ctx *)ctx)->p;
 	const u32 *S = ((struct bf_ctx *)ctx)->s;
 	u32 yl = be32_to_cpu(in_blk[0]);
diff -Nru a/crypto/cipher.c b/crypto/cipher.c
--- a/crypto/cipher.c	2005-03-23 19:34:18 -08:00
+++ b/crypto/cipher.c	2005-03-23 19:34:18 -08:00
@@ -11,18 +11,20 @@
  * any later version.
  *
  */
+#include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/crypto.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <asm/scatterlist.h>
 #include "internal.h"
 #include "scatterwalk.h"
 
 typedef void (cryptfn_t)(void *, u8 *, const u8 *);
 typedef void (procfn_t)(struct crypto_tfm *, u8 *,
-                        u8*, cryptfn_t, int enc, void *, int);
+                        u8*, cryptfn_t, void *);
 
 static inline void xor_64(u8 *a, const u8 *b)
 {
@@ -37,7 +39,47 @@
 	((u32 *)a)[2] ^= ((u32 *)b)[2];
 	((u32 *)a)[3] ^= ((u32 *)b)[3];
 }
+ 
+static inline void *prepare_src(struct scatter_walk *walk, int bsize,
+				void *tmp, int in_place)
+{
+	void *src = walk->data;
+	int n = bsize;
+
+	if (unlikely(scatterwalk_across_pages(walk, bsize))) {
+		src = tmp;
+		n = scatterwalk_copychunks(src, walk, bsize, 0);
+	}
+	scatterwalk_advance(walk, n);
+	return src;
+}
+
+static inline void *prepare_dst(struct scatter_walk *walk, int bsize,
+				void *tmp, int in_place)
+{
+	void *dst = walk->data;
+
+	if (unlikely(scatterwalk_across_pages(walk, bsize)) || in_place)
+		dst = tmp;
+	return dst;
+}
 
+static inline void complete_src(struct scatter_walk *walk, int bsize,
+				void *src, int in_place)
+{
+}
+
+static inline void complete_dst(struct scatter_walk *walk, int bsize,
+				void *dst, int in_place)
+{
+	int n = bsize;
+
+	if (unlikely(scatterwalk_across_pages(walk, bsize)))
+		n = scatterwalk_copychunks(dst, walk, bsize, 1);
+	else if (in_place)
+		memcpy(walk->data, dst, bsize);
+	scatterwalk_advance(walk, n);
+}
 
 /* 
  * Generic encrypt/decrypt wrapper for ciphers, handles operations across
@@ -48,7 +90,7 @@
 		 struct scatterlist *dst,
 		 struct scatterlist *src,
                  unsigned int nbytes, cryptfn_t crfn,
-                 procfn_t prfn, int enc, void *info)
+                 procfn_t prfn, void *info)
 {
 	struct scatter_walk walk_in, walk_out;
 	const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
@@ -72,20 +114,26 @@
 
 		scatterwalk_map(&walk_in, 0);
 		scatterwalk_map(&walk_out, 1);
-		src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src);
-		dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst);
-		in_place = scatterwalk_samebuf(&walk_in, &walk_out,
-					       src_p, dst_p);
 
-		nbytes -= bsize;
+		in_place = scatterwalk_samebuf(&walk_in, &walk_out);
 
-		scatterwalk_copychunks(src_p, &walk_in, bsize, 0);
-
-		prfn(tfm, dst_p, src_p, crfn, enc, info, in_place);
+		do {
+			src_p = prepare_src(&walk_in, bsize, tmp_src,
+					    in_place);
+			dst_p = prepare_dst(&walk_out, bsize, tmp_dst,
+					    in_place);
+
+			prfn(tfm, dst_p, src_p, crfn, info);
+
+			complete_src(&walk_in, bsize, src_p, in_place);
+			complete_dst(&walk_out, bsize, dst_p, in_place);
+
+			nbytes -= bsize;
+		} while (nbytes &&
+			 !scatterwalk_across_pages(&walk_in, bsize) &&
+			 !scatterwalk_across_pages(&walk_out, bsize));
 
 		scatterwalk_done(&walk_in, 0, nbytes);
-
-		scatterwalk_copychunks(dst_p, &walk_out, bsize, 1);
 		scatterwalk_done(&walk_out, 1, nbytes);
 
 		if (!nbytes)
@@ -95,33 +143,28 @@
 	}
 }
 
-static void cbc_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
-			cryptfn_t fn, int enc, void *info, int in_place)
+static void cbc_process_encrypt(struct crypto_tfm *tfm, u8 *dst, u8 *src,
+				cryptfn_t fn, void *info)
 {
 	u8 *iv = info;
-	
-	/* Null encryption */
-	if (!iv)
-		return;
-		
-	if (enc) {
-		tfm->crt_u.cipher.cit_xor_block(iv, src);
-		fn(crypto_tfm_ctx(tfm), dst, iv);
-		memcpy(iv, dst, crypto_tfm_alg_blocksize(tfm));
-	} else {
-		u8 stack[in_place ? crypto_tfm_alg_blocksize(tfm) : 0];
-		u8 *buf = in_place ? stack : dst;
-
-		fn(crypto_tfm_ctx(tfm), buf, src);
-		tfm->crt_u.cipher.cit_xor_block(buf, iv);
-		memcpy(iv, src, crypto_tfm_alg_blocksize(tfm));
-		if (buf != dst)
-			memcpy(dst, buf, crypto_tfm_alg_blocksize(tfm));
-	}
+
+	tfm->crt_u.cipher.cit_xor_block(iv, src);
+	fn(crypto_tfm_ctx(tfm), dst, iv);
+	memcpy(iv, dst, crypto_tfm_alg_blocksize(tfm));
+}
+
+static void cbc_process_decrypt(struct crypto_tfm *tfm, u8 *dst, u8 *src,
+				cryptfn_t fn, void *info)
+{
+	u8 *iv = info;
+
+	fn(crypto_tfm_ctx(tfm), dst, src);
+	tfm->crt_u.cipher.cit_xor_block(dst, iv);
+	memcpy(iv, src, crypto_tfm_alg_blocksize(tfm));
 }
 
 static void ecb_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
-			cryptfn_t fn, int enc, void *info, int in_place)
+			cryptfn_t fn, void *info)
 {
 	fn(crypto_tfm_ctx(tfm), dst, src);
 }
@@ -144,7 +187,7 @@
 {
 	return crypt(tfm, dst, src, nbytes,
 	             tfm->__crt_alg->cra_cipher.cia_encrypt,
-	             ecb_process, 1, NULL);
+	             ecb_process, NULL);
 }
 
 static int ecb_decrypt(struct crypto_tfm *tfm,
@@ -154,7 +197,7 @@
 {
 	return crypt(tfm, dst, src, nbytes,
 	             tfm->__crt_alg->cra_cipher.cia_decrypt,
-	             ecb_process, 1, NULL);
+	             ecb_process, NULL);
 }
 
 static int cbc_encrypt(struct crypto_tfm *tfm,
@@ -164,7 +207,7 @@
 {
 	return crypt(tfm, dst, src, nbytes,
 	             tfm->__crt_alg->cra_cipher.cia_encrypt,
-	             cbc_process, 1, tfm->crt_cipher.cit_iv);
+	             cbc_process_encrypt, tfm->crt_cipher.cit_iv);
 }
 
 static int cbc_encrypt_iv(struct crypto_tfm *tfm,
@@ -174,7 +217,7 @@
 {
 	return crypt(tfm, dst, src, nbytes,
 	             tfm->__crt_alg->cra_cipher.cia_encrypt,
-	             cbc_process, 1, iv);
+	             cbc_process_encrypt, iv);
 }
 
 static int cbc_decrypt(struct crypto_tfm *tfm,
@@ -184,7 +227,7 @@
 {
 	return crypt(tfm, dst, src, nbytes,
 	             tfm->__crt_alg->cra_cipher.cia_decrypt,
-	             cbc_process, 0, tfm->crt_cipher.cit_iv);
+	             cbc_process_decrypt, tfm->crt_cipher.cit_iv);
 }
 
 static int cbc_decrypt_iv(struct crypto_tfm *tfm,
@@ -194,7 +237,7 @@
 {
 	return crypt(tfm, dst, src, nbytes,
 	             tfm->__crt_alg->cra_cipher.cia_decrypt,
-	             cbc_process, 0, iv);
+	             cbc_process_decrypt, iv);
 }
 
 static int nocrypt(struct crypto_tfm *tfm,
diff -Nru a/crypto/scatterwalk.c b/crypto/scatterwalk.c
--- a/crypto/scatterwalk.c	2005-03-23 19:34:18 -08:00
+++ b/crypto/scatterwalk.c	2005-03-23 19:34:18 -08:00
@@ -17,6 +17,7 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/highmem.h>
+#include <asm/bug.h>
 #include <asm/scatterlist.h>
 #include "internal.h"
 #include "scatterwalk.h"
@@ -28,16 +29,6 @@
 	KM_SOFTIRQ1,
 };
 
-void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch)
-{
-	if (nbytes <= walk->len_this_page &&
-	    (((unsigned long)walk->data) & (PAGE_CACHE_SIZE - 1)) + nbytes <=
-	    PAGE_CACHE_SIZE)
-		return walk->data;
-	else
-		return scratch;
-}
-
 static void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
 {
 	if (out)
@@ -55,6 +46,8 @@
 	walk->page = sg->page;
 	walk->len_this_segment = sg->length;
 
+	BUG_ON(!sg->length);
+
 	rest_of_page = PAGE_CACHE_SIZE - (sg->offset & (PAGE_CACHE_SIZE - 1));
 	walk->len_this_page = min(sg->length, rest_of_page);
 	walk->offset = sg->offset;
@@ -65,13 +58,17 @@
 	walk->data = crypto_kmap(walk->page, out) + walk->offset;
 }
 
-static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
-				 unsigned int more)
+static inline void scatterwalk_unmap(struct scatter_walk *walk, int out)
 {
 	/* walk->data may be pointing the first byte of the next page;
 	   however, we know we transfered at least one byte.  So,
 	   walk->data - 1 will be a virtual address in the mapped page. */
+	crypto_kunmap(walk->data - 1, out);
+}
 
+static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
+				 unsigned int more)
+{
 	if (out)
 		flush_dcache_page(walk->page);
 
@@ -91,7 +88,7 @@
 
 void scatterwalk_done(struct scatter_walk *walk, int out, int more)
 {
-	crypto_kunmap(walk->data, out);
+	scatterwalk_unmap(walk, out);
 	if (walk->len_this_page == 0 || !more)
 		scatterwalk_pagedone(walk, out, more);
 }
@@ -103,22 +100,16 @@
 int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
 			   size_t nbytes, int out)
 {
-	if (buf != walk->data) {
-		while (nbytes > walk->len_this_page) {
-			memcpy_dir(buf, walk->data, walk->len_this_page, out);
-			buf += walk->len_this_page;
-			nbytes -= walk->len_this_page;
-
-			crypto_kunmap(walk->data, out);
-			scatterwalk_pagedone(walk, out, 1);
-			scatterwalk_map(walk, out);
-		}
-
-		memcpy_dir(buf, walk->data, nbytes, out);
-	}
+	do {
+		memcpy_dir(buf, walk->data, walk->len_this_page, out);
+		buf += walk->len_this_page;
+		nbytes -= walk->len_this_page;
+
+		scatterwalk_unmap(walk, out);
+		scatterwalk_pagedone(walk, out, 1);
+		scatterwalk_map(walk, out);
+	} while (nbytes > walk->len_this_page);
 
-	walk->offset += nbytes;
-	walk->len_this_page -= nbytes;
-	walk->len_this_segment -= nbytes;
-	return 0;
+	memcpy_dir(buf, walk->data, nbytes, out);
+	return nbytes;
 }
diff -Nru a/crypto/scatterwalk.h b/crypto/scatterwalk.h
--- a/crypto/scatterwalk.h	2005-03-23 19:34:18 -08:00
+++ b/crypto/scatterwalk.h	2005-03-23 19:34:18 -08:00
@@ -34,15 +34,27 @@
 }
 
 static inline int scatterwalk_samebuf(struct scatter_walk *walk_in,
-				      struct scatter_walk *walk_out,
-				      void *src_p, void *dst_p)
+				      struct scatter_walk *walk_out)
 {
 	return walk_in->page == walk_out->page &&
-	       walk_in->offset == walk_out->offset &&
-	       walk_in->data == src_p && walk_out->data == dst_p;
+	       walk_in->offset == walk_out->offset;
+}
+
+static inline int scatterwalk_across_pages(struct scatter_walk *walk,
+					   unsigned int nbytes)
+{
+	return nbytes > walk->len_this_page;
+}
+
+static inline void scatterwalk_advance(struct scatter_walk *walk,
+				       unsigned int nbytes)
+{
+	walk->data += nbytes;
+	walk->offset += nbytes;
+	walk->len_this_page -= nbytes;
+	walk->len_this_segment -= nbytes;
 }
 
-void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch);
 void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
 int scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out);
 void scatterwalk_map(struct scatter_walk *walk, int out);
diff -Nru a/crypto/sha256.c b/crypto/sha256.c
--- a/crypto/sha256.c	2005-03-23 19:34:18 -08:00
+++ b/crypto/sha256.c	2005-03-23 19:34:18 -08:00
@@ -58,7 +58,7 @@
 
 static inline void LOAD_OP(int I, u32 *W, const u8 *input)
 {
-	W[I] = __be32_to_cpu( ((u32*)(input))[I] );
+	W[I] = __be32_to_cpu( ((__be32*)(input))[I] );
 }
 
 static inline void BLEND_OP(int I, u32 *W)
diff -Nru a/crypto/sha512.c b/crypto/sha512.c
--- a/crypto/sha512.c	2005-03-23 19:34:18 -08:00
+++ b/crypto/sha512.c	2005-03-23 19:34:18 -08:00
@@ -105,7 +105,7 @@
 
 static inline void LOAD_OP(int I, u64 *W, const u8 *input)
 {
-	W[I] = __be64_to_cpu( ((u64*)(input))[I] );
+	W[I] = __be64_to_cpu( ((__be64*)(input))[I] );
 }
 
 static inline void BLEND_OP(int I, u64 *W)
diff -Nru a/crypto/tea.c b/crypto/tea.c
--- a/crypto/tea.c	2005-03-23 19:34:18 -08:00
+++ b/crypto/tea.c	2005-03-23 19:34:18 -08:00
@@ -31,8 +31,8 @@
 #define XTEA_ROUNDS		32
 #define XTEA_DELTA		0x9e3779b9
 
-#define u32_in(x) le32_to_cpu(*(const u32 *)(x))
-#define u32_out(to, from) (*(u32 *)(to) = cpu_to_le32(from))
+#define u32_in(x) le32_to_cpu(*(const __le32 *)(x))
+#define u32_out(to, from) (*(__le32 *)(to) = cpu_to_le32(from))
 
 struct tea_ctx {
 	u32 KEY[4];