author	Eric Biggers <ebiggers@kernel.org>	2025-07-12 16:23:03 -0700
committer	Eric Biggers <ebiggers@kernel.org>	2025-07-14 11:11:49 -0700
commit	c751059985e02467c7fa6b14676c1d56d089b3cc (patch)
tree	52c3113e3d983ac4d43436944eded01f76aa4c1e /lib/crypto/sparc/sha1_asm.S
parent	lib/crypto: s390/sha1: Migrate optimized code into library (diff)
lib/crypto: sparc/sha1: Migrate optimized code into library
Instead of exposing the sparc-optimized SHA-1 code via sparc-specific crypto_shash algorithms, just implement the sha1_blocks() library function. This is much simpler, it makes the SHA-1 library functions sparc-optimized, and it fixes the longstanding issue where the sparc-optimized SHA-1 code was disabled by default. SHA-1 still remains available through crypto_shash, but individual architectures no longer need to handle it.

Note: to see the diff from arch/sparc/crypto/sha1_glue.c to lib/crypto/sparc/sha1.h, view this commit with 'git show -M10'.

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20250712232329.818226-13-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
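For orientation, here is a minimal sketch of what the sha1_blocks() glue in lib/crypto/sparc/sha1.h could look like, assuming the static-key dispatch pattern used elsewhere in lib/crypto. The identifier have_sha1_opcodes is a hypothetical name, not taken from this commit; this is not the actual file contents (view the commit itself for that).

	/*
	 * Illustrative sketch only: assumes the common lib/crypto
	 * static-key dispatch pattern. The name have_sha1_opcodes is
	 * hypothetical; it would be enabled at init time after probing
	 * for the sparc64 SHA-1 crypto opcode.
	 */
	static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_sha1_opcodes);

	/* The assembly routine added by this commit; %o2 is the block count. */
	asmlinkage void sha1_sparc64_transform(struct sha1_block_state *state,
					       const u8 *data, size_t nblocks);

	static void sha1_blocks(struct sha1_block_state *state,
				const u8 *data, size_t nblocks)
	{
		if (static_branch_likely(&have_sha1_opcodes))
			sha1_sparc64_transform(state, data, nblocks);
		else
			sha1_blocks_generic(state, data, nblocks);
	}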
Diffstat (limited to 'lib/crypto/sparc/sha1_asm.S')
-rw-r--r--	lib/crypto/sparc/sha1_asm.S	72
1 file changed, 72 insertions(+), 0 deletions(-)
diff --git a/lib/crypto/sparc/sha1_asm.S b/lib/crypto/sparc/sha1_asm.S
new file mode 100644
index 000000000000..00b46bac1b08
--- /dev/null
+++ b/lib/crypto/sparc/sha1_asm.S
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+#include <asm/opcodes.h>
+#include <asm/visasm.h>
+
+ENTRY(sha1_sparc64_transform)
+ /* %o0 = digest, %o1 = data, %o2 = rounds */
+ VISEntryHalf
+ ld [%o0 + 0x00], %f0
+ ld [%o0 + 0x04], %f1
+ ld [%o0 + 0x08], %f2
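+	/* Branch to 10f if the data pointer is not 8-byte aligned */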
+ andcc %o1, 0x7, %g0
+ ld [%o0 + 0x0c], %f3
+ bne,pn %xcc, 10f
+ ld [%o0 + 0x10], %f4
+
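+	/* Aligned case: load one 64-byte block into %f8-%f22 */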
+1:
+ ldd [%o1 + 0x00], %f8
+ ldd [%o1 + 0x08], %f10
+ ldd [%o1 + 0x10], %f12
+ ldd [%o1 + 0x18], %f14
+ ldd [%o1 + 0x20], %f16
+ ldd [%o1 + 0x28], %f18
+ ldd [%o1 + 0x30], %f20
+ ldd [%o1 + 0x38], %f22
+
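+	/* SHA1 opcode: hash the 64-byte block in %f8-%f22 into the
+	 * state in %f0-%f4
+	 */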
+ SHA1
+
+ subcc %o2, 1, %o2
+ bne,pt %xcc, 1b
+ add %o1, 0x40, %o1
+
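+	/* Store the updated digest and return */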
+5:
+ st %f0, [%o0 + 0x00]
+ st %f1, [%o0 + 0x04]
+ st %f2, [%o0 + 0x08]
+ st %f3, [%o0 + 0x0c]
+ st %f4, [%o0 + 0x10]
+ retl
+ VISExitHalf
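+
+	/* Unaligned case: alignaddr rounds %o1 down to an 8-byte
+	 * boundary and records the byte offset for faligndata
+	 */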
+10:
+ alignaddr %o1, %g0, %o1
+
+ ldd [%o1 + 0x00], %f10
+1:
+ ldd [%o1 + 0x08], %f12
+ ldd [%o1 + 0x10], %f14
+ ldd [%o1 + 0x18], %f16
+ ldd [%o1 + 0x20], %f18
+ ldd [%o1 + 0x28], %f20
+ ldd [%o1 + 0x30], %f22
+ ldd [%o1 + 0x38], %f24
+ ldd [%o1 + 0x40], %f26
+
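+	/* Extract the 64 aligned bytes from the nine loaded doublewords */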
+ faligndata %f10, %f12, %f8
+ faligndata %f12, %f14, %f10
+ faligndata %f14, %f16, %f12
+ faligndata %f16, %f18, %f14
+ faligndata %f18, %f20, %f16
+ faligndata %f20, %f22, %f18
+ faligndata %f22, %f24, %f20
+ faligndata %f24, %f26, %f22
+
+ SHA1
+
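+	/* fsrc2 carries the trailing doubleword (%f26) into %f10 for
+	 * the next block
+	 */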
+ subcc %o2, 1, %o2
+ fsrc2 %f26, %f10
+ bne,pt %xcc, 1b
+ add %o1, 0x40, %o1
+
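+	/* All blocks processed: reuse the store-and-return code at 5: above */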
+ ba,a,pt %xcc, 5b
+ENDPROC(sha1_sparc64_transform)