crypto: Add generic 8-bit carry-less multiply routines

Add generic, portable-C carry-less (polynomial) multiply routines
operating on 8-bit elements: clmul_8x8_low computes the low 8 bits of
eight 8x8 products packed in a 64-bit vector, while clmul_8x4_even,
clmul_8x4_odd and clmul_8x4_packed compute four full 8x8->16 bit
products.

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
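
A minimal standalone sketch of what these routines compute, for
reference: a carry-less product XORs shifted partial products instead
of adding them. The snippet below is illustrative only and not part of
the patch; clmul_8x8_ref and check_low_byte are hypothetical names, and
it assumes the clmul_8x4_even() declaration from crypto/clmul.h.

#include <stdint.h>
#include <assert.h>
#include "crypto/clmul.h"   /* assumed to declare clmul_8x4_even() */

/* Naive 8x8->16 bit carry-less multiply: XOR-accumulate shifted copies. */
static uint16_t clmul_8x8_ref(uint8_t a, uint8_t b)
{
    uint16_t r = 0;

    for (int i = 0; i < 8; i++) {
        if (a & (1u << i)) {
            r ^= (uint16_t)b << i;
        }
    }
    return r;
}

/* Lane 0 of clmul_8x4_even() holds the product of the two low bytes. */
static void check_low_byte(uint8_t a, uint8_t b)
{
    assert((uint16_t)clmul_8x4_even(a, b) == clmul_8x8_ref(a, b));
}

clmul_8x8_low() keeps only the low 8 bits of each such per-byte product.
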
diff --git a/crypto/clmul.c b/crypto/clmul.c
new file mode 100644
index 0000000..82d873f
--- /dev/null
+++ b/crypto/clmul.c
@@ -0,0 +1,60 @@
+/*
+ * Carry-less multiply operations.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#include "qemu/osdep.h"
+#include "crypto/clmul.h"
+
+uint64_t clmul_8x8_low(uint64_t n, uint64_t m)
+{
+    uint64_t r = 0;
+
+    for (int i = 0; i < 8; ++i) {
+        uint64_t mask = (n & 0x0101010101010101ull) * 0xff;
+        r ^= m & mask;
+        m = (m << 1) & 0xfefefefefefefefeull;
+        n >>= 1;
+    }
+    return r;
+}
+
+static uint64_t clmul_8x4_even_int(uint64_t n, uint64_t m)
+{
+    uint64_t r = 0;
+
+    for (int i = 0; i < 8; ++i) {
+        uint64_t mask = (n & 0x0001000100010001ull) * 0xffff;
+        r ^= m & mask;
+        n >>= 1;
+        m <<= 1;
+    }
+    return r;
+}
+
+uint64_t clmul_8x4_even(uint64_t n, uint64_t m)
+{
+    n &= 0x00ff00ff00ff00ffull;
+    m &= 0x00ff00ff00ff00ffull;
+    return clmul_8x4_even_int(n, m);
+}
+
+uint64_t clmul_8x4_odd(uint64_t n, uint64_t m)
+{
+    return clmul_8x4_even(n >> 8, m >> 8);
+}
+
+static uint64_t unpack_8_to_16(uint64_t x)
+{
+    return  (x & 0x000000ff)
+         | ((x & 0x0000ff00) << 8)
+         | ((x & 0x00ff0000) << 16)
+         | ((x & 0xff000000) << 24);
+}
+
+uint64_t clmul_8x4_packed(uint32_t n, uint32_t m)
+{
+    return clmul_8x4_even_int(unpack_8_to_16(n), unpack_8_to_16(m));
+}
diff --git a/crypto/meson.build b/crypto/meson.build
index 5f03a30..9ac1a89 100644
--- a/crypto/meson.build
+++ b/crypto/meson.build
@@ -48,9 +48,12 @@
 endif
 crypto_ss.add(when: gnutls, if_true: files('tls-cipher-suites.c'))
 
-util_ss.add(files('sm4.c'))
-util_ss.add(files('aes.c'))
-util_ss.add(files('init.c'))
+util_ss.add(files(
+  'aes.c',
+  'clmul.c',
+  'init.c',
+  'sm4.c',
+))
 if gnutls.found()
   util_ss.add(gnutls)
 endif