blob: 0cda61855d90719e6175d0325598bbbe1b79254b [file] [log] [blame]
/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 *
 * This file is provided under a dual BSD/GPLv2 license.
 *
 * SipHash: a fast short-input PRF
 * https://131002.net/siphash/
 *
 * This implementation is specifically for SipHash2-4 for a secure PRF
 * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
 * hashtables.
 */
12
13#ifndef _LINUX_SIPHASH_H
14#define _LINUX_SIPHASH_H
15
16#include <linux/types.h>
17#include <linux/kernel.h>
18
/* Alignment required for the aligned fast path (see siphash() below). */
#define SIPHASH_ALIGNMENT __alignof__(u64)
/* 128-bit SipHash key, carried as two 64-bit words. */
typedef struct {
	u64 key[2];
} siphash_key_t;
23
David Brazdil0f672f62019-12-10 10:32:29 +000024static inline bool siphash_key_is_zero(const siphash_key_t *key)
25{
26 return !(key->key[0] | key->key[1]);
27}
28
/* Generic variable-length entry points, defined out of line; most callers
 * should use the siphash() wrapper below instead of calling these directly.
 */
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);

/* Hash a fixed number (1-4) of u64 words. */
u64 siphash_1u64(const u64 a, const siphash_key_t *key);
u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
u64 siphash_3u64(const u64 a, const u64 b, const u64 c,
		 const siphash_key_t *key);
u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d,
		 const siphash_key_t *key);
/* Hash small tuples of u32 words; the 2- and 4-word cases are the inline
 * wrappers below, which pack pairs of u32s into u64s.
 */
u64 siphash_1u32(const u32 a, const siphash_key_t *key);
u64 siphash_3u32(const u32 a, const u32 b, const u32 c,
		 const siphash_key_t *key);
41
42static inline u64 siphash_2u32(const u32 a, const u32 b,
43 const siphash_key_t *key)
44{
45 return siphash_1u64((u64)b << 32 | a, key);
46}
47static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c,
48 const u32 d, const siphash_key_t *key)
49{
50 return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key);
51}
52
53
54static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
55 const siphash_key_t *key)
56{
57 if (__builtin_constant_p(len) && len == 4)
58 return siphash_1u32(le32_to_cpup((const __le32 *)data), key);
59 if (__builtin_constant_p(len) && len == 8)
60 return siphash_1u64(le64_to_cpu(data[0]), key);
61 if (__builtin_constant_p(len) && len == 16)
62 return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
63 key);
64 if (__builtin_constant_p(len) && len == 24)
65 return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
66 le64_to_cpu(data[2]), key);
67 if (__builtin_constant_p(len) && len == 32)
68 return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
69 le64_to_cpu(data[2]), le64_to_cpu(data[3]),
70 key);
71 return __siphash_aligned(data, len, key);
72}
73
74/**
75 * siphash - compute 64-bit siphash PRF value
76 * @data: buffer to hash
77 * @size: size of @data
78 * @key: the siphash key
79 */
80static inline u64 siphash(const void *data, size_t len,
81 const siphash_key_t *key)
82{
Olivier Deprez157378f2022-04-04 15:47:50 +020083 if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
84 !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000085 return __siphash_unaligned(data, len, key);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000086 return ___siphash_aligned(data, len, key);
87}
88
/* Alignment required for the hsiphash aligned fast path. */
#define HSIPHASH_ALIGNMENT __alignof__(unsigned long)
/* Key for the 32-bit hsiphash variant (HalfSipHash1-3/SipHash1-3; see the
 * file header: insecure PRF, only suitable for hashtables).
 */
typedef struct {
	unsigned long key[2];
} hsiphash_key_t;
93
/* Generic variable-length entry points, defined out of line; most callers
 * should use the hsiphash() wrapper below instead of calling these directly.
 */
u32 __hsiphash_aligned(const void *data, size_t len,
		       const hsiphash_key_t *key);
u32 __hsiphash_unaligned(const void *data, size_t len,
			 const hsiphash_key_t *key);

/* Hash a fixed number (1-4) of u32 words. */
u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c,
		  const hsiphash_key_t *key);
u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d,
		  const hsiphash_key_t *key);
105
106static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
107 const hsiphash_key_t *key)
108{
109 if (__builtin_constant_p(len) && len == 4)
110 return hsiphash_1u32(le32_to_cpu(data[0]), key);
111 if (__builtin_constant_p(len) && len == 8)
112 return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
113 key);
114 if (__builtin_constant_p(len) && len == 12)
115 return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
116 le32_to_cpu(data[2]), key);
117 if (__builtin_constant_p(len) && len == 16)
118 return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
119 le32_to_cpu(data[2]), le32_to_cpu(data[3]),
120 key);
121 return __hsiphash_aligned(data, len, key);
122}
123
124/**
125 * hsiphash - compute 32-bit hsiphash PRF value
126 * @data: buffer to hash
127 * @size: size of @data
128 * @key: the hsiphash key
129 */
130static inline u32 hsiphash(const void *data, size_t len,
131 const hsiphash_key_t *key)
132{
Olivier Deprez157378f2022-04-04 15:47:50 +0200133 if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
134 !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000135 return __hsiphash_unaligned(data, len, key);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000136 return ___hsiphash_aligned(data, len, key);
137}
138
139#endif /* _LINUX_SIPHASH_H */