Pull string hash improvements from George Spelvin:
 "This series does several related things:

   - Makes the dcache hash (fs/namei.c) useful for general kernel use.
     (Thanks to Bruce for noticing the zero-length corner case)

   - Converts the string hashes in <linux/sunrpc/svcauth.h> to use the
     above.

   - Avoids 64-bit multiplies in hash_64() on 32-bit platforms.  Two
     32-bit multiplies will do well enough.

   - Rids the world of the bad hash multipliers in hash_32.

     This finishes the job started in commit 689de1d6ca ("Minimal
     fix-up of bad hashing behavior of hash_64()")

     The vast majority of Linux architectures have hardware support for
     32x32-bit multiply and so derive no benefit from "simplified"
     multipliers.

     The few processors that do not (68000, h8/300 and some models of
     Microblaze) have arch-specific implementations added.  Those
     patches are last in the series.

   - Overhauls the dcache hash mixing.

     The patch in commit 0fed3ac866 ("namei: Improve hash mixing if
     CONFIG_DCACHE_WORD_ACCESS") was an off-the-cuff suggestion.
     Replaced with a much more careful design that's simultaneously
     faster and better.  (My own invention, as there was nothing
     suitable in the literature I could find.  Comments welcome!)

   - Modify the hash_name() loop to skip the initial HASH_MIX().  This
     would let us salt the hash if we ever wanted to.

   - Sort out partial_name_hash().

     The hash function is declared as using a long state, even though
     it's truncated to 32 bits at the end and the extra internal state
     contributes nothing to the result.

     And some callers do odd things:

      - fs/hfs/string.c only allocates 32 bits of state
      - fs/hfsplus/unicode.c uses it to hash 16-bit unicode symbols not bytes

   - Modify bytemask_from_count to handle inputs of 1..sizeof(long)
     rather than 0..sizeof(long)-1.  This would simplify users other
     than full_name_hash"

  Special thanks to Bruce Fields for testing and finding bugs in v1.
  (I learned some humbling lessons about "obviously correct" code.)

  On the arch-specific front, the m68k assembly has been tested in a
  standalone test harness; I've been in contact with the Microblaze
  maintainers, who mostly don't care, as the hardware multiplier is
  never omitted in real-world applications; and I haven't heard
  anything from the H8/300 world"

* 'hash' of git://ftp.sciencehorizons.net/linux:
  h8300: Add <asm/hash.h>
  microblaze: Add <asm/hash.h>
  m68k: Add <asm/hash.h>
  <linux/hash.h>: Add support for architecture-specific functions
  fs/namei.c: Improve dcache hash function
  Eliminate bad hash multipliers from hash_32() and hash_64()
  Change hash_64() return value to 32 bits
  <linux/sunrpc/svcauth.h>: Define hash_str() in terms of hashlen_string()
  fs/namei.c: Add hashlen_string() function
  Pull out string hash to <linux/stringhash.h>
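The "two 32-bit multiplies" trick above is worth seeing in code. A minimal sketch of the shape the merged hash_64() takes, assuming the GOLDEN_RATIO_64 constant and the hash_32()/__hash_32() helpers from <linux/hash.h> (the function name is illustrative, not the exact merged source):

static inline u32 hash_64_sketch(u64 val, unsigned int bits)
{
#if BITS_PER_LONG == 64
	/* A single 64x64-bit multiply is cheap on 64-bit processors. */
	return val * GOLDEN_RATIO_64 >> (64 - bits);
#else
	/*
	 * On 32-bit: fold the high word into the low word with one
	 * 32-bit multiply, then hash the folded value with a second.
	 * No 64-bit multiply is needed.
	 */
	return hash_32((u32)val ^ __hash_32(val >> 32), bits);
#endif
}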
commit 7e0fb73c52

arch/h8300/include/asm/hash.h (new file)
@@ -0,0 +1,53 @@
#ifndef _ASM_HASH_H
#define _ASM_HASH_H

/*
 * The later H8SX models have a 32x32-bit multiply, but the H8/300H
 * and H8S have only 16x16->32.  Since it's tolerably compact, this is
 * basically an inlined version of the __mulsi3 code.  Since the inputs
 * are not expected to be small, it's also simplified by skipping the
 * early-out checks.
 *
 * (Since neither CPU has any multi-bit shift instructions, a
 * shift-and-add version is a non-starter.)
 *
 * TODO: come up with an arch-specific version of the hashing in fs/namei.c,
 * since that is heavily dependent on rotates.  Which, as mentioned, suck
 * horribly on H8.
 */

#if defined(CONFIG_CPU_H300H) || defined(CONFIG_CPU_H8S)

#define HAVE_ARCH__HASH_32 1

/*
 * Multiply by k = 0x61C88647.  Fitting this into three registers requires
 * one extra instruction, but reducing register pressure will probably
 * make that back and then some.
 *
 * GCC asm note: %e1 is the high half of operand %1, while %f1 is the
 * low half.  So if %1 is er4, then %e1 is e4 and %f1 is r4.
 *
 * This has been designed to modify x in place, since that's the most
 * common usage, but preserve k, since hash_64() makes two calls in
 * quick succession.
 */
static inline u32 __attribute_const__ __hash_32(u32 x)
{
	u32 temp;

	asm(   "mov.w	%e1,%f0"
	"\n	mulxu.w	%f2,%0"		/* klow * xhigh */
	"\n	mov.w	%f0,%e1"	/* The extra instruction */
	"\n	mov.w	%f1,%f0"
	"\n	mulxu.w	%e2,%0"		/* khigh * xlow */
	"\n	add.w	%e1,%f0"
	"\n	mulxu.w	%f2,%1"		/* klow * xlow */
	"\n	add.w	%f0,%e1"
	: "=&r" (temp), "=r" (x)
	: "%r" (GOLDEN_RATIO_32), "1" (x));
	return x;
}

#endif
#endif /* _ASM_HASH_H */
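For readers who do not speak H8 assembly, the eight instructions above build the low 32 bits of x * GOLDEN_RATIO_32 out of three 16x16->32 multiplies. A plain-C model of the same computation (the helper name is hypothetical, not part of the patch):

static inline u32 mul32_via_16x16(u32 x, u32 k)
{
	u32 xlo = x & 0xffff, xhi = x >> 16;
	u32 klo = k & 0xffff, khi = k >> 16;

	/*
	 * The two cross products only contribute to the upper half;
	 * the xhi * khi product would sit entirely above bit 31, so
	 * it is never computed, exactly as in the asm.
	 */
	return klo * xlo + ((klo * xhi + khi * xlo) << 16);
}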
arch/m68k/include/asm/hash.h (new file)
@@ -0,0 +1,59 @@
#ifndef _ASM_HASH_H
#define _ASM_HASH_H

/*
 * If CONFIG_M68000=y (original mc68000/010), this file is #included
 * to work around the lack of a MULU.L instruction.
 */

#define HAVE_ARCH__HASH_32 1
/*
 * While it would be legal to substitute a different hash operation
 * entirely, let's keep it simple and just use an optimized multiply
 * by GOLDEN_RATIO_32 = 0x61C88647.
 *
 * The best way to do that appears to be to multiply by 0x8647 with
 * shifts and adds, and use mulu.w to multiply the high half by 0x61C8.
 *
 * Because the 68000 has multi-cycle shifts, this addition chain is
 * chosen to minimise the shift distances.
 *
 * Despite every attempt to spoon-feed it simple operations, GCC
 * 6.1.1 doggedly insists on doing annoying things like converting
 * "lsl.l #2,<reg>" (12 cycles) to two adds (8+8 cycles).
 *
 * It also likes to notice two shifts in a row, like "a = x << 2" and
 * "a <<= 7", and convert that to "a = x << 9".  But shifts longer
 * than 8 bits are extra-slow on m68k, so that's a lose.
 *
 * Since the 68000 is a very simple in-order processor with no
 * instruction scheduling effects on execution time, we can safely
 * take it out of GCC's hands and write one big asm() block.
 *
 * Without calling overhead, this operation is 30 bytes (14 instructions
 * plus one immediate constant) and 166 cycles.
 *
 * (Because %2 is fetched twice, it can't be postincrement, and thus it
 * can't be a fully general "g" or "m".  Register is preferred, but
 * offsettable memory or immediate will work.)
 */
static inline u32 __attribute_const__ __hash_32(u32 x)
{
	u32 a, b;

	asm(   "move.l	%2,%0"	/* a = x * 0x0001 */
	"\n	lsl.l	#2,%0"	/* a = x * 0x0004 */
	"\n	move.l	%0,%1"
	"\n	lsl.l	#7,%0"	/* a = x * 0x0200 */
	"\n	add.l	%2,%0"	/* a = x * 0x0201 */
	"\n	add.l	%0,%1"	/* b = x * 0x0205 */
	"\n	add.l	%0,%0"	/* a = x * 0x0402 */
	"\n	add.l	%0,%1"	/* b = x * 0x0607 */
	"\n	lsl.l	#5,%0"	/* a = x * 0x8040 */
	: "=&d,d" (a), "=&r,r" (b)
	: "r,roi?" (x));	/* a+b = x*0x8647 */

	return ((u16)(x*0x61c8) << 16) + a + b;
}

#endif	/* _ASM_HASH_H */
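The decomposition the asm relies on is that, modulo 2^32, x * 0x61C88647 equals x * 0x8647 plus the low 16 bits of x * 0x61C8 shifted into the high half. A plain-C model of the whole function, mirroring the shift-and-add chain in the register comments (hypothetical helper name, shown only to make those comments checkable):

static inline u32 hash_32_m68k_model(u32 x)
{
	u32 a, b;

	a = x << 2;	/* a = x * 0x0004 */
	b = a;
	a <<= 7;	/* a = x * 0x0200 */
	a += x;		/* a = x * 0x0201 */
	b += a;		/* b = x * 0x0205 */
	a += a;		/* a = x * 0x0402 */
	b += a;		/* b = x * 0x0607 */
	a <<= 5;	/* a = x * 0x8040; a + b = x * 0x8647 */

	/* Only the low 16 bits of the x * 0x61C8 product survive. */
	return ((u32)(u16)(x * 0x61c8) << 16) + a + b;
}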
arch/microblaze/include/asm/hash.h (new file)
@@ -0,0 +1,81 @@
#ifndef _ASM_HASH_H
#define _ASM_HASH_H

/*
 * Fortunately, most people who want to run Linux on Microblaze enable
 * both multiplier and barrel shifter, but omitting them is technically
 * a supported configuration.
 *
 * With just a barrel shifter, we can implement an efficient constant
 * multiply using shifts and adds.  GCC can find a 9-step solution, but
 * this 6-step solution was found by Yevgen Voronenko's implementation
 * of the Hcub algorithm at http://spiral.ece.cmu.edu/mcm/gen.html.
 *
 * That software is really not designed for a single multiplier this large,
 * but if you run it enough times with different seeds, it'll find several
 * 6-shift, 6-add sequences for computing x * 0x61C88647.  They are all
 *	c = (x << 19) + x;
 *	a = (x << 9) + c;
 *	b = (x << 23) + a;
 *	return (a<<11) + (b<<6) + (c<<3) - b;
 * with variations on the order of the final add.
 *
 * Without even a shifter, it's hopeless; any hash function will suck.
 */

#if CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL == 0

#define HAVE_ARCH__HASH_32 1

/* Multiply by GOLDEN_RATIO_32 = 0x61C88647 */
static inline u32 __attribute_const__ __hash_32(u32 a)
{
#if CONFIG_XILINX_MICROBLAZE0_USE_BARREL
	unsigned int b, c;

	/* Phase 1: Compute three intermediate values */
	b =  a << 23;
	c = (a << 19) + a;
	a = (a <<  9) + c;
	b += a;

	/* Phase 2: Compute (a << 11) + (b << 6) + (c << 3) - b */
	a <<= 5;
	a += b;		/* (a << 5) + b */
	a <<= 3;
	a += c;		/* (a << 8) + (b << 3) + c */
	a <<= 3;
	return a - b;	/* (a << 11) + (b << 6) + (c << 3) - b */
#else
	/*
	 * "This is really going to hurt."
	 *
	 * Without a barrel shifter, left shifts are implemented as
	 * repeated additions, and the best we can do is an optimal
	 * addition-subtraction chain.  This one is not known to be
	 * optimal, but at 37 steps, it's decent for a 31-bit multiplier.
	 *
	 * Question: given its size (37*4 = 148 bytes per instance),
	 * and slowness, is this worth having inline?
	 */
	unsigned int b, c, d;

	b = a << 4;	/* 4    */
	c = b << 1;	/* 1  5 */
	b += a;		/* 1  6 */
	c += b;		/* 1  7 */
	c <<= 3;	/* 3 10 */
	c -= a;		/* 1 11 */
	d = c << 7;	/* 7 18 */
	d += b;		/* 1 19 */
	d <<= 8;	/* 8 27 */
	d += a;		/* 1 28 */
	d <<= 1;	/* 1 29 */
	d += b;		/* 1 30 */
	d <<= 6;	/* 6 36 */
	return d + c;	/* 1 37 total instructions */
#endif
}

#endif /* !CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL */
#endif /* _ASM_HASH_H */
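Both chains are easy to sanity-check outside the kernel. A small userspace program (a sketch, not part of the series) confirming that the 6-step barrel-shifter chain and the 37-step shift-free chain both compute x * 0x61C88647 modulo 2^32:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_32 0x61C88647u

/* The 6-shift, 6-add Hcub chain from the comment above. */
static uint32_t hcub_chain(uint32_t x)
{
	uint32_t c = (x << 19) + x;
	uint32_t a = (x << 9) + c;
	uint32_t b = (x << 23) + a;

	return (a << 11) + (b << 6) + (c << 3) - b;
}

/* The 37-step addition chain used when there is no barrel shifter. */
static uint32_t addsub_chain(uint32_t a)
{
	uint32_t b, c, d;

	b = a << 4;
	c = b << 1;
	b += a;
	c += b;
	c <<= 3;
	c -= a;
	d = c << 7;
	d += b;
	d <<= 8;
	d += a;
	d <<= 1;
	d += b;
	d <<= 6;
	return d + c;
}

int main(void)
{
	uint32_t x = 1;
	int i;

	for (i = 0; i < 100000; i++) {
		assert(hcub_chain(x) == x * GOLDEN_RATIO_32);
		assert(addsub_chain(x) == x * GOLDEN_RATIO_32);
		x = x * 2654435761u + 12345;	/* arbitrary test inputs */
	}
	printf("both chains match x * 0x61C88647\n");
	return 0;
}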
include/linux/stringhash.h (new file)
@@ -0,0 +1,76 @@
#ifndef __LINUX_STRINGHASH_H
#define __LINUX_STRINGHASH_H

#include <linux/compiler.h>	/* For __pure */
#include <linux/types.h>	/* For u32, u64 */

/*
 * Routines for hashing strings of bytes to a 32-bit hash value.
 *
 * These hash functions are NOT GUARANTEED STABLE between kernel
 * versions, architectures, or even repeated boots of the same kernel.
 * (E.g. they may depend on boot-time hardware detection or be
 * deliberately randomized.)
 *
 * They are also not intended to be secure against collisions caused by
 * malicious inputs; much slower hash functions are required for that.
 *
 * They are optimized for pathname components, meaning short strings.
 * Even if a majority of files have longer names, the dynamic profile of
 * pathname components skews short due to short directory names.
 * (E.g. /usr/lib/libsesquipedalianism.so.3.141.)
 */

/*
 * Version 1: one byte at a time.  Example of use:
 *
 * unsigned long hash = init_name_hash();
 * while (*p)
 *	hash = partial_name_hash(tolower(*p++), hash);
 * hash = end_name_hash(hash);
 *
 * Although this is designed for bytes, fs/hfsplus/unicode.c
 * abuses it to hash 16-bit values.
 */

/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
#define init_name_hash()		0

/* partial hash update function. Assume roughly 4 bits per character */
static inline unsigned long
partial_name_hash(unsigned long c, unsigned long prevhash)
{
	return (prevhash + (c << 4) + (c >> 4)) * 11;
}

/*
 * Finally: cut down the number of bits to an int value (and try to
 * avoid losing bits)
 */
static inline unsigned long end_name_hash(unsigned long hash)
{
	return (unsigned int)hash;
}

/*
 * Version 2: One word (32 or 64 bits) at a time.
 * If CONFIG_DCACHE_WORD_ACCESS is defined (meaning <asm/word-at-a-time.h>
 * exists, which describes major Linux platforms like x86 and ARM), then
 * this computes a different hash function much faster.
 *
 * If not set, this falls back to a wrapper around the preceding.
 */
extern unsigned int __pure full_name_hash(const char *, unsigned int);

/*
 * A hash_len is a u64 with the hash of a string in the low
 * half and the length in the high half.
 */
#define hashlen_hash(hashlen) ((u32)(hashlen))
#define hashlen_len(hashlen)  ((u32)((hashlen) >> 32))
#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash))

/* Return the "hash_len" (hash and length) of a null-terminated string */
extern u64 __pure hashlen_string(const char *name);

#endif	/* __LINUX_STRINGHASH_H */
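The hash_len packing is the point of this API: with the length in the high half and the hash in the low half, a single 64-bit comparison checks both at once during a lookup. A minimal illustration with made-up values (assumes BUG_ON from <linux/bug.h>; the demo function is hypothetical):

static void hashlen_demo(void)
{
	u64 hl = hashlen_create(0x1a2b3c4d, 5);	/* 5-byte name */

	/* Packed value: length in bits 63..32, hash in bits 31..0. */
	BUG_ON(hl != 0x000000051a2b3c4dULL);
	BUG_ON(hashlen_hash(hl) != 0x1a2b3c4d);
	BUG_ON(hashlen_len(hl) != 5);
}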
lib/test_hash.c (new file)
@@ -0,0 +1,250 @@
/*
 * Test cases for <linux/hash.h> and <linux/stringhash.h>
 * This just verifies that various ways of computing a hash
 * produce the same thing and, for cases where a k-bit hash
 * value is requested, is of the requested size.
 *
 * We fill a buffer with a 255-byte null-terminated string,
 * and use both full_name_hash() and hashlen_string() to hash the
 * substrings from i to j, where 0 <= i <= j <= 256.
 *
 * The returned values are used to check that __hash_32() and
 * __hash_32_generic() compute the same thing.  Likewise hash_32()
 * and hash_64().
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt "\n"

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/stringhash.h>
#include <linux/printk.h>

/* 32-bit XORSHIFT generator.  Seed must not be zero. */
static u32 __init __attribute_const__
xorshift(u32 seed)
{
	seed ^= seed << 13;
	seed ^= seed >> 17;
	seed ^= seed << 5;
	return seed;
}

/* Given a non-zero x, returns a non-zero byte. */
static u8 __init __attribute_const__
mod255(u32 x)
{
	x = (x & 0xffff) + (x >> 16);	/* 1 <= x <= 0x1fffe */
	x = (x & 0xff) + (x >> 8);	/* 1 <= x <= 0x2fd */
	x = (x & 0xff) + (x >> 8);	/* 1 <= x <= 0x100 */
	x = (x & 0xff) + (x >> 8);	/* 1 <= x <= 0xff */
	return x;
}

/* Fill the buffer with non-zero bytes. */
static void __init
fill_buf(char *buf, size_t len, u32 seed)
{
	size_t i;

	for (i = 0; i < len; i++) {
		seed = xorshift(seed);
		buf[i] = mod255(seed);
	}
}

/*
 * Test the various integer hash functions.  h64 (or its low-order bits)
 * is the integer to hash.  hash_or accumulates the OR of the hash values,
 * which are later checked to see that they cover all the requested bits.
 *
 * Because these functions (as opposed to the string hashes) are all
 * inline, the code being tested is actually in the module, and you can
 * recompile and re-test the module without rebooting.
 */
static bool __init
test_int_hash(unsigned long long h64, u32 hash_or[2][33])
{
	int k;
	u32 h0 = (u32)h64, h1, h2;

	/* Test __hash_32 */
	hash_or[0][0] |= h1 = __hash_32(h0);
#ifdef HAVE_ARCH__HASH_32
	hash_or[1][0] |= h2 = __hash_32_generic(h0);
#if HAVE_ARCH__HASH_32 == 1
	if (h1 != h2) {
		pr_err("__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
			h0, h1, h2);
		return false;
	}
#endif
#endif

	/* Test k = 1..32 bits */
	for (k = 1; k <= 32; k++) {
		u32 const m = ((u32)2 << (k-1)) - 1;	/* Low k bits set */

		/* Test hash_32 */
		hash_or[0][k] |= h1 = hash_32(h0, k);
		if (h1 > m) {
			pr_err("hash_32(%#x, %d) = %#x > %#x", h0, k, h1, m);
			return false;
		}
#ifdef HAVE_ARCH_HASH_32
		h2 = hash_32_generic(h0, k);
#if HAVE_ARCH_HASH_32 == 1
		if (h1 != h2) {
			pr_err("hash_32(%#x, %d) = %#x != hash_32_generic() "
				"= %#x", h0, k, h1, h2);
			return false;
		}
#else
		if (h2 > m) {
			pr_err("hash_32_generic(%#x, %d) = %#x > %#x",
				h0, k, h2, m);
			return false;
		}
#endif
#endif
		/* Test hash_64 */
		hash_or[1][k] |= h1 = hash_64(h64, k);
		if (h1 > m) {
			pr_err("hash_64(%#llx, %d) = %#x > %#x", h64, k, h1, m);
			return false;
		}
#ifdef HAVE_ARCH_HASH_64
		h2 = hash_64_generic(h64, k);
#if HAVE_ARCH_HASH_64 == 1
		if (h1 != h2) {
			pr_err("hash_64(%#llx, %d) = %#x != hash_64_generic() "
				"= %#x", h64, k, h1, h2);
			return false;
		}
#else
		if (h2 > m) {
			pr_err("hash_64_generic(%#llx, %d) = %#x > %#x",
				h64, k, h2, m);
			return false;
		}
#endif
#endif
	}

	(void)h2;	/* Suppress unused variable warning */
	return true;
}

#define SIZE 256	/* Run time is cubic in SIZE */

static int __init
test_hash_init(void)
{
	char buf[SIZE+1];
	u32 string_or = 0, hash_or[2][33] = { 0 };
	unsigned tests = 0;
	unsigned long long h64 = 0;
	int i, j;

	fill_buf(buf, SIZE, 1);

	/* Test every possible substring in the buffer (including empty). */
	for (j = SIZE; j > 0; --j) {
		buf[j] = '\0';

		for (i = 0; i <= j; i++) {
			u64 hashlen = hashlen_string(buf+i);
			u32 h0 = full_name_hash(buf+i, j-i);

			/* Check that hashlen_string gets the length right */
			if (hashlen_len(hashlen) != j-i) {
				pr_err("hashlen_string(%d..%d) returned length %u, expected %d",
					i, j, hashlen_len(hashlen), j-i);
				return -EINVAL;
			}
			/* Check that the hashes match */
			if (hashlen_hash(hashlen) != h0) {
				pr_err("hashlen_string(%d..%d) = %08x != full_name_hash() = %08x",
					i, j, hashlen_hash(hashlen), h0);
				return -EINVAL;
			}

			string_or |= h0;
			h64 = h64 << 32 | h0;	/* For use with hash_64 */
			if (!test_int_hash(h64, hash_or))
				return -EINVAL;
			tests++;
		} /* i */
	} /* j */

	/* The OR of all the hash values should cover all the bits */
	if (~string_or) {
		pr_err("OR of all string hash results = %#x != %#x",
			string_or, -1u);
		return -EINVAL;
	}
	if (~hash_or[0][0]) {
		pr_err("OR of all __hash_32 results = %#x != %#x",
			hash_or[0][0], -1u);
		return -EINVAL;
	}
#ifdef HAVE_ARCH__HASH_32
#if HAVE_ARCH__HASH_32 != 1	/* Test is pointless if results match */
	if (~hash_or[1][0]) {
		pr_err("OR of all __hash_32_generic results = %#x != %#x",
			hash_or[1][0], -1u);
		return -EINVAL;
	}
#endif
#endif

	/* Likewise for all the i-bit hash values */
	for (i = 1; i <= 32; i++) {
		u32 const m = ((u32)2 << (i-1)) - 1;	/* Low i bits set */

		if (hash_or[0][i] != m) {
			pr_err("OR of all hash_32(%d) results = %#x (%#x expected)",
				i, hash_or[0][i], m);
			return -EINVAL;
		}
		if (hash_or[1][i] != m) {
			pr_err("OR of all hash_64(%d) results = %#x (%#x expected)",
				i, hash_or[1][i], m);
			return -EINVAL;
		}
	}

	/* Issue notices about skipped tests. */
#ifndef HAVE_ARCH__HASH_32
	pr_info("__hash_32() has no arch implementation to test.");
#elif HAVE_ARCH__HASH_32 != 1
	pr_info("__hash_32() is arch-specific; not compared to generic.");
#endif
#ifndef HAVE_ARCH_HASH_32
	pr_info("hash_32() has no arch implementation to test.");
#elif HAVE_ARCH_HASH_32 != 1
	pr_info("hash_32() is arch-specific; not compared to generic.");
#endif
#ifndef HAVE_ARCH_HASH_64
	pr_info("hash_64() has no arch implementation to test.");
#elif HAVE_ARCH_HASH_64 != 1
	pr_info("hash_64() is arch-specific; not compared to generic.");
#endif

	pr_notice("%u tests passed.", tests);

	return 0;
}

static void __exit test_hash_exit(void)
{
}

module_init(test_hash_init);	/* Does everything */
module_exit(test_hash_exit);	/* Does nothing */

MODULE_LICENSE("GPL");
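To run this yourself: the series adds the module under a Kconfig option (CONFIG_TEST_HASH in lib/Kconfig.debug, if I recall the plumbing correctly); build it as a module, load it with modprobe test_hash, and the pass/fail verdicts above appear in the kernel log via pr_err()/pr_notice().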