/*
 * Copyright (C) 2017 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
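
/*
 * Emulate sub-word (1 and 2 byte) xchg() and cmpxchg() operations in terms
 * of the native 32-bit cmpxchg(), by operating on the naturally aligned
 * 32-bit word that contains the value of interest.
 */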

#include <linux/bitops.h>
#include <asm/cmpxchg.h>

unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
{
	u32 old32, new32, load32, mask;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask value to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	val &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * exchange within the naturally aligned 4 byte integer that
	 * includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
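	/*
	 * On big-endian systems the sub-word value sits at the opposite end
	 * of the 32-bit word, so mirror the byte offset before converting it
	 * to a bit shift.
	 */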
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;

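	/*
	 * Replace only the bytes covered by the mask and attempt to store
	 * the result with cmpxchg(). If the word was modified concurrently,
	 * retry with the value the failed cmpxchg() returned.
	 */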
	do {
		old32 = load32;
		new32 = (load32 & ~mask) | (val << shift);
		load32 = cmpxchg(ptr32, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> shift;
}

unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
			      unsigned long new, unsigned int size)
{
	u32 mask, old32, new32, load32;
	volatile u32 *ptr32;
	unsigned int shift;
	u32 load;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;

	while (true) {
		/*
		 * Ensure the byte we want to exchange matches the expected
		 * old value, and if not then bail.
		 */
		load = (load32 & mask) >> shift;
		if (load != old)
			return load;

		/*
		 * Calculate the old & new values of the naturally aligned
		 * 4 byte integer that include the byte we want to exchange.
		 * Attempt to exchange the old value for the new value, and
		 * return if we succeed.
		 */
		old32 = (load32 & ~mask) | (old << shift);
		new32 = (load32 & ~mask) | (new << shift);
		load32 = cmpxchg(ptr32, old32, new32);
		if (load32 == old32)
			return old;
	}
}