#ifndef __ASM_X86_REFCOUNT_H
#define __ASM_X86_REFCOUNT_H
/*
 * x86-specific implementation of refcount_t. Based on PAX_REFCOUNT from
 * PaX/grsecurity.
 */
#include <linux/refcount.h>
#include <asm/bug.h>

/*
 * This is the first portion of the refcount error handling, which lives in
 * .text..refcount, and is jumped to from the CPU flag check (in the
 * following macros). This saves the refcount value location into CX for
 * the exception handler to use (in mm/extable.c), and then triggers the
 * central refcount exception. The fixup address for the exception points
 * back to the regular execution flow in .text.
 */
#define _REFCOUNT_EXCEPTION				\
	".pushsection .text..refcount\n"		\
	"111:\tlea %[var], %%" _ASM_CX "\n"		\
	"112:\t" ASM_UD2 "\n"				\
	ASM_UNREACHABLE					\
	".popsection\n"					\
	"113:\n"					\
	_ASM_EXTABLE_REFCOUNT(112b, 113b)
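
/*
 * In other words: the fast path falls straight through to local label
 * 113, while the conditional jump in the check macros below targets
 * label 111 in .text..refcount, which loads the counter's address into
 * CX and executes the UD2 at label 112; _ASM_EXTABLE_REFCOUNT then maps
 * that faulting address back to label 113 so execution resumes in .text.
 */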

/* Trigger refcount exception if refcount result is negative. */
#define REFCOUNT_CHECK_LT_ZERO				\
	"js 111f\n\t"					\
	_REFCOUNT_EXCEPTION

/* Trigger refcount exception if refcount result is zero or negative. */
#define REFCOUNT_CHECK_LE_ZERO				\
	"jz 111f\n\t"					\
	REFCOUNT_CHECK_LT_ZERO

/* Trigger refcount exception unconditionally. */
#define REFCOUNT_ERROR					\
	"jmp 111f\n\t"					\
	_REFCOUNT_EXCEPTION
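
/*
 * Each check macro is meant to follow immediately after the arithmetic
 * instruction whose flags it tests, within the same asm statement, as in
 * refcount_inc() below:
 *
 *	asm volatile(LOCK_PREFIX "incl %0\n\t"
 *		REFCOUNT_CHECK_LT_ZERO
 *		: [var] "+m" (r->refs.counter)
 *		: : "cc", "cx");
 */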

static __always_inline void refcount_add(unsigned int i, refcount_t *r)
{
	asm volatile(LOCK_PREFIX "addl %1,%0\n\t"
		REFCOUNT_CHECK_LT_ZERO
		: [var] "+m" (r->refs.counter)
		: "ir" (i)
		: "cc", "cx");
}
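
/*
 * Note the clobbers shared by all of these helpers: "cc" because the
 * locked arithmetic rewrites the flags, and "cx" because the
 * _REFCOUNT_EXCEPTION error path loads the counter's address into CX
 * for the exception handler in mm/extable.c.
 */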

static __always_inline void refcount_inc(refcount_t *r)
{
	asm volatile(LOCK_PREFIX "incl %0\n\t"
		REFCOUNT_CHECK_LT_ZERO
		: [var] "+m" (r->refs.counter)
		: : "cc", "cx");
}

static __always_inline void refcount_dec(refcount_t *r)
{
	asm volatile(LOCK_PREFIX "decl %0\n\t"
		REFCOUNT_CHECK_LE_ZERO
		: [var] "+m" (r->refs.counter)
		: : "cc", "cx");
}
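
/*
 * refcount_dec() uses the stricter LE_ZERO check: decrementing all the
 * way to zero here is also treated as an error, since a path that can
 * drop the last reference should use refcount_dec_and_test() so the
 * object can actually be freed.
 */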

static __always_inline __must_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	bool ret = GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl",
					     REFCOUNT_CHECK_LT_ZERO,
					     r->refs.counter, e, "er", i, "cx");

	if (ret) {
		smp_acquire__after_ctrl_dep();
		return true;
	}

	return false;
}
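
/*
 * A typical caller pattern (illustrative; obj and free_obj() are
 * hypothetical):
 *
 *	if (refcount_sub_and_test(nr, &obj->ref))
 *		free_obj(obj);
 *
 * The smp_acquire__after_ctrl_dep() upgrades the control dependency on
 * the "hit zero" branch to ACQUIRE ordering, so the free is ordered
 * after all accesses made under references released by other CPUs.
 */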

static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	bool ret = GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl",
					    REFCOUNT_CHECK_LT_ZERO,
					    r->refs.counter, e, "cx");

	if (ret) {
		smp_acquire__after_ctrl_dep();
		return true;
	}

	return false;
}
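
/*
 * This is the usual "put" operation; a typical (hypothetical) helper:
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (refcount_dec_and_test(&o->ref))
 *			kfree(o);
 *	}
 */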

static __always_inline __must_check
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
	int c, result;

	c = atomic_read(&(r->refs));
	do {
		if (unlikely(c == 0))
			return false;

		result = c + i;

		/* Did we try to increment from/to an undesirable state? */
		if (unlikely(c < 0 || c == INT_MAX || result < c)) {
			asm volatile(REFCOUNT_ERROR
				     : : [var] "m" (r->refs.counter)
				     : "cc", "cx");
			break;
		}

	} while (!atomic_try_cmpxchg(&(r->refs), &c, result));

	return c != 0;
}
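
/*
 * Unlike the helpers above, this is a cmpxchg loop: the add is retried
 * whenever another CPU changes the counter between the atomic_read()
 * and the atomic_try_cmpxchg(). If the add would start from or land in
 * a bad state (negative, at INT_MAX, or overflowed), REFCOUNT_ERROR
 * raises the exception unconditionally and the counter is left for the
 * handler in mm/extable.c to saturate rather than being wrapped.
 */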

static __always_inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return refcount_add_not_zero(1, r);
}
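
/*
 * A typical (illustrative) lookup pattern, taking a reference only if
 * the object is still live; lookup_obj() is hypothetical, and a NULL
 * result after the check means the lookup raced with the final put:
 *
 *	rcu_read_lock();
 *	obj = lookup_obj(key);
 *	if (obj && !refcount_inc_not_zero(&obj->ref))
 *		obj = NULL;
 *	rcu_read_unlock();
 */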

#endif /* __ASM_X86_REFCOUNT_H */