// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Spin and read/write lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *   Rework to support virtual processors
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/smp.h>

/* waiting for a spinlock... */
#if defined(CONFIG_PPC_SPLPAR)
#include <asm/hvcall.h>
#include <asm/smp.h>

void splpar_spin_yield(arch_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;		/* lock is free, nobody to yield to */
	/* The lock word encodes the holder's CPU number in its low 16 bits. */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);

	/* An odd yield count means the holder's virtual CPU is preempted. */
	yield_count = yield_count_of(holder_cpu);
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	/* Order the yield count read before re-sampling the lock word. */
	rmb();
	if (lock->slock != lock_value)
		return;		/* something has changed */
	/* Ask the hypervisor to run the preempted holder in our place. */
	yield_to_preempted(holder_cpu, yield_count);
}
EXPORT_SYMBOL_GPL(splpar_spin_yield);
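
/*
 * Usage sketch (illustrative only; the real slow path lives in the arch
 * spinlock header and may differ): callers spin locally on the lock word
 * and only drop into the hypervisor when the holder looks preempted,
 * roughly:
 *
 *	while (!arch_spin_trylock(lock)) {
 *		do {
 *			if (is_shared_processor())
 *				splpar_spin_yield(lock);
 *		} while (lock->slock != 0);
 *	}
 */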

/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */
void splpar_rw_yield(arch_rwlock_t *rw)
{
	int lock_value;
	unsigned int holder_cpu, yield_count;

	lock_value = rw->lock;
	if (lock_value >= 0)
		return;		/* no write lock at present */
	/* Write-locked: the low 16 bits of the lock word hold the holder CPU. */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);

	/* An odd yield count means the holder's virtual CPU is preempted. */
	yield_count = yield_count_of(holder_cpu);
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	/* Order the yield count read before re-sampling the lock word. */
	rmb();
	if (rw->lock != lock_value)
		return;		/* something has changed */
	/* Ask the hypervisor to run the preempted holder in our place. */
	yield_to_preempted(holder_cpu, yield_count);
}
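
/*
 * Usage sketch (illustrative only, mirroring the spinlock case): both
 * rwlock slow paths can yield while a writer holds the lock, e.g.
 *
 *	do {
 *		if (is_shared_processor())
 *			splpar_rw_yield(rw);
 *	} while (rw->lock < 0);
 *
 * A positive lock word is a reader count with no single holder to
 * yield to, which is why splpar_rw_yield() only acts on writers.
 */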
#endif