/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 * Copyright (c) 2021 Christoph Müllner <cmuellner@linux.com>
 */

#include <sbi/riscv_barrier.h>
#include <sbi/riscv_locks.h>

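/*
 * Ticket lock layout (assuming the little-endian spinlock_t definition in
 * <sbi/riscv_locks.h>): the 32-bit lock word packs a 16-bit "owner"
 * counter in the low half and a 16-bit "next" ticket counter in the high
 * half, with TICKET_SHIFT selecting the "next" half. A hart takes a
 * ticket by incrementing "next", owns the lock once "owner" equals its
 * ticket, and spin_unlock() hands the lock on by incrementing "owner".
 *
 * Usage sketch (hypothetical caller, not part of this file); a
 * zero-initialized spinlock_t is unlocked since owner == next:
 *
 *	static spinlock_t lock;
 *
 *	spin_lock(&lock);
 *	... critical section ...
 *	spin_unlock(&lock);
 */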
static inline bool spin_lock_unlocked(spinlock_t lock)
{
	return lock.owner == lock.next;
}

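/* Returns true if the lock is currently held (i.e. owner != next). */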
bool spin_lock_check(spinlock_t *lock)
{
	RISCV_FENCE(r, rw);
	return !spin_lock_unlocked(*lock);
}

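/*
 * Try to take the lock without waiting: gives up immediately if the lock
 * is already held. Returns true if the lock was acquired.
 */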
bool spin_trylock(spinlock_t *lock)
{
	unsigned long inc = 1u << TICKET_SHIFT;
	unsigned long mask = 0xffffu << TICKET_SHIFT;
	u32 l0, tmp1, tmp2;

	__asm__ __volatile__(
		/* Get the current lock counters. */
		"1:	lr.w.aq	%0, %3\n"
		"	slli	%2, %0, %6\n"
		"	and	%2, %2, %5\n"
		"	and	%1, %0, %5\n"
		/* Is the lock free right now? */
		"	bne	%1, %2, 2f\n"
		"	add	%0, %0, %4\n"
		/* Acquire the lock. */
		"	sc.w.rl	%0, %0, %3\n"
		"	bnez	%0, 1b\n"
		"2:"
		: "=&r"(l0), "=&r"(tmp1), "=&r"(tmp2), "+A"(*lock)
		: "r"(inc), "r"(mask), "I"(TICKET_SHIFT)
		: "memory");

	return l0 == 0;
}

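/*
 * Acquire the lock, spinning until this hart's ticket comes up. Rough C
 * equivalent of the asm below (sketch only; the real code performs the
 * increment atomically and reloads the lock word with acquire ordering):
 *
 *	ticket = fetch_and_add(&lock->next, 1);
 *	while (lock->owner != ticket)
 *		;
 */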
void spin_lock(spinlock_t *lock)
{
	unsigned long inc = 1u << TICKET_SHIFT;
	unsigned long mask = 0xffffu;
	u32 l0, tmp1, tmp2;

	__asm__ __volatile__(
		/* Atomically increment the next ticket. */
		"	amoadd.w.aqrl	%0, %4, %3\n"

		/* Did we get the lock? */
		"	srli	%1, %0, %6\n"
		"	and	%1, %1, %5\n"
		"1:	and	%2, %0, %5\n"
		"	beq	%1, %2, 2f\n"

		/* If not, then spin on the lock. */
		"	lw	%0, %3\n"
		RISCV_ACQUIRE_BARRIER
		"	j	1b\n"
		"2:"
		: "=&r"(l0), "=&r"(tmp1), "=&r"(tmp2), "+A"(*lock)
		: "r"(inc), "r"(mask), "I"(TICKET_SHIFT)
		: "memory");
}

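/*
 * Release the lock: a release store bumps "owner" so the hart holding the
 * next ticket (if any) stops spinning and proceeds.
 */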
void spin_unlock(spinlock_t *lock)
{
	__smp_store_release(&lock->owner, lock->owner + 1);
}