/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 * Copyright (c) 2021 Christoph Müllner <cmuellner@linux.com>
 */

#include <sbi/riscv_barrier.h>
#include <sbi/riscv_locks.h>

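/*
 * Ticket spinlock: the 32-bit lock word packs two ticket counters. The
 * "owner" ticket lives in the low bits and the "next" ticket to hand out
 * lives in the bits at and above TICKET_SHIFT. The lock is free when both
 * counters are equal.
 */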
static inline bool spin_lock_unlocked(spinlock_t lock)
{
        return lock.owner == lock.next;
}

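/* Check whether the lock is currently held, without trying to acquire it. */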
bool spin_lock_check(spinlock_t *lock)
{
        RISCV_FENCE(r, rw);
        return !spin_lock_unlocked(*lock);
}

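/*
 * Make a single attempt to acquire the lock: if the owner and next tickets
 * match, the lock is free, so claim it by bumping the next ticket in one
 * LR/SC sequence; otherwise give up immediately. Returns true on success.
 */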
bool spin_trylock(spinlock_t *lock)
{
        unsigned long inc = 1u << TICKET_SHIFT;
        unsigned long mask = 0xffffu << TICKET_SHIFT;
        u32 l0, tmp1, tmp2;

        __asm__ __volatile__(
                /* Get the current lock counters. */
                "1: lr.w.aq %0, %3\n"
                "   slli %2, %0, %6\n"
                "   and %2, %2, %5\n"
                "   and %1, %0, %5\n"
                /* Is the lock free right now? */
                "   bne %1, %2, 2f\n"
                "   add %0, %0, %4\n"
                /* Acquire the lock. */
                "   sc.w.rl %0, %0, %3\n"
                "   bnez %0, 1b\n"
                "2:"
                : "=&r"(l0), "=&r"(tmp1), "=&r"(tmp2), "+A"(*lock)
                : "r"(inc), "r"(mask), "I"(TICKET_SHIFT)
                : "memory");

        return l0 == 0;
}

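/*
 * Acquire the lock: atomically take the next ticket, then spin reading the
 * lock word until the owner ticket matches ours.
 */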
void spin_lock(spinlock_t *lock)
{
        unsigned long inc = 1u << TICKET_SHIFT;
        unsigned long mask = 0xffffu;
        u32 l0, tmp1, tmp2;

        __asm__ __volatile__(
                /* Atomically increment the next ticket. */
                "   amoadd.w.aqrl %0, %4, %3\n"

                /* Did we get the lock? */
                "   srli %1, %0, %6\n"
                "   and %1, %1, %5\n"
                "1: and %2, %0, %5\n"
                "   beq %1, %2, 2f\n"

                /* If not, then spin on the lock. */
                "   lw %0, %3\n"
                RISCV_ACQUIRE_BARRIER
                "   j 1b\n"
                "2:"
                : "=&r"(l0), "=&r"(tmp1), "=&r"(tmp2), "+A"(*lock)
                : "r"(inc), "r"(mask), "I"(TICKET_SHIFT)
                : "memory");
}

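/* Release the lock by handing ownership to the next ticket holder. */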
void spin_unlock(spinlock_t *lock)
{
        __smp_store_release(&lock->owner, lock->owner + 1);
Anup Patel9e8ff052018-12-11 19:24:06 +053077}