#include <xen/rwlock.h>
#include <xen/irq.h>

/*
 * rspin_until_writer_unlock - spin until writer is gone.
 * @lock: Pointer to queue rwlock structure.
 * @cnts: Current queue rwlock lock value.
 *
 * The reader at the head of the wait queue will just increment the reader
 * count and wait until the writer releases the lock.
 */
static inline void rspin_until_writer_unlock(rwlock_t *lock, u32 cnts)
{
    while ( (cnts & _QW_WMASK) == _QW_LOCKED )
    {
        cpu_relax();
        smp_rmb();
        cnts = atomic_read(&lock->cnts);
    }
}

/*
 * queue_read_lock_slowpath - acquire read lock of a queue rwlock.
 * @lock: Pointer to queue rwlock structure.
 */
void queue_read_lock_slowpath(rwlock_t *lock)
{
    u32 cnts;

    /*
     * Readers come here when they cannot get the lock without waiting.
     */
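    /* Undo the _QR_BIAS added by the read-lock fast path in rwlock.h. */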
    atomic_sub(_QR_BIAS, &lock->cnts);

    /*
     * Put the reader into the wait queue.
     *
     * Use the speculation unsafe helper, as it's the caller's responsibility
     * to issue a speculation barrier if required.
     */
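    /*
     * The auxiliary spinlock acts as the wait queue: its holder is the queue
     * head and the only queued waiter that manipulates ->cnts below; later
     * arrivals simply spin on the spinlock.
     */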
    _spin_lock(&lock->lock);

    /*
     * At the head of the wait queue now, wait until the writer state
     * goes to 0 and then try to increment the reader count and get
     * the lock. It is possible that an incoming writer may steal the
     * lock in the interim, so it is necessary to check the writer byte
     * to make sure that the write lock isn't taken.
     */
    while ( atomic_read(&lock->cnts) & _QW_WMASK )
        cpu_relax();

    cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
    rspin_until_writer_unlock(lock, cnts);

    /*
     * Signal the next one in queue to become queue head.
     */
    spin_unlock(&lock->lock);

    lock_enter(&lock->lock.debug);
}

/*
 * queue_write_lock_slowpath - acquire write lock of a queue rwlock.
 * @lock: Pointer to queue rwlock structure.
 */
void queue_write_lock_slowpath(rwlock_t *lock)
{
    u32 cnts;

    /*
     * Put the writer into the wait queue.
     *
     * Use the speculation unsafe helper, as it's the caller's responsibility
     * to issue a speculation barrier if required.
     */
    _spin_lock(&lock->lock);

    /* Try to acquire the lock directly if no reader is present. */
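    /*
     * Note that _write_lock_val() (see rwlock.h) is expected to encode the
     * ID of the locking CPU alongside _QW_LOCKED; that is what later allows
     * the write-lock holder to take the read lock recursively.
     */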
    if ( !atomic_read(&lock->cnts) &&
         (atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0) )
        goto unlock;

    /*
     * Set the waiting flag to notify readers that a writer is pending,
     * or wait for a previous writer to go away.
     */
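    /*
     * Once _QW_WAITING is set, new readers fail their fast path and queue
     * behind lock->lock (held by us), so no further readers can enter the
     * critical section and the reader count will eventually drain to zero.
     */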
    for ( ; ; )
    {
        cnts = atomic_read(&lock->cnts);
        if ( !(cnts & _QW_WMASK) &&
             (atomic_cmpxchg(&lock->cnts, cnts,
                             cnts | _QW_WAITING) == cnts) )
            break;

        cpu_relax();
    }

    /* When no more readers, set the locked flag. */
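    /*
     * cnts == _QW_WAITING means the reader count is zero and only our
     * waiting bit is set, so the cmpxchg atomically trades the waiting bit
     * for the write-locked state.
     */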
    for ( ; ; )
    {
        cnts = atomic_read(&lock->cnts);
        if ( (cnts == _QW_WAITING) &&
             (atomic_cmpxchg(&lock->cnts, _QW_WAITING,
                             _write_lock_val()) == _QW_WAITING) )
            break;

        cpu_relax();
    }
 unlock:
    spin_unlock(&lock->lock);

    lock_enter(&lock->lock.debug);
}

static DEFINE_PER_CPU(cpumask_t, percpu_rwlock_readers);

void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
                        percpu_rwlock_t *percpu_rwlock)
{
    unsigned int cpu;
    cpumask_t *rwlock_readers = &this_cpu(percpu_rwlock_readers);

    /* Validate the correct per_cpudata variable has been provided. */
    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);

    /*
     * First take the write lock to protect against other writers or slow
     * path readers.
     *
     * Note we use the speculation unsafe variant of write_lock(), as the
     * calling wrapper already adds a speculation barrier after the lock has
     * been taken.
     */
    _write_lock(&percpu_rwlock->rwlock);

    /* Now set the global variable so that readers start using read_lock. */
    percpu_rwlock->writer_activating = 1;
    smp_mb();
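    /*
     * The barrier is assumed to pair with the one in the percpu read-lock
     * fast path: a reader either observes writer_activating and falls back
     * to read_lock(), or its per_cpudata pointer is already visible to the
     * scan below.
     */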

    /* Using a per-CPU cpumask is only safe if there is no nesting. */
    ASSERT(!in_irq());
    cpumask_copy(rwlock_readers, &cpu_online_map);

    /* Check if there are any percpu readers in progress on this rwlock. */
    for ( ; ; )
    {
        for_each_cpu(cpu, rwlock_readers)
        {
            /*
             * Remove any percpu readers not contending on this rwlock
             * from our check mask.
             */
            if ( per_cpu_ptr(per_cpudata, cpu) != percpu_rwlock )
                __cpumask_clear_cpu(cpu, rwlock_readers);
        }
        /* Check if we've cleared all percpu readers from the check mask. */
        if ( cpumask_empty(rwlock_readers) )
            break;
        /* Give the coherency fabric a break. */
        cpu_relax();
    }
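    /*
     * At this point every CPU has either left its percpu read section or
     * fallen back to read_lock(), which is held off by the write_lock()
     * taken above, so the writer now has exclusive access.
     */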

    lock_enter(&percpu_rwlock->rwlock.lock.debug);
}