#include <xen/rwlock.h>
#include <xen/irq.h>
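
/*
 * The queued rwlock functions below are only the contended slow paths; the
 * normal entry points are the inline fast paths in xen/rwlock.h, used
 * roughly as (sketch only):
 *
 *     read_lock(&lk);    ... shared section ...     read_unlock(&lk);
 *     write_lock(&lk);   ... exclusive section ...  write_unlock(&lk);
 *
 * The fast paths fall through to queue_read_lock_slowpath() /
 * queue_write_lock_slowpath() only when the lock is held in a conflicting
 * mode.
 */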

/*
 * rspin_until_writer_unlock - spin until writer is gone.
 * @lock: Pointer to queue rwlock structure.
 * @cnts: Current queue rwlock writer status byte.
 *
 * In interrupt context or at the head of the queue, the reader will just
 * increment the reader count & wait until the writer releases the lock.
 */
static inline void rspin_until_writer_unlock(rwlock_t *lock, u32 cnts)
{
    while ( (cnts & _QW_WMASK) == _QW_LOCKED )
    {
        cpu_relax();
        smp_rmb();
        cnts = atomic_read(&lock->cnts);
    }
}

/*
 * queue_read_lock_slowpath - acquire read lock of a queue rwlock.
 * @lock: Pointer to queue rwlock structure.
 */
void queue_read_lock_slowpath(rwlock_t *lock)
{
    u32 cnts;

    /*
     * Readers come here when they cannot get the lock without waiting.
     */
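    /*
     * The fast path in rwlock.h speculatively added _QR_BIAS before noticing
     * the writer; take that reader bias back out before queueing.
     */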
    atomic_sub(_QR_BIAS, &lock->cnts);

    /*
     * Put the reader into the wait queue.
     */
    spin_lock(&lock->lock);

    /*
     * At the head of the wait queue now, wait until the writer state
     * goes to 0 and then try to increment the reader count and get
     * the lock. It is possible that an incoming writer may steal the
     * lock in the interim, so it is necessary to check the writer byte
     * to make sure that the write lock isn't taken.
     */
    while ( atomic_read(&lock->cnts) & _QW_WMASK )
        cpu_relax();

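    /*
     * Re-add the reader bias to take the lock.  atomic_add_return() hands
     * back the post-increment value, so subtracting _QR_BIAS again leaves
     * the value the lock held just before our increment for the writer
     * check in rspin_until_writer_unlock().
     */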
    cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS;
    rspin_until_writer_unlock(lock, cnts);

    /*
     * Signal the next one in queue to become queue head.
     */
    spin_unlock(&lock->lock);
}

/*
 * queue_write_lock_slowpath - acquire write lock of a queue rwlock.
 * @lock: Pointer to queue rwlock structure.
 */
void queue_write_lock_slowpath(rwlock_t *lock)
{
    u32 cnts;

    /* Put the writer into the wait queue. */
    spin_lock(&lock->lock);

    /* Try to acquire the lock directly if no reader is present. */
    if ( !atomic_read(&lock->cnts) &&
         (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0) )
        goto unlock;

    /*
     * Set the waiting flag to notify readers that a writer is pending,
     * or wait for a previous writer to go away.
     */
    for ( ; ; )
    {
        cnts = atomic_read(&lock->cnts);
        if ( !(cnts & _QW_WMASK) &&
             (atomic_cmpxchg(&lock->cnts, cnts,
                             cnts | _QW_WAITING) == cnts) )
            break;

        cpu_relax();
    }

    /* When no more readers, set the locked flag. */
    for ( ; ; )
    {
        cnts = atomic_read(&lock->cnts);
        if ( (cnts == _QW_WAITING) &&
             (atomic_cmpxchg(&lock->cnts, _QW_WAITING,
                             _QW_LOCKED) == _QW_WAITING) )
            break;

        cpu_relax();
    }
 unlock:
    spin_unlock(&lock->lock);
}
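
/*
 * Scratch cpumask used by _percpu_write_lock() to track which CPUs may
 * still be inside a per-CPU (fast path) read-side critical section.
 */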
static DEFINE_PER_CPU(cpumask_t, percpu_rwlock_readers);

void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
                percpu_rwlock_t *percpu_rwlock)
{
    unsigned int cpu;
    cpumask_t *rwlock_readers = &this_cpu(percpu_rwlock_readers);

    /* Validate the correct per_cpudata variable has been provided. */
    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);

    /*
     * First take the write lock to protect against other writers or slow
     * path readers.
     */
    write_lock(&percpu_rwlock->rwlock);

    /* Now set the global variable so that readers start using read_lock. */
    percpu_rwlock->writer_activating = 1;
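    /*
     * Pairs with the barrier in the per-CPU read lock fast path: either the
     * reader sees writer_activating and falls back to read_lock(), or the
     * loop below sees its per_cpudata entry and waits for it to finish.
     */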
    smp_mb();

    /* Using a per cpu cpumask is only safe if there is no nesting. */
    ASSERT(!in_irq());
    cpumask_copy(rwlock_readers, &cpu_online_map);

    /* Check if there are any percpu readers in progress on this rwlock. */
    for ( ; ; )
    {
        for_each_cpu(cpu, rwlock_readers)
        {
            /*
             * Remove any percpu readers not contending on this rwlock
             * from our check mask.
             */
            if ( per_cpu_ptr(per_cpudata, cpu) != percpu_rwlock )
                __cpumask_clear_cpu(cpu, rwlock_readers);
        }
        /* Check if we've cleared all percpu readers from check mask. */
        if ( cpumask_empty(rwlock_readers) )
            break;
        /* Give the coherency fabric a break. */
        cpu_relax();
    }
}