/*
 * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#if defined(_WIN32)
# include <windows.h>
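/* SRW locks are only available on Windows Vista (0x600) and newer */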
# if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x600
#  define USE_RWLOCK
# endif
#endif
#include <assert.h>

/*
 * VC++ 2008 or earlier x86 compilers do not have an inline implementation
 * of InterlockedOr64 for 32-bit targets and will fail to run on 32-bit
 * Windows XP.
 * https://docs.microsoft.com/en-us/cpp/intrinsics/interlockedor-intrinsic-functions#requirements
 * To work around this problem, we implement a manual locking mechanism
 * when building with VS2010 (_MSC_VER 1600) or earlier for x86, and for
 * 32-bit MinGW, which has the same limitation.
 */

#if ((defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER <= 1600) || (defined(__MINGW32__) && !defined(__MINGW64__)))
# define NO_INTERLOCKEDOR64
#endif

#include <openssl/crypto.h>
#include <crypto/cryptlib.h>
#include "internal/common.h"
#include "internal/thread_arch.h"
#include "internal/threads_common.h"
#include "internal/rcu.h"
#include "rcu_internal.h"

#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && defined(OPENSSL_SYS_WINDOWS)

# ifdef USE_RWLOCK
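/*
 * An SRWLOCK must be released with the call matching how it was acquired
 * (shared vs exclusive), so we track whether the lock is currently held
 * exclusively alongside the SRWLOCK itself.
 */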
typedef struct {
    SRWLOCK lock;
    int exclusive;
} CRYPTO_win_rwlock;
# endif

/*
 * This defines a quiescent point (qp): the barrier beyond which a
 * writer must wait before freeing data that was atomically updated.
 */
struct rcu_qp {
    volatile uint64_t users;
};

struct thread_qp {
    struct rcu_qp *qp;
    unsigned int depth;
    CRYPTO_RCU_LOCK *lock;
};

# define MAX_QPS 10
/*
 * This is the per-thread tracking data that is assigned to each thread
 * participating in an rcu qp. Each of the MAX_QPS slots records one lock
 * this thread currently holds read-side; qp points to the qp that was
 * held when that lock was last acquired.
 */
struct rcu_thr_data {
    struct thread_qp thread_qps[MAX_QPS];
};

/*
 * This is the internal version of a CRYPTO_RCU_LOCK;
 * it is cast from CRYPTO_RCU_LOCK
 */
struct rcu_lock_st {
    /* Callbacks to call for next ossl_synchronize_rcu */
    struct rcu_cb_item *cb_items;

    /* The context we are being created against */
    OSSL_LIB_CTX *ctx;

    /* Array of quiescent points for synchronization */
    struct rcu_qp *qp_group;

    /* rcu generation counter for in-order retirement */
    uint32_t id_ctr;

    /* Number of elements in qp_group array */
    uint32_t group_count;

    /* Index of the current qp in the qp_group array */
    uint32_t reader_idx;

    /* the next id_ctr value to be retired */
    uint32_t next_to_retire;

    /* index of the next free rcu_qp in the qp_group */
    uint32_t current_alloc_idx;

    /* number of qp's in qp_group array currently being retired */
    uint32_t writers_alloced;

    /* lock protecting write side operations */
    CRYPTO_MUTEX *write_lock;

    /* lock protecting updates to writers_alloced/current_alloc_idx */
    CRYPTO_MUTEX *alloc_lock;

    /* signal to wake threads waiting on alloc_lock */
    CRYPTO_CONDVAR *alloc_signal;

    /* lock to enforce in-order retirement */
    CRYPTO_MUTEX *prior_lock;

    /* signal to wake threads waiting on prior_lock */
    CRYPTO_CONDVAR *prior_signal;

    /* lock used with NO_INTERLOCKEDOR64: VS2010 x86 */
    CRYPTO_RWLOCK *rw_lock;
};

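/* Allocate the array of qps that writers will rotate through */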
static struct rcu_qp *allocate_new_qp_group(struct rcu_lock_st *lock,
                                            uint32_t count)
{
    struct rcu_qp *new =
        OPENSSL_calloc(count, sizeof(*new));

    lock->group_count = count;
    return new;
}

CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
{
    struct rcu_lock_st *new;

    /*
     * We need a minimum of 2 qps
     */
    if (num_writers < 2)
        num_writers = 2;

    ctx = ossl_lib_ctx_get_concrete(ctx);
    if (ctx == NULL)
        return NULL;

    new = OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return NULL;

    new->ctx = ctx;
    new->rw_lock = CRYPTO_THREAD_lock_new();
    new->write_lock = ossl_crypto_mutex_new();
    new->alloc_signal = ossl_crypto_condvar_new();
    new->prior_signal = ossl_crypto_condvar_new();
    new->alloc_lock = ossl_crypto_mutex_new();
    new->prior_lock = ossl_crypto_mutex_new();
    new->qp_group = allocate_new_qp_group(new, num_writers);
    if (new->qp_group == NULL
        || new->alloc_signal == NULL
        || new->prior_signal == NULL
        || new->write_lock == NULL
        || new->alloc_lock == NULL
        || new->prior_lock == NULL
        || new->rw_lock == NULL) {
        CRYPTO_THREAD_lock_free(new->rw_lock);
        OPENSSL_free(new->qp_group);
        ossl_crypto_condvar_free(&new->alloc_signal);
        ossl_crypto_condvar_free(&new->prior_signal);
        ossl_crypto_mutex_free(&new->alloc_lock);
        ossl_crypto_mutex_free(&new->prior_lock);
        ossl_crypto_mutex_free(&new->write_lock);
        OPENSSL_free(new);
        new = NULL;
    }

    return new;
}

void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
{
    /* Like other OpenSSL free routines, tolerate a NULL argument */
    if (lock == NULL)
        return;

    CRYPTO_THREAD_lock_free(lock->rw_lock);
    OPENSSL_free(lock->qp_group);
    ossl_crypto_condvar_free(&lock->alloc_signal);
    ossl_crypto_condvar_free(&lock->prior_signal);
    ossl_crypto_mutex_free(&lock->alloc_lock);
    ossl_crypto_mutex_free(&lock->prior_lock);
    ossl_crypto_mutex_free(&lock->write_lock);
    OPENSSL_free(lock);
}

/* Read side acquisition of the current qp */
static ossl_inline struct rcu_qp *get_hold_current_qp(CRYPTO_RCU_LOCK *lock)
{
    uint32_t qp_idx;
    uint32_t tmp;
    uint64_t tmp64;

    /* get the current qp index */
    for (;;) {
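        /*
         * A writer may swap in a new qp between our load of reader_idx
         * and our increment of the user count below, so re-check the
         * index after taking the reference and retry if it moved
         */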
        CRYPTO_atomic_load_int((int *)&lock->reader_idx, (int *)&qp_idx,
                               lock->rw_lock);
        CRYPTO_atomic_add64(&lock->qp_group[qp_idx].users, (uint64_t)1, &tmp64,
                            lock->rw_lock);
        CRYPTO_atomic_load_int((int *)&lock->reader_idx, (int *)&tmp,
                               lock->rw_lock);
        if (qp_idx == tmp)
            break;
        CRYPTO_atomic_add64(&lock->qp_group[qp_idx].users, (uint64_t)-1, &tmp64,
                            lock->rw_lock);
    }

    return &lock->qp_group[qp_idx];
}

static void ossl_rcu_free_local_data(void *arg)
{
    OSSL_LIB_CTX *ctx = arg;
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, ctx);

    CRYPTO_THREAD_set_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, ctx, NULL);
    OPENSSL_free(data);
}

int ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_thr_data *data;
    int i;
    int available_qp = -1;

    /*
     * Fetch this thread's rcu tracking data, allocating and registering
     * it on first use
     */
    data = CRYPTO_THREAD_get_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, lock->ctx);

    if (data == NULL) {
        data = OPENSSL_zalloc(sizeof(*data));
        if (data == NULL)
            return 0;
        if (!CRYPTO_THREAD_set_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, lock->ctx, data)) {
            OPENSSL_free(data);
            return 0;
        }
        if (!ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data)) {
            OPENSSL_free(data);
            CRYPTO_THREAD_set_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, lock->ctx, NULL);
            return 0;
        }
    }

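    /*
     * Look for an existing hold on this lock; failing that, remember the
     * first free per-thread slot so we can take a new hold below
     */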
    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
            available_qp = i;
        /* If we have a hold on this lock already, we're good */
        if (data->thread_qps[i].lock == lock)
            return 1;
    }

    /*
     * if we get here, then we don't have a hold on this lock yet
     */
    assert(available_qp != -1);

    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
    data->thread_qps[available_qp].depth = 1;
    data->thread_qps[available_qp].lock = lock;
    return 1;
}

void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
{
    ossl_crypto_mutex_lock(lock->write_lock);
}

void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
{
    ossl_crypto_mutex_unlock(lock->write_lock);
}

void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, lock->ctx);
    int i;
    LONG64 ret;

    assert(data != NULL);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth--;
            if (data->thread_qps[i].depth == 0) {
                CRYPTO_atomic_add64(&data->thread_qps[i].qp->users,
                                    (uint64_t)-1, (uint64_t *)&ret,
                                    lock->rw_lock);
                OPENSSL_assert(ret >= 0);
                data->thread_qps[i].qp = NULL;
                data->thread_qps[i].lock = NULL;
            }
            return;
        }
    }
}

/*
 * Write side allocation routine to get the current qp
 * and replace it with a new one
 */
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
{
    uint32_t current_idx;
    uint32_t tmp;

    ossl_crypto_mutex_lock(lock->alloc_lock);
    /*
     * we need at least one qp to be available with one
     * left over, so that readers can start working on
     * one that isn't yet being waited on
     */
    while (lock->group_count - lock->writers_alloced < 2)
        /* we have to wait for one to be free */
        ossl_crypto_condvar_wait(lock->alloc_signal, lock->alloc_lock);

    current_idx = lock->current_alloc_idx;

    /* Allocate the qp */
    lock->writers_alloced++;

    /* increment the allocation index */
    lock->current_alloc_idx =
        (lock->current_alloc_idx + 1) % lock->group_count;

    /* get and insert a new id */
    *curr_id = lock->id_ctr;
    lock->id_ctr++;

    /* update the reader index to be the prior qp */
    tmp = lock->current_alloc_idx;
# if (defined(NO_INTERLOCKEDOR64))
    CRYPTO_THREAD_write_lock(lock->rw_lock);
    lock->reader_idx = tmp;
    CRYPTO_THREAD_unlock(lock->rw_lock);
# else
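    /*
     * InterlockedExchange is a full memory barrier, so a reader that
     * observes the new index also observes all writes made before the swap
     */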
    InterlockedExchange((LONG volatile *)&lock->reader_idx, tmp);
# endif

    /* wake up any waiters */
    ossl_crypto_condvar_broadcast(lock->alloc_signal);
    ossl_crypto_mutex_unlock(lock->alloc_lock);
    return &lock->qp_group[current_idx];
}

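/*
 * Return a qp to the pool once its grace period has completed and wake
 * any writers waiting in update_qp() for a free qp
 */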
static void retire_qp(CRYPTO_RCU_LOCK *lock,
                      struct rcu_qp *qp)
{
    ossl_crypto_mutex_lock(lock->alloc_lock);
    lock->writers_alloced--;
    ossl_crypto_condvar_broadcast(lock->alloc_signal);
    ossl_crypto_mutex_unlock(lock->alloc_lock);
}

void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_qp *qp;
    uint64_t count;
    uint32_t curr_id;
    struct rcu_cb_item *cb_items, *tmpcb;

    /* before we do anything else, let's grab the cb list */
    ossl_crypto_mutex_lock(lock->write_lock);
    cb_items = lock->cb_items;
    lock->cb_items = NULL;
    ossl_crypto_mutex_unlock(lock->write_lock);

    qp = update_qp(lock, &curr_id);

    /* retire in order */
    ossl_crypto_mutex_lock(lock->prior_lock);
    while (lock->next_to_retire != curr_id)
        ossl_crypto_condvar_wait(lock->prior_signal, lock->prior_lock);

    /* wait for the reader count to reach zero */
    do {
        CRYPTO_atomic_load(&qp->users, &count, lock->rw_lock);
    } while (count != (uint64_t)0);

    lock->next_to_retire++;
    ossl_crypto_condvar_broadcast(lock->prior_signal);
    ossl_crypto_mutex_unlock(lock->prior_lock);

    retire_qp(lock, qp);

    /* handle any callbacks that we have */
    while (cb_items != NULL) {
        tmpcb = cb_items;
        cb_items = cb_items->next;
        tmpcb->fn(tmpcb->data);
        OPENSSL_free(tmpcb);
    }
}

/*
 * Note, must be called under the protection of ossl_rcu_write_lock
 */
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
{
    struct rcu_cb_item *new;

    new = OPENSSL_zalloc(sizeof(struct rcu_cb_item));
    if (new == NULL)
        return 0;
    new->data = data;
    new->fn = cb;

    new->next = lock->cb_items;
    lock->cb_items = new;

    return 1;
}

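/*
 * A plain load is sufficient on the read side; publication ordering is
 * provided by the full barrier of InterlockedExchangePointer in
 * ossl_rcu_assign_uptr() below
 */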
void *ossl_rcu_uptr_deref(void **p)
{
    return (void *)*p;
}

void ossl_rcu_assign_uptr(void **p, void **v)
{
    InterlockedExchangePointer((void * volatile *)p, (void *)*v);
}

CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
    CRYPTO_RWLOCK *lock;
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock;

    if ((lock = OPENSSL_zalloc(sizeof(CRYPTO_win_rwlock))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;
    rwlock = lock;
    InitializeSRWLock(&rwlock->lock);
# else
    if ((lock = OPENSSL_zalloc(sizeof(CRITICAL_SECTION))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

#  if !defined(_WIN32_WCE)
    /* 0x400 is the spin count value suggested in the documentation */
    if (!InitializeCriticalSectionAndSpinCount(lock, 0x400)) {
        OPENSSL_free(lock);
        return NULL;
    }
#  else
    InitializeCriticalSection(lock);
#  endif
# endif

    return lock;
}

__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    AcquireSRWLockShared(&rwlock->lock);
# else
    EnterCriticalSection(lock);
# endif
    return 1;
}

__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    AcquireSRWLockExclusive(&rwlock->lock);
    rwlock->exclusive = 1;
# else
    EnterCriticalSection(lock);
# endif
    return 1;
}

int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    if (rwlock->exclusive) {
        rwlock->exclusive = 0;
        ReleaseSRWLockExclusive(&rwlock->lock);
    } else {
        ReleaseSRWLockShared(&rwlock->lock);
    }
# else
    LeaveCriticalSection(lock);
# endif
    return 1;
}

void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
{
    if (lock == NULL)
        return;

# ifndef USE_RWLOCK
    DeleteCriticalSection(lock);
# endif
    OPENSSL_free(lock);
}

# define ONCE_UNINITED     0
# define ONCE_ININIT       1
# define ONCE_DONE         2

/*
 * We don't use InitOnceExecuteOnce because that isn't available in WinXP,
 * which we still have to support.
 */
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
    LONG volatile *lock = (LONG *)once;
    LONG result;

    if (*lock == ONCE_DONE)
        return 1;

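    /*
     * Race to move the once from UNINITED to ININIT; the winning thread
     * runs init() exactly once, then publishes ONCE_DONE. Losers spin
     * until the winner finishes
     */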
    do {
        result = InterlockedCompareExchange(lock, ONCE_ININIT, ONCE_UNINITED);
        if (result == ONCE_UNINITED) {
            init();
            *lock = ONCE_DONE;
            return 1;
        }
    } while (result == ONCE_ININIT);

    return (*lock == ONCE_DONE);
}

int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
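    /*
     * TLS slots from TlsAlloc() have no per-thread destructor, so the
     * cleanup argument is ignored; cleanup must be arranged separately
     * (see ossl_init_thread_start() for how the rcu code does this)
     */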
    *key = TlsAlloc();
    if (*key == TLS_OUT_OF_INDEXES)
        return 0;

    return 1;
}


void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
    DWORD last_error;
    void *ret;

    /*
     * TlsGetValue clears the last error even on success, so that callers may
     * distinguish it successfully returning NULL or failing. It is documented
     * to never fail if the argument is a valid index from TlsAlloc, so we do
     * not need to handle this.
     *
     * However, this error-mangling behavior interferes with the caller's use of
     * GetLastError. In particular SSL_get_error queries the error queue to
     * determine whether the caller should look at the OS's errors. To avoid
     * destroying state, save and restore the Windows error.
     *
     * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686812(v=vs.85).aspx
     */
    last_error = GetLastError();
    ret = TlsGetValue(*key);
    SetLastError(last_error);
    return ret;
}

int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
{
    if (TlsSetValue(*key, val) == 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
{
    if (TlsFree(*key) == 0)
        return 0;

    return 1;
}


CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
    return GetCurrentThreadId();
}

int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
{
    return (a == b);
}

int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
# if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val += amount;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
# else
    *ret = (int)InterlockedExchangeAdd((LONG volatile *)val, (LONG)amount)
        + amount;
    return 1;
# endif
}

int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
                        CRYPTO_RWLOCK *lock)
{
# if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val += op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
# else
    *ret = (uint64_t)InterlockedAdd64((LONG64 volatile *)val, (LONG64)op);
    return 1;
# endif
}

int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
                      CRYPTO_RWLOCK *lock)
{
# if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val &= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
# else
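    /*
     * InterlockedAnd64 returns the value prior to the AND, so apply op
     * again to hand back the new value
     */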
    *ret = (uint64_t)InterlockedAnd64((LONG64 volatile *)val, (LONG64)op) & op;
    return 1;
# endif
}

int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
# if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val |= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
# else
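    /*
     * InterlockedOr64 returns the value prior to the OR, so apply op
     * again to hand back the new value
     */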
    *ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, (LONG64)op) | op;
    return 1;
# endif
}

int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
# if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
# else
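    /* An interlocked OR with 0 atomically reads the value with a full barrier */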
    *ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, 0);
    return 1;
# endif
}

int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
{
# if (defined(NO_INTERLOCKEDOR64))
    /* A store mutates *dst, so take the write lock, not the read lock */
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *dst = val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
# else
    InterlockedExchange64((LONG64 volatile *)dst, (LONG64)val);
    return 1;
# endif
}

int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
# if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
# else
    /* On Windows, LONG (but not long) is always the same size as int. */
    *ret = (int)InterlockedOr((LONG volatile *)val, 0);
    return 1;
# endif
}

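/* Windows has no fork(), so fork handlers and fork ids are no-ops here */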
int openssl_init_fork_handlers(void)
{
    return 0;
}

int openssl_get_fork_id(void)
{
    return 0;
}
#endif