Lines matching references to 'hwlock' in drivers/hwspinlock/hwspinlock_core.c (the Linux hwspinlock core). Each entry gives the source line number, the matching line, and the enclosing function; 'argument' and 'local' mark the lines where hwlock enters scope.
91 int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags) in __hwspin_trylock() argument
95 if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE))) in __hwspin_trylock()
113 ret = spin_trylock_irqsave(&hwlock->lock, *flags); in __hwspin_trylock()
116 ret = spin_trylock_irq(&hwlock->lock); in __hwspin_trylock()
123 ret = spin_trylock(&hwlock->lock); in __hwspin_trylock()
132 ret = hwlock->bank->ops->trylock(hwlock); in __hwspin_trylock()
138 spin_unlock_irqrestore(&hwlock->lock, *flags); in __hwspin_trylock()
141 spin_unlock_irq(&hwlock->lock); in __hwspin_trylock()
148 spin_unlock(&hwlock->lock); in __hwspin_trylock()
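
The three trylock paths above first take a local spinlock in the caller-chosen mode, then attempt the hardware lock through the bank's trylock op, releasing the spinlock again on failure. Callers normally reach this via the static-inline wrappers in include/linux/hwspinlock.h; a minimal sketch, assuming 'hwlock' was already obtained from hwspin_lock_request*() (function name illustrative):

#include <linux/hwspinlock.h>

static int try_touch_shared_region(struct hwspinlock *hwlock)
{
	unsigned long flags;
	int ret;

	/* wrapper around __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, &flags) */
	ret = hwspin_trylock_irqsave(hwlock, &flags);
	if (ret)
		return ret;	/* -EBUSY: another core holds the lock */

	/* ... critical section shared with a remote processor ... */

	hwspin_unlock_irqrestore(hwlock, &flags);
	return 0;
}
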
206 int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to, in __hwspin_lock_timeout() argument
216 ret = __hwspin_trylock(hwlock, mode, flags); in __hwspin_lock_timeout()
238 if (hwlock->bank->ops->relax) in __hwspin_lock_timeout()
239 hwlock->bank->ops->relax(hwlock); in __hwspin_lock_timeout()
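
__hwspin_lock_timeout() simply retries __hwspin_trylock() until the timeout expires, calling the optional relax op between attempts so a vendor driver can throttle the polling. From a consumer's point of view, a minimal sketch (timeout value and names are illustrative; the timeout is in milliseconds):

#include <linux/hwspinlock.h>

#define MY_HWLOCK_TIMEOUT_MS	100	/* hypothetical */

static int update_shared_counter(struct hwspinlock *hwlock, u32 *counter)
{
	unsigned long flags;
	int ret;

	ret = hwspin_lock_timeout_irqsave(hwlock, MY_HWLOCK_TIMEOUT_MS, &flags);
	if (ret)
		return ret;	/* -ETIMEDOUT: lock stayed busy */

	(*counter)++;		/* memory shared with a remote core */

	hwspin_unlock_irqrestore(hwlock, &flags);
	return 0;
}
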
265 void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags) in __hwspin_unlock() argument
267 if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE))) in __hwspin_unlock()
284 hwlock->bank->ops->unlock(hwlock); in __hwspin_unlock()
289 spin_unlock_irqrestore(&hwlock->lock, *flags); in __hwspin_unlock()
292 spin_unlock_irq(&hwlock->lock); in __hwspin_unlock()
299 spin_unlock(&hwlock->lock); in __hwspin_unlock()
342 struct hwspinlock *hwlock; in of_hwspin_lock_get_id() local
362 hwlock = radix_tree_deref_slot(slot); in of_hwspin_lock_get_id()
363 if (unlikely(!hwlock)) in of_hwspin_lock_get_id()
365 if (radix_tree_deref_retry(hwlock)) { in of_hwspin_lock_get_id()
370 if (device_match_of_node(hwlock->bank->dev, args.np)) { in of_hwspin_lock_get_id()
380 if (id < 0 || id >= hwlock->bank->num_locks) { in of_hwspin_lock_get_id()
384 id += hwlock->bank->base_id; in of_hwspin_lock_get_id()
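
of_hwspin_lock_get_id() walks the registered banks and translates a device-tree phandle plus cell args into a global lock id, returning -EPROBE_DEFER while the provider is still missing. A minimal consumer sketch, assuming a 'hwlocks' property at index 0; names are illustrative:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwspinlock.h>

static struct hwspinlock *claim_dt_hwlock(struct device *dev)
{
	struct hwspinlock *hwlock;
	int id;

	id = of_hwspin_lock_get_id(dev->of_node, 0);
	if (id < 0)
		return ERR_PTR(id);	/* may be -EPROBE_DEFER */

	hwlock = hwspin_lock_request_specific(id);
	if (!hwlock)
		return ERR_PTR(-EBUSY);	/* already claimed elsewhere */

	return hwlock;
}
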
421 static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id) in hwspin_lock_register_single() argument
428 ret = radix_tree_insert(&hwspinlock_tree, id, hwlock); in hwspin_lock_register_single()
439 WARN_ON(tmp != hwlock); in hwspin_lock_register_single()
448 struct hwspinlock *hwlock = NULL; in hwspin_lock_unregister_single() local
460 hwlock = radix_tree_delete(&hwspinlock_tree, id); in hwspin_lock_unregister_single()
461 if (!hwlock) { in hwspin_lock_unregister_single()
468 return hwlock; in hwspin_lock_unregister_single()
489 struct hwspinlock *hwlock; in hwspin_lock_register() local
504 hwlock = &bank->lock[i]; in hwspin_lock_register()
506 spin_lock_init(&hwlock->lock); in hwspin_lock_register()
507 hwlock->bank = bank; in hwspin_lock_register()
509 ret = hwspin_lock_register_single(hwlock, base_id + i); in hwspin_lock_register()
536 struct hwspinlock *hwlock, *tmp; in hwspin_lock_unregister() local
540 hwlock = &bank->lock[i]; in hwspin_lock_unregister()
547 WARN_ON(tmp != hwlock); in hwspin_lock_unregister()
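
hwspin_lock_register() is the provider-side entry point: it initializes each lock's backing spinlock, points it at its bank, and inserts it into the global radix tree under base_id + i. A condensed, hypothetical vendor driver might look like this (the register layout, 'my_' names, and one-register-per-lock scheme are all invented; only the ops/register contract comes from the core):

#include <linux/device.h>
#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/platform_device.h>

#include "hwspinlock_internal.h"	/* struct hwspinlock_device, ops */

#define MY_NUM_LOCKS	32

static int my_hwlock_trylock(struct hwspinlock *lock)
{
	/* hypothetical semantics: reading 0 grants ownership */
	return readl(lock->priv) == 0;
}

static void my_hwlock_unlock(struct hwspinlock *lock)
{
	writel(0, lock->priv);	/* hypothetical release write */
}

static const struct hwspinlock_ops my_hwlock_ops = {
	.trylock = my_hwlock_trylock,
	.unlock	 = my_hwlock_unlock,
};

static int my_hwlock_probe(struct platform_device *pdev)
{
	struct hwspinlock_device *bank;
	void __iomem *base;
	int i;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	bank = devm_kzalloc(&pdev->dev, sizeof(*bank) +
			    MY_NUM_LOCKS * sizeof(*bank->lock), GFP_KERNEL);
	if (!bank)
		return -ENOMEM;

	for (i = 0; i < MY_NUM_LOCKS; i++)
		bank->lock[i].priv = base + i * sizeof(u32);

	return hwspin_lock_register(bank, &pdev->dev, &my_hwlock_ops,
				    0, MY_NUM_LOCKS);
}
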
646 static int __hwspin_lock_request(struct hwspinlock *hwlock) in __hwspin_lock_request() argument
648 struct device *dev = hwlock->bank->dev; in __hwspin_lock_request()
670 tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock), in __hwspin_lock_request()
674 WARN_ON(tmp != hwlock); in __hwspin_lock_request()
685 int hwspin_lock_get_id(struct hwspinlock *hwlock) in hwspin_lock_get_id() argument
687 if (!hwlock) { in hwspin_lock_get_id()
692 return hwlock_to_id(hwlock); in hwspin_lock_get_id()
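
hwspin_lock_get_id() (via the internal hwlock_to_id()) recovers the global id, which is the value both sides of the link must agree on. A minimal sketch; the IPC hook is hypothetical and not part of the hwspinlock API:

#include <linux/hwspinlock.h>

int my_send_to_remote(u32 lock_id);	/* hypothetical IPC hook */

static int announce_lock(struct hwspinlock *hwlock)
{
	int id = hwspin_lock_get_id(hwlock);

	if (id < 0)
		return id;	/* -EINVAL for a NULL hwlock */

	return my_send_to_remote(id);
}
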
711 struct hwspinlock *hwlock; in hwspin_lock_request() local
717 ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock, in hwspin_lock_request()
721 hwlock = NULL; in hwspin_lock_request()
729 ret = __hwspin_lock_request(hwlock); in hwspin_lock_request()
731 hwlock = NULL; in hwspin_lock_request()
735 return hwlock; in hwspin_lock_request()
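
hwspin_lock_request() uses a tagged radix-tree lookup to find any lock still marked unused, then claims it by clearing the tag. A minimal sketch of this dynamic-assignment path (function name illustrative):

#include <linux/hwspinlock.h>
#include <linux/printk.h>

static struct hwspinlock *grab_any_lock(void)
{
	struct hwspinlock *hwlock = hwspin_lock_request();

	if (!hwlock)	/* no free lock, or no provider registered yet */
		return NULL;

	pr_info("claimed hwspinlock %d\n", hwspin_lock_get_id(hwlock));
	return hwlock;
}
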
754 struct hwspinlock *hwlock; in hwspin_lock_request_specific() local
760 hwlock = radix_tree_lookup(&hwspinlock_tree, id); in hwspin_lock_request_specific()
761 if (!hwlock) { in hwspin_lock_request_specific()
767 WARN_ON(hwlock_to_id(hwlock) != id); in hwspin_lock_request_specific()
773 hwlock = NULL; in hwspin_lock_request_specific()
778 ret = __hwspin_lock_request(hwlock); in hwspin_lock_request_specific()
780 hwlock = NULL; in hwspin_lock_request_specific()
784 return hwlock; in hwspin_lock_request_specific()
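
hwspin_lock_request_specific() is the static-assignment counterpart: the id is fixed ahead of time (by the device tree or the platform design) and the call returns NULL if that particular lock is unknown or already taken. A minimal sketch with an invented id:

#include <linux/hwspinlock.h>

#define MY_AGREED_LOCK_ID	7	/* hypothetical, platform-specific */

static struct hwspinlock *grab_agreed_lock(void)
{
	return hwspin_lock_request_specific(MY_AGREED_LOCK_ID);
}
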
800 int hwspin_lock_free(struct hwspinlock *hwlock) in hwspin_lock_free() argument
806 if (!hwlock) { in hwspin_lock_free()
811 dev = hwlock->bank->dev; in hwspin_lock_free()
815 ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock), in hwspin_lock_free()
828 tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock), in hwspin_lock_free()
832 WARN_ON(tmp != hwlock); in hwspin_lock_free()
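
hwspin_lock_free() is the inverse of request: after checking that the lock really was marked used, it re-sets the unused tag and drops the module and runtime-PM references. A minimal request/use/free lifecycle sketch (timeout value illustrative):

#include <linux/hwspinlock.h>

static int one_shot_critical_section(void)
{
	struct hwspinlock *hwlock;
	unsigned long flags;
	int ret;

	hwlock = hwspin_lock_request();
	if (!hwlock)
		return -EBUSY;

	ret = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
	if (!ret) {
		/* ... access the shared resource ... */
		hwspin_unlock_irqrestore(hwlock, &flags);
	}

	hwspin_lock_free(hwlock);	/* tag it unused again */
	return ret;
}
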
844 struct hwspinlock **hwlock = res; in devm_hwspin_lock_match() local
846 if (WARN_ON(!hwlock || !*hwlock)) in devm_hwspin_lock_match()
849 return *hwlock == data; in devm_hwspin_lock_match()
870 int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock) in devm_hwspin_lock_free() argument
875 devm_hwspin_lock_match, hwlock); in devm_hwspin_lock_free()
898 struct hwspinlock **ptr, *hwlock; in devm_hwspin_lock_request() local
904 hwlock = hwspin_lock_request(); in devm_hwspin_lock_request()
905 if (hwlock) { in devm_hwspin_lock_request()
906 *ptr = hwlock; in devm_hwspin_lock_request()
912 return hwlock; in devm_hwspin_lock_request()
934 struct hwspinlock **ptr, *hwlock; in devm_hwspin_lock_request_specific() local
940 hwlock = hwspin_lock_request_specific(id); in devm_hwspin_lock_request_specific()
941 if (hwlock) { in devm_hwspin_lock_request_specific()
942 *ptr = hwlock; in devm_hwspin_lock_request_specific()
948 return hwlock; in devm_hwspin_lock_request_specific()
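
The devm_ variants wrap the plain request calls in a devres record (matched by devm_hwspin_lock_match() above), so the lock is released automatically when the owning device unbinds. A minimal probe sketch; the driver shape and id value are invented:

#include <linux/hwspinlock.h>
#include <linux/platform_device.h>

static int my_consumer_probe(struct platform_device *pdev)
{
	struct hwspinlock *hwlock;

	hwlock = devm_hwspin_lock_request_specific(&pdev->dev, 3);
	if (!hwlock)	/* provider may simply not be up yet */
		return -EPROBE_DEFER;

	platform_set_drvdata(pdev, hwlock);
	return 0;	/* no explicit hwspin_lock_free() needed on unbind */
}
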