1 /*
2 * Copyright (c) 2024 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/ztest.h>
8
9 #include <zephyr/arch/arc/v2/vpx/arc_vpx.h>
10
11 #ifndef CONFIG_ARC_VPX_COOPERATIVE_SHARING
12 #error "Rebuild with the ARC_VPX_COOPERATIVE_SHARING config option enabled"
13 #endif
14
/* Stack for the payload thread; padded for instrumented/coverage builds. */
#define STACK_SIZE (1024 + CONFIG_TEST_EXTRA_STACK_SIZE)

static void timer_func(struct k_timer *timer);

K_THREAD_STACK_DEFINE(payload_stack, STACK_SIZE);

/* One-shot timer used to forcibly release the VPX lock from ISR context. */
static K_TIMER_DEFINE(my_timer, timer_func, NULL);

static struct k_thread payload_thread;

/* NOTE(review): isr_result is never referenced in this file chunk — possibly
 * leftover from an earlier version of the test; confirm before removing.
 */
static volatile int isr_result;
/* CPU id whose VPX lock the timer ISR will forcibly release. */
static volatile unsigned int isr_vpx_lock_id;
27
28 /**
29 * Obtain the current CPU id.
30 */
current_cpu_id_get(void)31 static int current_cpu_id_get(void)
32 {
33 int key;
34 int id;
35
36 key = arch_irq_lock();
37 id = _current_cpu->id;
38 arch_irq_unlock(key);
39
40 return id;
41 }
42
timer_func(struct k_timer * timer)43 static void timer_func(struct k_timer *timer)
44 {
45 arc_vpx_unlock_force(isr_vpx_lock_id);
46 }
47
arc_vpx_lock_unlock_timed_payload(void * p1,void * p2,void * p3)48 static void arc_vpx_lock_unlock_timed_payload(void *p1, void *p2, void *p3)
49 {
50 int status;
51 unsigned int cpu_id;
52
53 cpu_id = (unsigned int)(uintptr_t)(p1);
54 ARG_UNUSED(p2);
55 ARG_UNUSED(p3);
56
57 status = arc_vpx_lock(K_NO_WAIT);
58 zassert_equal(0, status, "Expected return value %d, not %d\n", 0, status);
59
60 /*
61 * In 1 second, forcibly release the VPX lock. However, wait up to
62 * 5 seconds before considering this a failure.
63 */
64
65 isr_vpx_lock_id = cpu_id;
66 k_timer_start(&my_timer, K_MSEC(1000), K_FOREVER);
67
68 status = arc_vpx_lock(K_MSEC(5000));
69 zassert_equal(0, status, "Expected return value %d, not %d\n", 0, status);
70
71 arc_vpx_unlock();
72 }
73
ZTEST(vpx_lock,test_arc_vpx_lock_unlock_timed)74 ZTEST(vpx_lock, test_arc_vpx_lock_unlock_timed)
75 {
76 int priority;
77 int cpu_id;
78
79 priority = k_thread_priority_get(k_current_get());
80 cpu_id = current_cpu_id_get();
81
82 k_thread_create(&payload_thread, payload_stack, STACK_SIZE,
83 arc_vpx_lock_unlock_timed_payload,
84 (void *)(uintptr_t)cpu_id, NULL, NULL,
85 priority - 2, 0, K_FOREVER);
86
87 #if defined(CONFIG_SCHED_CPU_MASK) && (CONFIG_MP_MAX_NUM_CPUS > 1)
88 k_thread_cpu_pin(&payload_thread, cpu_id);
89 #endif
90 k_thread_start(&payload_thread);
91
92 k_thread_join(&payload_thread, K_FOREVER);
93 }
94
arc_vpx_lock_unlock_payload(void * p1,void * p2,void * p3)95 static void arc_vpx_lock_unlock_payload(void *p1, void *p2, void *p3)
96 {
97 int status;
98
99 ARG_UNUSED(p1);
100 ARG_UNUSED(p2);
101 ARG_UNUSED(p3);
102
103 /* The VPX lock is available; take it. */
104
105 status = arc_vpx_lock(K_NO_WAIT);
106 zassert_equal(0, status, "Expected return value %d, not %d\n", 0, status);
107
108 /* The VPX lock has already been taken; expect errors */
109
110 status = arc_vpx_lock(K_NO_WAIT);
111 zassert_equal(-EBUSY, status, "Expected return value %d (-EBUSY), not %d\n",
112 -EBUSY, status);
113
114 status = arc_vpx_lock(K_MSEC(10));
115 zassert_equal(-EAGAIN, status, "Expected return value %d (-EAGAIN), not %d\n",
116 -EAGAIN, status);
117
118 /* Verify that unlocking makes it available */
119
120 arc_vpx_unlock();
121
122 status = arc_vpx_lock(K_NO_WAIT);
123 zassert_equal(0, status, "Expected return value %d, not %d\n", 0, status);
124 arc_vpx_unlock();
125 }
126
ZTEST(vpx_lock,test_arc_vpx_lock_unlock)127 ZTEST(vpx_lock, test_arc_vpx_lock_unlock)
128 {
129 int priority;
130 int cpu_id;
131
132 priority = k_thread_priority_get(k_current_get());
133 cpu_id = current_cpu_id_get();
134
135 k_thread_create(&payload_thread, payload_stack, STACK_SIZE,
136 arc_vpx_lock_unlock_payload, NULL, NULL, NULL,
137 priority - 2, 0, K_FOREVER);
138
139 #if defined(CONFIG_SCHED_CPU_MASK) && (CONFIG_MP_MAX_NUM_CPUS > 1)
140 k_thread_cpu_pin(&payload_thread, cpu_id);
141 #endif
142 k_thread_start(&payload_thread);
143
144 k_thread_join(&payload_thread, K_FOREVER);
145 }
146
/* Register the suite; no per-suite setup/teardown is needed. */
ZTEST_SUITE(vpx_lock, NULL, NULL, NULL, NULL, NULL);
148