// SPDX-License-Identifier: LGPL-2.1+

#include <linux/cleanup.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/nodemask.h>
#include <kunit/test.h>

#include "internals.h"

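/*
 * No-op interrupt handler and irq_chip callbacks for the tests below.
 * noop_affinity() only records the requested mask as the effective affinity.
 */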
static irqreturn_t noop_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static void noop(struct irq_data *data) { }
static unsigned int noop_ret(struct irq_data *data) { return 0; }

static int noop_affinity(struct irq_data *data, const struct cpumask *dest,
			 bool force)
{
	irq_data_update_effective_affinity(data, dest);

	return 0;
}

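/*
 * Fake irq_chip built from the no-op callbacks above, sufficient for the
 * core to start up, shut down and set the affinity of the test interrupt.
 */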
static struct irq_chip fake_irq_chip = {
	.name = "fake",
	.irq_startup = noop_ret,
	.irq_shutdown = noop,
	.irq_enable = noop,
	.irq_disable = noop,
	.irq_ack = noop,
	.irq_mask = noop,
	.irq_unmask = noop,
	.irq_set_affinity = noop_affinity,
	.flags = IRQCHIP_SKIP_SET_WAKE,
};

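/*
 * Verify that disable_irq() and enable_irq() keep the descriptor's disable
 * depth balanced on a requested interrupt.
 */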
static void irq_disable_depth_test(struct kunit *test)
{
	struct irq_desc *desc;
	int virq, ret;

	virq = irq_domain_alloc_descs(-1, 1, 0, NUMA_NO_NODE, NULL);
	KUNIT_ASSERT_GE(test, virq, 0);

	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);

	desc = irq_to_desc(virq);
	KUNIT_ASSERT_PTR_NE(test, desc, NULL);

	ret = request_irq(virq, noop_handler, 0, "test_irq", NULL);
	KUNIT_EXPECT_EQ(test, ret, 0);

	KUNIT_EXPECT_EQ(test, desc->depth, 0);

	disable_irq(virq);
	KUNIT_EXPECT_EQ(test, desc->depth, 1);

	enable_irq(virq);
	KUNIT_EXPECT_EQ(test, desc->depth, 0);

	free_irq(virq, NULL);
}

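/*
 * Freeing an interrupt that was left disabled must not lose the disable
 * depth; a subsequent request_irq() starts again at depth zero.
 */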
static void irq_free_disabled_test(struct kunit *test)
{
	struct irq_desc *desc;
	int virq, ret;

	virq = irq_domain_alloc_descs(-1, 1, 0, NUMA_NO_NODE, NULL);
	KUNIT_ASSERT_GE(test, virq, 0);

	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);

	desc = irq_to_desc(virq);
	KUNIT_ASSERT_PTR_NE(test, desc, NULL);

	ret = request_irq(virq, noop_handler, 0, "test_irq", NULL);
	KUNIT_EXPECT_EQ(test, ret, 0);

	KUNIT_EXPECT_EQ(test, desc->depth, 0);

	disable_irq(virq);
	KUNIT_EXPECT_EQ(test, desc->depth, 1);

	free_irq(virq, NULL);
	KUNIT_EXPECT_GE(test, desc->depth, 1);

	ret = request_irq(virq, noop_handler, 0, "test_irq", NULL);
	KUNIT_EXPECT_EQ(test, ret, 0);
	KUNIT_EXPECT_EQ(test, desc->depth, 0);

	free_irq(virq, NULL);
}

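/*
 * Shut down and deactivate a disabled, managed interrupt directly, then
 * check that re-activation and managed startup leave the disable depth
 * intact until enable_irq() brings the interrupt back.
 */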
static void irq_shutdown_depth_test(struct kunit *test)
{
	struct irq_desc *desc;
	struct irq_data *data;
	int virq, ret;
	struct irq_affinity_desc affinity = {
		.is_managed = 1,
		.mask = CPU_MASK_ALL,
	};

	if (!IS_ENABLED(CONFIG_SMP))
		kunit_skip(test, "requires CONFIG_SMP for managed shutdown");

	virq = irq_domain_alloc_descs(-1, 1, 0, NUMA_NO_NODE, &affinity);
	KUNIT_ASSERT_GE(test, virq, 0);

	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);

	desc = irq_to_desc(virq);
	KUNIT_ASSERT_PTR_NE(test, desc, NULL);

	data = irq_desc_get_irq_data(desc);
	KUNIT_ASSERT_PTR_NE(test, data, NULL);

	ret = request_irq(virq, noop_handler, 0, "test_irq", NULL);
	KUNIT_EXPECT_EQ(test, ret, 0);

	KUNIT_EXPECT_TRUE(test, irqd_is_activated(data));
	KUNIT_EXPECT_TRUE(test, irqd_is_started(data));
	KUNIT_EXPECT_TRUE(test, irqd_affinity_is_managed(data));

	KUNIT_EXPECT_EQ(test, desc->depth, 0);

	disable_irq(virq);
	KUNIT_EXPECT_EQ(test, desc->depth, 1);

	scoped_guard(raw_spinlock_irqsave, &desc->lock)
		irq_shutdown_and_deactivate(desc);

	KUNIT_EXPECT_FALSE(test, irqd_is_activated(data));
	KUNIT_EXPECT_FALSE(test, irqd_is_started(data));

	KUNIT_EXPECT_EQ(test, irq_activate(desc), 0);
#ifdef CONFIG_SMP
	irq_startup_managed(desc);
#endif

	KUNIT_EXPECT_EQ(test, desc->depth, 1);

	enable_irq(virq);
	KUNIT_EXPECT_EQ(test, desc->depth, 0);

	free_irq(virq, NULL);
}

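/*
 * Offline and re-online CPU 1 while a managed interrupt affine only to it
 * is disabled; the interrupt must stay shut down across the hotplug cycle
 * and only start up again once enable_irq() drops the depth to zero.
 */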
static void irq_cpuhotplug_test(struct kunit *test)
{
	struct irq_desc *desc;
	struct irq_data *data;
	int virq, ret;
	struct irq_affinity_desc affinity = {
		.is_managed = 1,
	};

	if (!IS_ENABLED(CONFIG_SMP))
		kunit_skip(test, "requires CONFIG_SMP for CPU hotplug");
	if (!get_cpu_device(1))
		kunit_skip(test, "requires more than 1 CPU for CPU hotplug");
	if (!cpu_is_hotpluggable(1))
		kunit_skip(test, "CPU 1 must be hotpluggable");

	cpumask_copy(&affinity.mask, cpumask_of(1));

	virq = irq_domain_alloc_descs(-1, 1, 0, NUMA_NO_NODE, &affinity);
	KUNIT_ASSERT_GE(test, virq, 0);

	irq_set_chip_and_handler(virq, &fake_irq_chip, handle_simple_irq);

	desc = irq_to_desc(virq);
	KUNIT_ASSERT_PTR_NE(test, desc, NULL);

	data = irq_desc_get_irq_data(desc);
	KUNIT_ASSERT_PTR_NE(test, data, NULL);

	ret = request_irq(virq, noop_handler, 0, "test_irq", NULL);
	KUNIT_EXPECT_EQ(test, ret, 0);

	KUNIT_EXPECT_TRUE(test, irqd_is_activated(data));
	KUNIT_EXPECT_TRUE(test, irqd_is_started(data));
	KUNIT_EXPECT_TRUE(test, irqd_affinity_is_managed(data));

	KUNIT_EXPECT_EQ(test, desc->depth, 0);

	disable_irq(virq);
	KUNIT_EXPECT_EQ(test, desc->depth, 1);

	KUNIT_EXPECT_EQ(test, remove_cpu(1), 0);
	KUNIT_EXPECT_FALSE(test, irqd_is_activated(data));
	KUNIT_EXPECT_FALSE(test, irqd_is_started(data));
	KUNIT_EXPECT_GE(test, desc->depth, 1);
	KUNIT_EXPECT_EQ(test, add_cpu(1), 0);

	KUNIT_EXPECT_FALSE(test, irqd_is_activated(data));
	KUNIT_EXPECT_FALSE(test, irqd_is_started(data));
	KUNIT_EXPECT_EQ(test, desc->depth, 1);

	enable_irq(virq);
	KUNIT_EXPECT_TRUE(test, irqd_is_activated(data));
	KUNIT_EXPECT_TRUE(test, irqd_is_started(data));
	KUNIT_EXPECT_EQ(test, desc->depth, 0);

	free_irq(virq, NULL);
}

static struct kunit_case irq_test_cases[] = {
	KUNIT_CASE(irq_disable_depth_test),
	KUNIT_CASE(irq_free_disabled_test),
	KUNIT_CASE(irq_shutdown_depth_test),
	KUNIT_CASE(irq_cpuhotplug_test),
	{}
};

static struct kunit_suite irq_test_suite = {
	.name = "irq_test_cases",
	.test_cases = irq_test_cases,
};

kunit_test_suite(irq_test_suite);
MODULE_DESCRIPTION("IRQ unit test suite");
MODULE_LICENSE("GPL");