// SPDX-License-Identifier: GPL-2.0-only
/*
 * irq_comm.c: Common API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/rculist.h>

#include <trace/events/kvm.h>

#include "irq.h"

#include "ioapic.h"

#include "lapic.h"

#include "hyperv.h"
#include "x86.h"
#include "xen.h"

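/* Routing callback: assert/deassert a pin on the in-kernel PIC. */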
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	struct kvm_pic *pic = kvm->arch.vpic;
	return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
}

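/* Routing callback: assert/deassert a pin on the in-kernel IOAPIC. */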
static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id, int level,
			      bool line_status)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
				line_status);
}

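/*
 * Deliver an interrupt to the local APICs of all matching vCPUs.  The
 * fast path covers the common cases; otherwise every vCPU is checked
 * against the destination.  For lowest-priority delivery the target is
 * chosen either by comparing APIC priorities or, when vector hashing
 * is enabled, by hashing the vector across the candidate vCPUs.
 * Returns -1 if no matching destination was found.
 */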
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, struct dest_map *dest_map)
{
	int r = -1;
	struct kvm_vcpu *vcpu, *lowest = NULL;
	unsigned long i, dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	unsigned int dest_vcpus = 0;

	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
		return r;

	if (irq->dest_mode == APIC_DEST_PHYSICAL &&
	    irq->dest_id == 0xff && kvm_lowest_prio_delivery(irq)) {
		pr_info("apic: phys broadcast and lowest prio\n");
		irq->delivery_mode = APIC_DM_FIXED;
	}

	memset(dest_vcpu_bitmap, 0, sizeof(dest_vcpu_bitmap));

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
			continue;

		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
					irq->dest_id, irq->dest_mode))
			continue;

		if (!kvm_lowest_prio_delivery(irq)) {
			if (r < 0)
				r = 0;
			r += kvm_apic_set_irq(vcpu, irq, dest_map);
		} else if (kvm_apic_sw_enabled(vcpu->arch.apic)) {
			if (!kvm_vector_hashing_enabled()) {
				if (!lowest)
					lowest = vcpu;
				else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
					lowest = vcpu;
			} else {
				__set_bit(i, dest_vcpu_bitmap);
				dest_vcpus++;
			}
		}
	}

	if (dest_vcpus != 0) {
		int idx = kvm_vector_to_index(irq->vector, dest_vcpus,
					dest_vcpu_bitmap, KVM_MAX_VCPUS);

		lowest = kvm_get_vcpu(kvm, idx);
	}

	if (lowest)
		r = kvm_apic_set_irq(lowest, irq, dest_map);

	return r;
}

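/*
 * Decode the MSI address/data of a routing entry into a
 * struct kvm_lapic_irq.  With the x2apic format enabled, address_hi
 * supplies the high bits of the destination id.
 */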
void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		     struct kvm_lapic_irq *irq)
{
	struct msi_msg msg = { .address_lo = e->msi.address_lo,
			       .address_hi = e->msi.address_hi,
			       .data = e->msi.data };

	trace_kvm_msi_set_irq(msg.address_lo | (kvm->arch.x2apic_format ?
			      (u64)msg.address_hi << 32 : 0), msg.data);

	irq->dest_id = x86_msi_msg_get_destid(&msg, kvm->arch.x2apic_format);
	irq->vector = msg.arch_data.vector;
	irq->dest_mode = kvm_lapic_irq_dest_mode(msg.arch_addr_lo.dest_mode_logical);
	irq->trig_mode = msg.arch_data.is_level;
	irq->delivery_mode = msg.arch_data.delivery_mode << 8;
	irq->msi_redir_hint = msg.arch_addr_lo.redirect_hint;
	irq->level = 1;
	irq->shorthand = APIC_DEST_NOSHORT;
}
EXPORT_SYMBOL_GPL(kvm_set_msi_irq);

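/*
 * In x2apic format the low byte of address_hi must be zero; bits 31-8
 * extend the destination id.  Reject anything else.
 */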
static inline bool kvm_msi_route_invalid(struct kvm *kvm,
		struct kvm_kernel_irq_routing_entry *e)
{
	return kvm->arch.x2apic_format && (e->msi.address_hi & 0xff);
}

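/*
 * Deliver an MSI described by a routing entry.  MSIs are edge
 * triggered, so a "level low" request is simply ignored.
 */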
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
	struct kvm_lapic_irq irq;

	if (kvm_msi_route_invalid(kvm, e))
		return -EINVAL;

	if (!level)
		return -1;

	kvm_set_msi_irq(kvm, e, &irq);

	return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
}

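/* Edge-triggered injection of a Hyper-V synthetic interrupt (SINT). */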
static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
		    struct kvm *kvm, int irq_source_id, int level,
		    bool line_status)
{
	if (!level)
		return -1;

	return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
}

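/*
 * Try to deliver an interrupt without sleeping, for callers running in
 * atomic context (e.g. the irqfd wakeup path).  Only routes with a
 * fast path are handled; -EWOULDBLOCK tells the caller to fall back to
 * the normal injection path.
 */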
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id, int level,
			      bool line_status)
{
	struct kvm_lapic_irq irq;
	int r;

	switch (e->type) {
	case KVM_IRQ_ROUTING_HV_SINT:
		return kvm_hv_set_sint(e, kvm, irq_source_id, level,
				       line_status);

	case KVM_IRQ_ROUTING_MSI:
		if (kvm_msi_route_invalid(kvm, e))
			return -EINVAL;

		kvm_set_msi_irq(kvm, e, &irq);

		if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
			return r;
		break;

#ifdef CONFIG_KVM_XEN
	case KVM_IRQ_ROUTING_XEN_EVTCHN:
		if (!level)
			return -1;

		return kvm_xen_set_evtchn_fast(&e->xen_evtchn, kvm);
#endif
	default:
		break;
	}

	return -EWOULDBLOCK;
}

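/*
 * Allocate an IRQ source id from the per-VM bitmap.  Returns the new
 * id, or -EFAULT once all BITS_PER_LONG ids are in use.
 */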
int kvm_request_irq_source_id(struct kvm *kvm)
{
	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
	int irq_source_id;

	mutex_lock(&kvm->irq_lock);
	irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);

	if (irq_source_id >= BITS_PER_LONG) {
		pr_warn("exhausted allocatable IRQ sources!\n");
		irq_source_id = -EFAULT;
		goto unlock;
	}

	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
	ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
	set_bit(irq_source_id, bitmap);
unlock:
	mutex_unlock(&kvm->irq_lock);

	return irq_source_id;
}

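/*
 * Release an IRQ source id and clear any interrupt levels it still
 * asserts on the in-kernel PIC and IOAPIC.
 */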
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
	ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);

	mutex_lock(&kvm->irq_lock);
	if (irq_source_id < 0 ||
	    irq_source_id >= BITS_PER_LONG) {
		pr_err("IRQ source ID out of range!\n");
		goto unlock;
	}
	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
	if (!irqchip_kernel(kvm))
		goto unlock;

	kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
	kvm_pic_clear_all(kvm->arch.vpic, irq_source_id);
unlock:
	mutex_unlock(&kvm->irq_lock);
}

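/*
 * Mask notifiers fire when the mask state of an irqchip pin changes.
 * The list is modified under irq_lock and walked under irq_srcu.
 */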
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	kimn->irq = irq;
	hlist_add_head_rcu(&kimn->link, &kvm->arch.mask_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}

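/*
 * Removal synchronizes irq_srcu, so the notifier may be freed as soon
 * as this returns.
 */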
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_rcu(&kimn->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
}

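/*
 * Map (irqchip, pin) to its GSI and call every mask notifier that is
 * registered for that GSI.
 */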
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask)
{
	struct kvm_irq_mask_notifier *kimn;
	int idx, gsi;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kimn, &kvm->arch.mask_notifier_list, link)
			if (kimn->irq == gsi)
				kimn->func(kimn, mask);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm);
}

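/*
 * Translate one userspace irq routing entry (KVM_SET_GSI_ROUTING) into
 * its in-kernel form and select the matching ->set() callback.
 */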
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	/* We can't check irqchip_in_kernel() here as some callers are
	 * currently initializing the irqchip. Other callers should therefore
	 * check kvm_arch_can_set_irq_routing() before calling this function.
	 */
	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		if (irqchip_split(kvm))
			return -EINVAL;
		e->irqchip.pin = ue->u.irqchip.pin;
		switch (ue->u.irqchip.irqchip) {
		case KVM_IRQCHIP_PIC_SLAVE:
			e->irqchip.pin += PIC_NUM_PINS / 2;
			fallthrough;
		case KVM_IRQCHIP_PIC_MASTER:
			if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2)
				return -EINVAL;
			e->set = kvm_set_pic_irq;
			break;
		case KVM_IRQCHIP_IOAPIC:
			if (ue->u.irqchip.pin >= KVM_IOAPIC_NUM_PINS)
				return -EINVAL;
			e->set = kvm_set_ioapic_irq;
			break;
		default:
			return -EINVAL;
		}
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		break;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;

		if (kvm_msi_route_invalid(kvm, e))
			return -EINVAL;
		break;
	case KVM_IRQ_ROUTING_HV_SINT:
		e->set = kvm_hv_set_sint;
		e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
		e->hv_sint.sint = ue->u.hv_sint.sint;
		break;
#ifdef CONFIG_KVM_XEN
	case KVM_IRQ_ROUTING_XEN_EVTCHN:
		return kvm_xen_setup_evtchn(kvm, e, ue);
#endif
	default:
		return -EINVAL;
	}

	return 0;
}

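/*
 * Return true if the interrupt resolves to exactly one vCPU and store
 * that vCPU in *dest_vcpu.  Tries the fast destination lookup first,
 * then falls back to scanning all vCPUs.
 */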
bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
			     struct kvm_vcpu **dest_vcpu)
{
	int r = 0;
	unsigned long i;
	struct kvm_vcpu *vcpu;

	if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu))
		return true;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
			continue;

		if (!kvm_apic_match_dest(vcpu, NULL, irq->shorthand,
					irq->dest_id, irq->dest_mode))
			continue;

		if (++r == 2)
			return false;

		*dest_vcpu = vcpu;
	}

	return r == 1;
}
EXPORT_SYMBOL_GPL(kvm_intr_is_single_vcpu);

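/*
 * Default GSI routing for the full in-kernel irqchip: GSIs 0-15 are
 * wired to both the PIC and the IOAPIC, GSIs 16-23 to the IOAPIC only.
 */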
#define IOAPIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#define PIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
#define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)

static const struct kvm_irq_routing_entry default_routing[] = {
	ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
	ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
	ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
	ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
	ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
	ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
	ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
	ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
	ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
	ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
	ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
	ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
};

int kvm_setup_default_irq_routing(struct kvm *kvm)
{
	return kvm_set_irq_routing(kvm, default_routing,
				   ARRAY_SIZE(default_routing), 0);
}

static const struct kvm_irq_routing_entry empty_routing[] = {};

int kvm_setup_empty_irq_routing(struct kvm *kvm)
{
	return kvm_set_irq_routing(kvm, empty_routing, 0, 0);
}

void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
	if (!irqchip_split(kvm))
		return;
	kvm_make_scan_ioapic_request(kvm);
}

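/*
 * For a split irqchip, record the vectors of level-triggered routes on
 * the reserved IOAPIC pins that target this vCPU (or have an EOI
 * pending), so the corresponding EOIs can be forwarded to the
 * userspace IOAPIC.
 */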
void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
			    ulong *ioapic_handled_vectors)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_kernel_irq_routing_entry *entry;
	struct kvm_irq_routing_table *table;
	u32 i, nr_ioapic_pins;
	int idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	nr_ioapic_pins = min_t(u32, table->nr_rt_entries,
			       kvm->arch.nr_reserved_ioapic_pins);
	for (i = 0; i < nr_ioapic_pins; ++i) {
		hlist_for_each_entry(entry, &table->map[i], link) {
			struct kvm_lapic_irq irq;

			if (entry->type != KVM_IRQ_ROUTING_MSI)
				continue;

			kvm_set_msi_irq(vcpu->kvm, entry, &irq);

			if (irq.trig_mode &&
			    (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
						 irq.dest_id, irq.dest_mode) ||
			     kvm_apic_pending_eoi(vcpu, irq.vector)))
				__set_bit(irq.vector, ioapic_handled_vectors);
		}
	}
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_arch_irq_routing_update(struct kvm *kvm)
{
	kvm_hv_irq_routing_update(kvm);
}