1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2015, 2016 ARM Ltd.
4  */
5 #ifndef __KVM_ARM_VGIC_MMIO_H__
6 #define __KVM_ARM_VGIC_MMIO_H__
7 
/*
 * Describes one (range of) register(s) in a VGIC MMIO register frame,
 * together with the handlers used to access it. The its_* handler
 * variants share union storage with the vcpu-based ones, so a region
 * provides one flavour or the other, never both.
 */
struct vgic_register_region {
	unsigned int reg_offset;	/* offset within the register frame */
	unsigned int len;		/* size of the region in bytes */
	unsigned int bits_per_irq;	/* bits of state per IRQ; 0 for fixed-length regions */
	unsigned int access_flags;	/* allowed access widths: VGIC_ACCESS_* mask */
	/* Guest MMIO read handler (per-vcpu or ITS flavour). */
	union {
		unsigned long (*read)(struct kvm_vcpu *vcpu, gpa_t addr,
				      unsigned int len);
		unsigned long (*its_read)(struct kvm *kvm, struct vgic_its *its,
					  gpa_t addr, unsigned int len);
	};
	/* Guest MMIO write handler (per-vcpu or ITS flavour). */
	union {
		void (*write)(struct kvm_vcpu *vcpu, gpa_t addr,
			      unsigned int len, unsigned long val);
		void (*its_write)(struct kvm *kvm, struct vgic_its *its,
				  gpa_t addr, unsigned int len,
				  unsigned long val);
	};
	/*
	 * Separate read handler for userspace accesses; presumably falls
	 * back to ->read when NULL -- NOTE(review): confirm in the
	 * dispatch code.
	 */
	unsigned long (*uaccess_read)(struct kvm_vcpu *vcpu, gpa_t addr,
				      unsigned int len);
	/*
	 * Userspace write handlers; unlike the MMIO ones, the int return
	 * allows the access to fail.
	 */
	union {
		int (*uaccess_write)(struct kvm_vcpu *vcpu, gpa_t addr,
				     unsigned int len, unsigned long val);
		int (*uaccess_its_write)(struct kvm *kvm, struct vgic_its *its,
					 gpa_t addr, unsigned int len,
					 unsigned long val);
	};
};
36 
/* kvm_io_bus device ops used to dispatch guest accesses to these regions */
extern const struct kvm_io_device_ops kvm_io_gic_ops;

/* Allowed access widths, combined as a bitmask in access_flags */
#define VGIC_ACCESS_8bit	1
#define VGIC_ACCESS_32bit	2
#define VGIC_ACCESS_64bit	4

/*
 * Generate a mask that covers the number of bytes required to address
 * up to 1024 interrupts, each represented by <bits> bits. This assumes
 * that <bits> is a power of two.
 */
#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
49 
50 /*
51  * (addr & mask) gives us the _byte_ offset for the INT ID.
52  * We multiply this by 8 the get the _bit_ offset, then divide this by
53  * the number of bits to learn the actual INT ID.
54  * But instead of a division (which requires a "long long div" implementation),
55  * we shift by the binary logarithm of <bits>.
56  * This assumes that <bits> is a power of two.
57  */
58 #define VGIC_ADDR_TO_INTID(addr, bits)  (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
59 					8 >> ilog2(bits))
60 
61 /*
62  * Some VGIC registers store per-IRQ information, with a different number
63  * of bits per IRQ. For those registers this macro is used.
64  * The _WITH_LENGTH version instantiates registers with a fixed length
65  * and is mutually exclusive with the _PER_IRQ version.
66  */
67 #define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, ur, uw, bpi, acc)	\
68 	{								\
69 		.reg_offset = off,					\
70 		.bits_per_irq = bpi,					\
71 		.len = bpi * 1024 / 8,					\
72 		.access_flags = acc,					\
73 		.read = rd,						\
74 		.write = wr,						\
75 		.uaccess_read = ur,					\
76 		.uaccess_write = uw,					\
77 	}
78 
/*
 * Instantiate a register region with a fixed byte length, independent of
 * the number of IRQs (bits_per_irq is 0 for such regions).
 */
#define REGISTER_DESC_WITH_LENGTH(off, rd, wr, length, acc)		\
	{								\
		.reg_offset = off,					\
		.bits_per_irq = 0,					\
		.len = length,						\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
	}
88 
/*
 * As REGISTER_DESC_WITH_LENGTH, but additionally wires up dedicated
 * handlers for userspace (uaccess) reads and writes.
 */
#define REGISTER_DESC_WITH_LENGTH_UACCESS(off, rd, wr, urd, uwr, length, acc) \
	{								\
		.reg_offset = off,					\
		.bits_per_irq = 0,					\
		.len = length,						\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
		.uaccess_read = urd,					\
		.uaccess_write = uwr,					\
	}
100 
/* Convert data from its MMIO-bus (byte buffer) form to a host value. */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len);

/* Store a host value into an MMIO-bus byte buffer of the given length. */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data);

/* Extract <num> bytes starting at byte <offset> from a 64-bit value. */
unsigned long extract_bytes(u64 data, unsigned int offset,
			    unsigned int num);

/* Merge a <len>-byte write at byte <offset> into a 64-bit register value. */
u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
		     unsigned long val);
111 
/*
 * Generic register handlers: read-as-zero (raz), read-as-ones (rao)
 * and write-ignore (wi).
 */
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len);

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len);

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val);

/* Write-ignore variant for userspace accesses. */
int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val);

/* Accessors for the per-IRQ interrupt group state. */
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu, gpa_t addr,
				   unsigned int len);

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val);
129 
/*
 * Enable-state accessors: "senable" sets enable bits, "cenable" clears
 * them. The vgic_uaccess_* variants serve userspace accesses; their int
 * return allows the access to fail.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len);

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val);

int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val);

int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val);
148 
/*
 * Pending-state accessors: "spending" sets pending bits, "cpending"
 * clears them. Separate read/write variants exist for userspace
 * (uaccess) accesses.
 */
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len);

unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu,
					gpa_t addr, unsigned int len);

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val);

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val);

int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val);

int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val);
170 
171 unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
172 				    gpa_t addr, unsigned int len);
173 
174 unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
175 				    gpa_t addr, unsigned int len);
176 
177 void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
178 			     gpa_t addr, unsigned int len,
179 			     unsigned long val);
180 
181 void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
182 			     gpa_t addr, unsigned int len,
183 			     unsigned long val);
184 
185 int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
186 				    gpa_t addr, unsigned int len,
187 				    unsigned long val);
188 
189 int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
190 				    gpa_t addr, unsigned int len,
191 				    unsigned long val);
192 
/* Per-IRQ priority accessors. */
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len);

void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val);

/* Per-IRQ configuration (trigger type) accessors. */
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len);

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val);

/* Perform a userspace read or write of a register within <dev>. */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val);

/*
 * Read/write line-level state for a block of IRQs starting at <intid>;
 * presumably one bit per IRQ in the u32 -- NOTE(review): confirm against
 * the implementation.
 */
u32 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid);

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u32 val);
214 
/*
 * Initialize the distributor MMIO device for GICv2/GICv3; the returned
 * unsigned int looks like the size of the register frame -- TODO confirm
 * at the call sites.
 */
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);

unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev);

/* Sanitise guest-written memory-attribute register fields. */
u64 vgic_sanitise_outer_cacheability(u64 reg);
u64 vgic_sanitise_inner_cacheability(u64 reg);
u64 vgic_sanitise_shareability(u64 reg);
/* Extract <field_mask> at <field_shift>, run sanitise_fn, re-insert it. */
u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
			u64 (*sanitise_fn)(u64));

/* Find the proper register handler entry given a certain address offset */
const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset);
229 
230 #endif
231