// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/core_apecs.c
 *
 * Rewritten for Apecs from the lca.c from:
 *
 *	Written by David Mosberger (davidm@cs.arizona.edu) with some code
 *	taken from Dave Rusling's (david.rusling@reo.mts.dec.com) 32-bit
 *	bios code.
 *
 * Code common to all APECS core logic chips.
 */

#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_apecs.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>

#include <asm/ptrace.h>
#include <asm/smp.h>
#include <asm/mce.h>

#include "proto.h"
#include "pci_impl.h"

/*
 * NOTE: Herein lie back-to-back mb instructions.  They are magic.
 * One plausible explanation is that the i/o controller does not properly
 * handle the system transaction.  Another involves timing.  Ho hum.
 */

/*
 * BIOS32-style PCI interface:
 */

#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBGC(args)	printk args
#else
# define DBGC(args)
#endif

#define vuip	volatile unsigned int *

/*
 * Given a bus, device, and function number, compute resulting
 * configuration space address and setup the APECS_HAXR2 register
 * accordingly.  It is therefore not safe to have concurrent
 * invocations to configuration space access routines, but there
 * really shouldn't be any need for this.
 *
 * Type 0:
 *
 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *	31:11	Device select bit.
 *	10:8	Function number
 *	 7:2	Register number
 *
 * Type 1:
 *
 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *	31:24	reserved
 *	23:16	bus number (8 bits = 256 possible buses)
 *	15:11	Device number (5 bits)
 *	10:8	function number
 *	 7:2	register number
 *
 * Notes:
 *	The function number selects which function of a multi-function device
 *	(e.g., SCSI and Ethernet).
 *
 *	The register selects a DWORD (32 bit) register offset.  Hence it
 *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
 *	bits.
 */

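/*
 * Worked example (type 0): bus 0, devfn 0x20 (device 4, function 0),
 * where 0x00, size 4.  mk_conf_addr() below yields
 * pci_addr = (0x20 << 8) | 0x00 = 0x2000, and apecs_read_config()
 * then forms the sparse-space address
 * (0x2000 << 5) + (4 - 1) * 8 + APECS_CONF = APECS_CONF + 0x40018.
 */
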
static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
	     unsigned long *pci_addr, unsigned char *type1)
{
	unsigned long addr;
	u8 bus = pbus->number;

	DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x,"
	      " pci_addr=0x%p, type1=0x%p)\n",
	      bus, device_fn, where, pci_addr, type1));

	if (bus == 0) {
		int device = device_fn >> 3;

		/* type 0 configuration cycle: */

		if (device > 20) {
			DBGC(("mk_conf_addr: device (%d) > 20, returning -1\n",
			      device));
			return -1;
		}

		*type1 = 0;
		addr = (device_fn << 8) | (where);
	} else {
		/* type 1 configuration cycle: */
		*type1 = 1;
		addr = (bus << 16) | (device_fn << 8) | (where);
	}
	*pci_addr = addr;
	DBGC(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
	return 0;
}

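/*
 * Configuration space is probed with machine checks expected: a read
 * of a non-existent device aborts with a machine check rather than
 * returning data.  conf_read() marks the access as expected
 * (mcheck_expected), performs the load, and if a machine check was
 * taken it reports the conventional all-ones value instead.
 */
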
static unsigned int
conf_read(unsigned long addr, unsigned char type1)
{
	unsigned long flags;
	unsigned int stat0, value;
	unsigned int haxr2 = 0;

	local_irq_save(flags);	/* avoid getting hit by machine check */

	DBGC(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));

	/* Reset status register to avoid losing errors.  */
	stat0 = *(vuip)APECS_IOC_DCSR;
	*(vuip)APECS_IOC_DCSR = stat0;
	mb();
	DBGC(("conf_read: APECS DCSR was 0x%x\n", stat0));

	/* If Type1 access, must set HAE #2. */
	if (type1) {
		haxr2 = *(vuip)APECS_IOC_HAXR2;
		mb();
		*(vuip)APECS_IOC_HAXR2 = haxr2 | 1;
		DBGC(("conf_read: TYPE1 access\n"));
	}

	draina();
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();

	/* Access configuration space.  */

	/* Some SRMs step on these registers during a machine check.  */
	asm volatile("ldl %0,%1; mb; mb" : "=r"(value) : "m"(*(vuip)addr)
		     : "$9", "$10", "$11", "$12", "$13", "$14", "memory");

	if (mcheck_taken(0)) {
		mcheck_taken(0) = 0;
		value = 0xffffffffU;
		mb();
	}
	mcheck_expected(0) = 0;
	mb();

#if 1
	/*
	 * david.rusling@reo.mts.dec.com.  This code is needed for the
	 * EB64+ as it does not generate a machine check (why I don't
	 * know).  When we build kernels for one particular platform
	 * then we can make this conditional on the type.
	 */
	draina();

	/* Now look for any errors.  */
	stat0 = *(vuip)APECS_IOC_DCSR;
	DBGC(("conf_read: APECS DCSR after read 0x%x\n", stat0));

	/* Is any error bit set? */
	if (stat0 & 0xffe0U) {
		/* If not NDEV, print status.  */
		if (!(stat0 & 0x0800)) {
			printk("apecs.c:conf_read: got stat0=%x\n", stat0);
		}

		/* Reset error status.  */
		*(vuip)APECS_IOC_DCSR = stat0;
		mb();
		wrmces(0x7);		/* reset machine check */
		value = 0xffffffff;
	}
#endif

	/* If Type1 access, must reset HAE #2 so normal IO space ops work.  */
	if (type1) {
		*(vuip)APECS_IOC_HAXR2 = haxr2 & ~1;
		mb();
	}
	local_irq_restore(flags);

	return value;
}

static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1)
{
	unsigned long flags;
	unsigned int stat0;
	unsigned int haxr2 = 0;

	local_irq_save(flags);	/* avoid getting hit by machine check */

	/* Reset status register to avoid losing errors.  */
	stat0 = *(vuip)APECS_IOC_DCSR;
	*(vuip)APECS_IOC_DCSR = stat0;
	mb();

	/* If Type1 access, must set HAE #2. */
	if (type1) {
		haxr2 = *(vuip)APECS_IOC_HAXR2;
		mb();
		*(vuip)APECS_IOC_HAXR2 = haxr2 | 1;
	}

	draina();
	mcheck_expected(0) = 1;
	mb();

	/* Access configuration space.  */
	*(vuip)addr = value;
	mb();
	mb();  /* magic */
	mcheck_expected(0) = 0;
	mb();

#if 1
	/*
	 * david.rusling@reo.mts.dec.com.  This code is needed for the
	 * EB64+ as it does not generate a machine check (why I don't
	 * know).  When we build kernels for one particular platform
	 * then we can make this conditional on the type.
	 */
	draina();

	/* Now look for any errors.  */
	stat0 = *(vuip)APECS_IOC_DCSR;

	/* Is any error bit set? */
	if (stat0 & 0xffe0U) {
		/* If not NDEV, print status.  */
		if (!(stat0 & 0x0800)) {
			printk("apecs.c:conf_write: got stat0=%x\n", stat0);
		}

		/* Reset error status.  */
		*(vuip)APECS_IOC_DCSR = stat0;
		mb();
		wrmces(0x7);		/* reset machine check */
	}
#endif

	/* If Type1 access, must reset HAE #2 so normal IO space ops work.  */
	if (type1) {
		*(vuip)APECS_IOC_HAXR2 = haxr2 & ~1;
		mb();
	}
	local_irq_restore(flags);
}

static int
apecs_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		  int size, u32 *value)
{
	unsigned long addr, pci_addr;
	unsigned char type1;
	long mask;
	int shift;

	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

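	/*
	 * Sparse-space addressing: the configuration address is shifted
	 * left by 5 and the low bits encode the transfer length
	 * ((size - 1) * 8 is 0x00, 0x08 or 0x18 for byte, word or
	 * longword).  The returned longword is then shifted so the
	 * addressed byte lanes end up at bit 0.
	 */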
	mask = (size - 1) * 8;
	shift = (where & 3) * 8;
	addr = (pci_addr << 5) + mask + APECS_CONF;
	*value = conf_read(addr, type1) >> (shift);
	return PCIBIOS_SUCCESSFUL;
}

static int
apecs_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		   int size, u32 value)
{
	unsigned long addr, pci_addr;
	unsigned char type1;
	long mask;

	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

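	/*
	 * As in apecs_read_config(), the transfer length is encoded in
	 * the low sparse-space address bits; the value is shifted into
	 * the byte lanes selected by the low two bits of 'where'.
	 */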
	mask = (size - 1) * 8;
	addr = (pci_addr << 5) + mask + APECS_CONF;
	conf_write(addr, value << ((where & 3) * 8), type1);
	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops apecs_pci_ops =
{
	.read =		apecs_read_config,
	.write =	apecs_write_config,
};

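/*
 * Invalidate the scatter-gather translation buffer (TBIA: translation
 * buffer invalidate all).  The start/end range arguments are ignored;
 * the whole buffer is flushed.
 */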
void
apecs_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	wmb();
	*(vip)APECS_IOC_TBIA = 0;
	mb();
}

void __init
apecs_init_arch(void)
{
	struct pci_controller *hose;

	/*
	 * Create our single hose.
	 */

	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hose->mem_space = &iomem_resource;
	hose->index = 0;

	hose->sparse_mem_base = APECS_SPARSE_MEM - IDENT_ADDR;
	hose->dense_mem_base = APECS_DENSE_MEM - IDENT_ADDR;
	hose->sparse_io_base = APECS_IO - IDENT_ADDR;
	hose->dense_io_base = 0;

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Window 1 is direct access 1GB at 1GB
	 * Window 2 is scatter-gather 8MB at 8MB (for isa)
	 */
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
				       SMP_CACHE_BYTES);
	hose->sg_pci = NULL;
	__direct_map_base = 0x40000000;
	__direct_map_size = 0x40000000;

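	/*
	 * Program the window base (PBxR), mask (PMxR) and translated
	 * base (TBxR) registers.  In the base registers, bit 19
	 * (0x00080000) appears to be the window-enable bit and bit 18
	 * (0x00040000) selects scatter-gather translation, so window 1
	 * is direct-mapped while window 2 translates through the
	 * sg_isa page tables.
	 */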
	*(vuip)APECS_IOC_PB1R = __direct_map_base | 0x00080000;
	*(vuip)APECS_IOC_PM1R = (__direct_map_size - 1) & 0xfff00000U;
	*(vuip)APECS_IOC_TB1R = 0;

	*(vuip)APECS_IOC_PB2R = hose->sg_isa->dma_base | 0x000c0000;
	*(vuip)APECS_IOC_PM2R = (hose->sg_isa->size - 1) & 0xfff00000;
	*(vuip)APECS_IOC_TB2R = virt_to_phys(hose->sg_isa->ptes) >> 1;

	apecs_pci_tbi(hose, 0, -1);

	/*
	 * Finally, clear the HAXR2 register, which gets used
	 * for PCI Config Space accesses.  That is the way
	 * we want to use it, and we do not want to depend on
	 * what ARC or SRM might have left behind...
	 */
	*(vuip)APECS_IOC_HAXR2 = 0;
	mb();
}

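/*
 * Clear any latched PCI error state: read the error address register
 * (SEAR), write the error bits back to DCSR to acknowledge them, and
 * poke the TBIA register.  Called from the machine check handler below
 * before the error is reported.
 */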
void
apecs_pci_clr_err(void)
{
	unsigned int jd;

	jd = *(vuip)APECS_IOC_DCSR;
	if (jd & 0xffe0L) {
		*(vuip)APECS_IOC_SEAR;
		*(vuip)APECS_IOC_DCSR = jd | 0xffe1L;
		mb();
		*(vuip)APECS_IOC_DCSR;
	}
	*(vuip)APECS_IOC_TBIA = (unsigned int)APECS_IOC_TBIA;
	mb();
	*(vuip)APECS_IOC_TBIA;
}

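/*
 * Machine check handler: locate the processor and system error frames
 * inside the logout area, clear the error state, then hand the frame
 * to the generic reporting code.  The last argument tells
 * process_mcheck_info() whether this was an expected machine check
 * from a config space probe (EPIC DCSR bits 10 or 11 set).
 */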
void
apecs_machine_check(unsigned long vector, unsigned long la_ptr)
{
	struct el_common *mchk_header;
	struct el_apecs_procdata *mchk_procdata;
	struct el_apecs_sysdata_mcheck *mchk_sysdata;

	mchk_header = (struct el_common *)la_ptr;

	mchk_procdata = (struct el_apecs_procdata *)
		(la_ptr + mchk_header->proc_offset
		 - sizeof(mchk_procdata->paltemp));

	mchk_sysdata = (struct el_apecs_sysdata_mcheck *)
		(la_ptr + mchk_header->sys_offset);

	/* Clear the error before any reporting.  */
	mb();
	mb();  /* magic */
	draina();
	apecs_pci_clr_err();
	wrmces(0x7);		/* reset machine check pending flag */
	mb();

	process_mcheck_info(vector, la_ptr, "APECS",
			    (mcheck_expected(0)
			     && (mchk_sysdata->epic_dcsr & 0x0c00UL)));
}