/*
 * Copyright 2015 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "priv.h"
#include "agp.h"

#include <core/option.h>
#include <core/pci.h>

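/* Rearm MSI on the device, if present and enabled.  The backend's
 * msi_rearm() hook performs the chipset-specific register write that lets
 * the GPU signal further message-signalled interrupts.
 *
 * Illustrative use only (the caller shown is not part of this file): an
 * interrupt handler would typically invoke
 *
 *	nvkm_pci_msi_rearm(device);
 *
 * after servicing an interrupt.  The helper is a no-op when the PCI subdev
 * is absent or MSI is disabled.
 */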
void
nvkm_pci_msi_rearm(struct nvkm_device *device)
{
	struct nvkm_pci *pci = device->pci;

	if (pci && pci->msi)
		pci->func->msi_rearm(pci);
}

u32
nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
{
	return pci->func->rd32(pci, addr);
}

void
nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
{
	pci->func->wr08(pci, addr, data);
}

void
nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
{
	pci->func->wr32(pci, addr, data);
}

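/* Read-modify-write helper: the bits set in "mask" are cleared and then
 * replaced with "value"; the register's previous contents are returned so
 * that a caller may restore them later.  For example, this sketch has the
 * same effect as nvkm_pci_rom_shadow() below:
 *
 *	nvkm_pci_mask(pci, 0x0050, 0x00000001,
 *		      shadow ? 0x00000001 : 0x00000000);
 */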
u32
nvkm_pci_mask(struct nvkm_pci *pci, u16 addr, u32 mask, u32 value)
{
	u32 data = pci->func->rd32(pci, addr);
	pci->func->wr32(pci, addr, (data & ~mask) | value);
	return data;
}

void
nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow)
{
	u32 data = nvkm_pci_rd32(pci, 0x0050);
	if (shadow)
		data |= 0x00000001;
	else
		data &= ~0x00000001;
	nvkm_pci_wr32(pci, 0x0050, data);
}

static int
nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_pci *pci = nvkm_pci(subdev);

	if (pci->agp.bridge)
		nvkm_agp_fini(pci);

	return 0;
}

static int
nvkm_pci_preinit(struct nvkm_subdev *subdev)
{
	struct nvkm_pci *pci = nvkm_pci(subdev);
	if (pci->agp.bridge)
		nvkm_agp_preinit(pci);
	return 0;
}

static int
nvkm_pci_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_pci *pci = nvkm_pci(subdev);
	int ret;

	if (pci_is_pcie(pci->pdev)) {
		ret = nvkm_pcie_oneinit(pci);
		if (ret)
			return ret;
	}

	return 0;
}

static int
nvkm_pci_init(struct nvkm_subdev *subdev)
{
	struct nvkm_pci *pci = nvkm_pci(subdev);
	int ret;

	if (pci->agp.bridge) {
		ret = nvkm_agp_init(pci);
		if (ret)
			return ret;
	} else if (pci_is_pcie(pci->pdev)) {
		nvkm_pcie_init(pci);
	}

	if (pci->func->init)
		pci->func->init(pci);

	/* Ensure MSI interrupts are armed, for the case where there are
	 * already interrupts pending (for whatever reason) at load time.
	 */
	if (pci->msi)
		pci->func->msi_rearm(pci);

	return 0;
}

static void *
nvkm_pci_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_pci *pci = nvkm_pci(subdev);

	nvkm_agp_dtor(pci);

	if (pci->msi)
		pci_disable_msi(pci->pdev);

	return nvkm_pci(subdev);
}

static const struct nvkm_subdev_func
nvkm_pci_func = {
	.dtor = nvkm_pci_dtor,
	.oneinit = nvkm_pci_oneinit,
	.preinit = nvkm_pci_preinit,
	.init = nvkm_pci_init,
	.fini = nvkm_pci_fini,
};

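/* Common constructor, used by the chipset-specific PCI implementations.
 * A minimal sketch of such a backend (hypothetical names, for illustration
 * only; a real backend supplies rd32/wr08/wr32/msi_rearm hooks of its own):
 *
 *	static const struct nvkm_pci_func
 *	xx00_pci_func = {
 *		.rd32 = xx00_pci_rd32,
 *		.wr08 = xx00_pci_wr08,
 *		.wr32 = xx00_pci_wr32,
 *		.msi_rearm = xx00_pci_msi_rearm,
 *	};
 *
 *	int
 *	xx00_pci_new(struct nvkm_device *device, enum nvkm_subdev_type type,
 *		     int inst, struct nvkm_pci **ppci)
 *	{
 *		return nvkm_pci_new_(&xx00_pci_func, device, type, inst, ppci);
 *	}
 */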
int
nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, struct nvkm_pci **ppci)
{
	struct nvkm_pci *pci;

	if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_pci_func, device, type, inst, &pci->subdev);
	pci->func = func;
	pci->pdev = device->func->pci(device)->pdev;
	pci->pcie.speed = -1;
	pci->pcie.width = -1;

	if (device->type == NVKM_DEVICE_AGP)
		nvkm_agp_ctor(pci);

	switch (pci->pdev->device & 0x0ff0) {
	case 0x00f0:
	case 0x02e0:
		/* BR02? It's not yet clear exactly how these would need
		 * to be handled.
		 */
		break;
	default:
		switch (device->chipset) {
		case 0xaa:
			/* reported broken, NVIDIA's driver disables it too */
			break;
		default:
			pci->msi = true;
			break;
		}
	}

#ifdef __BIG_ENDIAN
	pci->msi = false;
#endif

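	/* Allow the default MSI policy chosen above to be overridden by the
	 * user.  The "NvMSI" boolean comes from the device's config-option
	 * string; with the nouveau kernel module this is typically passed
	 * through the "config" module parameter, e.g. (illustrative only):
	 *
	 *	nouveau.config=NvMSI=0
	 */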
	pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
	if (pci->msi && func->msi_rearm) {
		pci->msi = pci_enable_msi(pci->pdev) == 0;
		if (pci->msi)
			nvkm_debug(&pci->subdev, "MSI enabled\n");
	} else {
		pci->msi = false;
	}

	return 0;
}