1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Enable PCIe link L0s/L1 state and Clock Power Management
4 *
5 * Copyright (C) 2007 Intel
6 * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
7 * Copyright (C) Shaohua Li (shaohua.li@intel.com)
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/math.h>
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/pci.h>
15 #include <linux/pci_regs.h>
16 #include <linux/errno.h>
17 #include <linux/pm.h>
18 #include <linux/init.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/delay.h>
22 #include "../pci.h"
23
24 #ifdef MODULE_PARAM_PREFIX
25 #undef MODULE_PARAM_PREFIX
26 #endif
27 #define MODULE_PARAM_PREFIX "pcie_aspm."
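/*
 * With this prefix, the module parameters defined below appear on the
 * kernel command line as "pcie_aspm.<param>", e.g. "pcie_aspm.policy=powersave".
 */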
28
29 /* Note: those are not register definitions */
30 #define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
31 #define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
32 #define ASPM_STATE_L1 (4) /* L1 state */
33 #define ASPM_STATE_L1_1 (8) /* ASPM L1.1 state */
34 #define ASPM_STATE_L1_2 (0x10) /* ASPM L1.2 state */
35 #define ASPM_STATE_L1_1_PCIPM (0x20) /* PCI PM L1.1 state */
36 #define ASPM_STATE_L1_2_PCIPM (0x40) /* PCI PM L1.2 state */
37 #define ASPM_STATE_L1_SS_PCIPM (ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1_2_PCIPM)
38 #define ASPM_STATE_L1_2_MASK (ASPM_STATE_L1_2 | ASPM_STATE_L1_2_PCIPM)
39 #define ASPM_STATE_L1SS (ASPM_STATE_L1_1 | ASPM_STATE_L1_1_PCIPM |\
40 ASPM_STATE_L1_2_MASK)
41 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
42 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1 | \
43 ASPM_STATE_L1SS)
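/*
 * For illustration: with the values above, ASPM_STATE_L1SS works out to
 * 0x78 and ASPM_STATE_ALL to 0x7f, which is why the aspm_* bitfields in
 * struct pcie_link_state below are 7 bits wide.
 */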
44
45 struct pcie_link_state {
46 struct pci_dev *pdev; /* Upstream component of the Link */
47 struct pci_dev *downstream; /* Downstream component, function 0 */
48 struct pcie_link_state *root; /* pointer to the root port link */
49 struct pcie_link_state *parent; /* pointer to the parent Link state */
50 struct list_head sibling; /* node in link_list */
51
52 /* ASPM state */
53 u32 aspm_support:7; /* Supported ASPM state */
54 u32 aspm_enabled:7; /* Enabled ASPM state */
55 u32 aspm_capable:7; /* Capable ASPM state with latency */
56 u32 aspm_default:7; /* Default ASPM state by BIOS */
57 u32 aspm_disable:7; /* Disabled ASPM state */
58
59 /* Clock PM state */
60 u32 clkpm_capable:1; /* Clock PM capable? */
61 u32 clkpm_enabled:1; /* Current Clock PM state */
62 u32 clkpm_default:1; /* Default Clock PM state by BIOS */
63 u32 clkpm_disable:1; /* Clock PM disabled */
64 };
65
66 static int aspm_disabled, aspm_force;
67 static bool aspm_support_enabled = true;
68 static DEFINE_MUTEX(aspm_lock);
69 static LIST_HEAD(link_list);
70
71 #define POLICY_DEFAULT 0 /* BIOS default setting */
72 #define POLICY_PERFORMANCE 1 /* high performance */
73 #define POLICY_POWERSAVE 2 /* high power saving */
74 #define POLICY_POWER_SUPERSAVE 3 /* possibly even more power saving */
75
76 #ifdef CONFIG_PCIEASPM_PERFORMANCE
77 static int aspm_policy = POLICY_PERFORMANCE;
78 #elif defined CONFIG_PCIEASPM_POWERSAVE
79 static int aspm_policy = POLICY_POWERSAVE;
80 #elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
81 static int aspm_policy = POLICY_POWER_SUPERSAVE;
82 #else
83 static int aspm_policy;
84 #endif
85
86 static const char *policy_str[] = {
87 [POLICY_DEFAULT] = "default",
88 [POLICY_PERFORMANCE] = "performance",
89 [POLICY_POWERSAVE] = "powersave",
90 [POLICY_POWER_SUPERSAVE] = "powersupersave"
91 };
92
93 #define LINK_RETRAIN_TIMEOUT HZ
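/* i.e. roughly one second expressed in jiffies */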
94
95 /*
96 * The L1 PM substate capability is only implemented in function 0 of a
97 * multi-function device.
98 */
99 static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
100 {
101 struct pci_dev *child;
102
103 list_for_each_entry(child, &linkbus->devices, bus_list)
104 if (PCI_FUNC(child->devfn) == 0)
105 return child;
106 return NULL;
107 }
108
109 static int policy_to_aspm_state(struct pcie_link_state *link)
110 {
111 switch (aspm_policy) {
112 case POLICY_PERFORMANCE:
113 /* Disable ASPM and Clock PM */
114 return 0;
115 case POLICY_POWERSAVE:
116 /* Enable ASPM L0s/L1 */
117 return (ASPM_STATE_L0S | ASPM_STATE_L1);
118 case POLICY_POWER_SUPERSAVE:
119 /* Enable Everything */
120 return ASPM_STATE_ALL;
121 case POLICY_DEFAULT:
122 return link->aspm_default;
123 }
124 return 0;
125 }
126
127 static int policy_to_clkpm_state(struct pcie_link_state *link)
128 {
129 switch (aspm_policy) {
130 case POLICY_PERFORMANCE:
131 /* Disable ASPM and Clock PM */
132 return 0;
133 case POLICY_POWERSAVE:
134 case POLICY_POWER_SUPERSAVE:
135 /* Enable Clock PM */
136 return 1;
137 case POLICY_DEFAULT:
138 return link->clkpm_default;
139 }
140 return 0;
141 }
142
143 static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
144 {
145 struct pci_dev *child;
146 struct pci_bus *linkbus = link->pdev->subordinate;
147 u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;
148
149 list_for_each_entry(child, &linkbus->devices, bus_list)
150 pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
151 PCI_EXP_LNKCTL_CLKREQ_EN,
152 val);
153 link->clkpm_enabled = !!enable;
154 }
155
156 static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
157 {
158 /*
159 * Don't enable Clock PM if the link is not Clock PM capable
160 * or Clock PM is disabled
161 */
162 if (!link->clkpm_capable || link->clkpm_disable)
163 enable = 0;
164 /* Nothing to do if the requested state already matches the current state */
165 if (link->clkpm_enabled == enable)
166 return;
167 pcie_set_clkpm_nocheck(link, enable);
168 }
169
170 static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
171 {
172 int capable = 1, enabled = 1;
173 u32 reg32;
174 u16 reg16;
175 struct pci_dev *child;
176 struct pci_bus *linkbus = link->pdev->subordinate;
177
178 /* All functions should have the same cap and state, take the worst */
179 list_for_each_entry(child, &linkbus->devices, bus_list) {
180 pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
181 if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
182 capable = 0;
183 enabled = 0;
184 break;
185 }
186 pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
187 if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
188 enabled = 0;
189 }
190 link->clkpm_enabled = enabled;
191 link->clkpm_default = enabled;
192 link->clkpm_capable = capable;
193 link->clkpm_disable = blacklist ? 1 : 0;
194 }
195
196 static bool pcie_retrain_link(struct pcie_link_state *link)
197 {
198 struct pci_dev *parent = link->pdev;
199 unsigned long end_jiffies;
200 u16 reg16;
201
202 pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
203 reg16 |= PCI_EXP_LNKCTL_RL;
204 pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
205 if (parent->clear_retrain_link) {
206 /*
207 * Due to an erratum in some devices the Retrain Link bit
208 * needs to be cleared again manually to allow the link
209 * training to succeed.
210 */
211 reg16 &= ~PCI_EXP_LNKCTL_RL;
212 pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
213 }
214
215 /* Wait for link training to end. Break out once the timeout expires */
216 end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
217 do {
218 pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
219 if (!(reg16 & PCI_EXP_LNKSTA_LT))
220 break;
221 msleep(1);
222 } while (time_before(jiffies, end_jiffies));
223 return !(reg16 & PCI_EXP_LNKSTA_LT);
224 }
225
226 /*
227 * pcie_aspm_configure_common_clock: check if the two ends of a link
228 * can use a common clock. If they can, configure them to use the
229 * common clock, which reduces the ASPM state exit latency.
230 */
231 static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
232 {
233 int same_clock = 1;
234 u16 reg16, parent_reg, child_reg[8];
235 struct pci_dev *child, *parent = link->pdev;
236 struct pci_bus *linkbus = parent->subordinate;
237 /*
238 * All functions of a slot should have the same Slot Clock
239 * Configuration, so just check one function
240 */
241 child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
242 BUG_ON(!pci_is_pcie(child));
243
244 /* Check downstream component if bit Slot Clock Configuration is 1 */
245 pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
246 if (!(reg16 & PCI_EXP_LNKSTA_SLC))
247 same_clock = 0;
248
249 /* Check upstream component if bit Slot Clock Configuration is 1 */
250 pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
251 if (!(reg16 & PCI_EXP_LNKSTA_SLC))
252 same_clock = 0;
253
254 /* Port might be already in common clock mode */
255 pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
256 if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
257 bool consistent = true;
258
259 list_for_each_entry(child, &linkbus->devices, bus_list) {
260 pcie_capability_read_word(child, PCI_EXP_LNKCTL,
261 &reg16);
262 if (!(reg16 & PCI_EXP_LNKCTL_CCC)) {
263 consistent = false;
264 break;
265 }
266 }
267 if (consistent)
268 return;
269 pci_info(parent, "ASPM: current common clock configuration is inconsistent, reconfiguring\n");
270 }
271
272 /* Configure downstream component, all functions */
273 list_for_each_entry(child, &linkbus->devices, bus_list) {
274 pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
275 child_reg[PCI_FUNC(child->devfn)] = reg16;
276 if (same_clock)
277 reg16 |= PCI_EXP_LNKCTL_CCC;
278 else
279 reg16 &= ~PCI_EXP_LNKCTL_CCC;
280 pcie_capability_write_word(child, PCI_EXP_LNKCTL, reg16);
281 }
282
283 /* Configure upstream component */
284 pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
285 parent_reg = reg16;
286 if (same_clock)
287 reg16 |= PCI_EXP_LNKCTL_CCC;
288 else
289 reg16 &= ~PCI_EXP_LNKCTL_CCC;
290 pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
291
292 if (pcie_retrain_link(link))
293 return;
294
295 /* Training failed. Restore common clock configurations */
296 pci_err(parent, "ASPM: Could not configure common clock\n");
297 list_for_each_entry(child, &linkbus->devices, bus_list)
298 pcie_capability_write_word(child, PCI_EXP_LNKCTL,
299 child_reg[PCI_FUNC(child->devfn)]);
300 pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
301 }
302
303 /* Convert L0s latency encoding to ns */
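/* For example, an encoding of 3 maps to 64 << 3 = 512 ns. */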
304 static u32 calc_l0s_latency(u32 lnkcap)
305 {
306 u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
307
308 if (encoding == 0x7)
309 return (5 * 1000); /* > 4us */
310 return (64 << encoding);
311 }
312
313 /* Convert L0s acceptable latency encoding to ns */
314 static u32 calc_l0s_acceptable(u32 encoding)
315 {
316 if (encoding == 0x7)
317 return -1U;
318 return (64 << encoding);
319 }
320
321 /* Convert L1 latency encoding to ns */
322 static u32 calc_l1_latency(u32 lnkcap)
323 {
324 u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
325
326 if (encoding == 0x7)
327 return (65 * 1000); /* > 64us */
328 return (1000 << encoding);
329 }
330
331 /* Convert L1 acceptable latency encoding to ns */
332 static u32 calc_l1_acceptable(u32 encoding)
333 {
334 if (encoding == 0x7)
335 return -1U;
336 return (1000 << encoding);
337 }
338
339 /* Convert L1SS T_pwr encoding to usec */
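/* For example, scale 2 with a value of 5 gives 5 * 100 = 500 usec. */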
340 static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val)
341 {
342 switch (scale) {
343 case 0:
344 return val * 2;
345 case 1:
346 return val * 10;
347 case 2:
348 return val * 100;
349 }
350 pci_err(pdev, "%s: Invalid T_PwrOn scale: %u\n", __func__, scale);
351 return 0;
352 }
353
354 /*
355 * Encode an LTR_L1.2_THRESHOLD value for the L1 PM Substates Control 1
356 * register. Ports enter L1.2 when the most recent LTR value is greater
357 * than or equal to LTR_L1.2_THRESHOLD, so we round up to make sure we
358 * don't enter L1.2 too aggressively.
359 *
360 * See PCIe r6.0, sec 5.5.1, 6.18, 7.8.3.3.
361 */
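/*
 * For illustration: threshold_us = 55 gives threshold_ns = 55000, which
 * falls in the "times 1024 ns" bucket below, so *scale = 2 and
 * *value = roundup(55000, 1024) / 1024 = 54 (i.e. 55296 ns, rounded up).
 */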
362 static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
363 {
364 u64 threshold_ns = (u64) threshold_us * 1000;
365
366 /*
367 * LTR_L1.2_THRESHOLD_Value ("value") is a 10-bit field with max
368 * value of 0x3ff.
369 */
370 if (threshold_ns <= 0x3ff * 1) {
371 *scale = 0; /* Value times 1ns */
372 *value = threshold_ns;
373 } else if (threshold_ns <= 0x3ff * 32) {
374 *scale = 1; /* Value times 32ns */
375 *value = roundup(threshold_ns, 32) / 32;
376 } else if (threshold_ns <= 0x3ff * 1024) {
377 *scale = 2; /* Value times 1024ns */
378 *value = roundup(threshold_ns, 1024) / 1024;
379 } else if (threshold_ns <= 0x3ff * 32768) {
380 *scale = 3; /* Value times 32768ns */
381 *value = roundup(threshold_ns, 32768) / 32768;
382 } else if (threshold_ns <= 0x3ff * 1048576) {
383 *scale = 4; /* Value times 1048576ns */
384 *value = roundup(threshold_ns, 1048576) / 1048576;
385 } else if (threshold_ns <= 0x3ff * (u64) 33554432) {
386 *scale = 5; /* Value times 33554432ns */
387 *value = roundup(threshold_ns, 33554432) / 33554432;
388 } else {
389 *scale = 5;
390 *value = 0x3ff; /* Max representable value */
391 }
392 }
393
394 static void pcie_aspm_check_latency(struct pci_dev *endpoint)
395 {
396 u32 latency, encoding, lnkcap_up, lnkcap_dw;
397 u32 l1_switch_latency = 0, latency_up_l0s;
398 u32 latency_up_l1, latency_dw_l0s, latency_dw_l1;
399 u32 acceptable_l0s, acceptable_l1;
400 struct pcie_link_state *link;
401
402 /* Device not in D0 doesn't need latency check */
403 if ((endpoint->current_state != PCI_D0) &&
404 (endpoint->current_state != PCI_UNKNOWN))
405 return;
406
407 link = endpoint->bus->self->link_state;
408
409 /* Calculate endpoint L0s acceptable latency */
410 encoding = (endpoint->devcap & PCI_EXP_DEVCAP_L0S) >> 6;
411 acceptable_l0s = calc_l0s_acceptable(encoding);
412
413 /* Calculate endpoint L1 acceptable latency */
414 encoding = (endpoint->devcap & PCI_EXP_DEVCAP_L1) >> 9;
415 acceptable_l1 = calc_l1_acceptable(encoding);
416
417 while (link) {
418 struct pci_dev *dev = pci_function_0(link->pdev->subordinate);
419
420 /* Read direction exit latencies */
421 pcie_capability_read_dword(link->pdev, PCI_EXP_LNKCAP,
422 &lnkcap_up);
423 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP,
424 &lnkcap_dw);
425 latency_up_l0s = calc_l0s_latency(lnkcap_up);
426 latency_up_l1 = calc_l1_latency(lnkcap_up);
427 latency_dw_l0s = calc_l0s_latency(lnkcap_dw);
428 latency_dw_l1 = calc_l1_latency(lnkcap_dw);
429
430 /* Check upstream direction L0s latency */
431 if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
432 (latency_up_l0s > acceptable_l0s))
433 link->aspm_capable &= ~ASPM_STATE_L0S_UP;
434
435 /* Check downstream direction L0s latency */
436 if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
437 (latency_dw_l0s > acceptable_l0s))
438 link->aspm_capable &= ~ASPM_STATE_L0S_DW;
439 /*
440 * Check L1 latency.
441 * Every switch on the path to the root complex needs 1
442 * more microsecond for L1. The spec doesn't mention L0s.
443 *
444 * The exit latencies for L1 substates are not advertised
445 * by a device. Since the spec also doesn't mention a way
446 * to determine max latencies introduced by enabling L1
447 * substates on the components, it is not clear how to do
448 * a L1 substate exit latency check. We assume that the
449 * L1 exit latencies advertised by a device include L1
450 * substate latencies (and hence do not do any check).
451 */
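/*
 * For example, for an endpoint below two switches, the topmost
 * (Root Port) link is checked with an extra 2 us of switch latency
 * added to its advertised L1 exit latency.
 */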
452 latency = max_t(u32, latency_up_l1, latency_dw_l1);
453 if ((link->aspm_capable & ASPM_STATE_L1) &&
454 (latency + l1_switch_latency > acceptable_l1))
455 link->aspm_capable &= ~ASPM_STATE_L1;
456 l1_switch_latency += 1000;
457
458 link = link->parent;
459 }
460 }
461
462 static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
463 u32 clear, u32 set)
464 {
465 u32 val;
466
467 pci_read_config_dword(pdev, pos, &val);
468 val &= ~clear;
469 val |= set;
470 pci_write_config_dword(pdev, pos, val);
471 }
472
473 /* Calculate L1.2 PM substate timing parameters */
474 static void aspm_calc_l1ss_info(struct pcie_link_state *link,
475 u32 parent_l1ss_cap, u32 child_l1ss_cap)
476 {
477 struct pci_dev *child = link->downstream, *parent = link->pdev;
478 u32 val1, val2, scale1, scale2;
479 u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
480 u32 ctl1 = 0, ctl2 = 0;
481 u32 pctl1, pctl2, cctl1, cctl2;
482 u32 pl1_2_enables, cl1_2_enables;
483
484 if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
485 return;
486
487 /* Choose the greater of the two Port Common_Mode_Restore_Times */
488 val1 = (parent_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
489 val2 = (child_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
490 t_common_mode = max(val1, val2);
491
492 /* Choose the greater of the two Port T_POWER_ON times */
493 val1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
494 scale1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
495 val2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
496 scale2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
497
498 if (calc_l1ss_pwron(parent, scale1, val1) >
499 calc_l1ss_pwron(child, scale2, val2)) {
500 ctl2 |= scale1 | (val1 << 3);
501 t_power_on = calc_l1ss_pwron(parent, scale1, val1);
502 } else {
503 ctl2 |= scale2 | (val2 << 3);
504 t_power_on = calc_l1ss_pwron(child, scale2, val2);
505 }
506
507 /*
508 * Set LTR_L1.2_THRESHOLD to the time required to transition the
509 * Link from L0 to L1.2 and back to L0 so we enter L1.2 only if
510 * downstream devices report (via LTR) that they can tolerate at
511 * least that much latency.
512 *
513 * Based on PCIe r3.1, sec 5.5.3.3.1, Figures 5-16 and 5-17, and
514 * Table 5-11. T(POWER_OFF) is at most 2us and T(L1.2) is at
515 * least 4us.
516 */
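/*
 * For example, with t_common_mode = 10 us and t_power_on = 20 us
 * (illustrative values), the computation below yields an
 * LTR_L1.2_THRESHOLD of 2 + 4 + 10 + 20 = 36 us.
 */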
517 l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
518 encode_l12_threshold(l1_2_threshold, &scale, &value);
519 ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;
520
521 /* Some broken devices only support dword access to L1 SS */
522 pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
523 pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2);
524 pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1);
525 pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL2, &cctl2);
526
527 if (ctl1 == pctl1 && ctl1 == cctl1 &&
528 ctl2 == pctl2 && ctl2 == cctl2)
529 return;
530
531 /* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
532 pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
533 cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
534
535 if (pl1_2_enables || cl1_2_enables) {
536 pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
537 PCI_L1SS_CTL1_L1_2_MASK, 0);
538 pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
539 PCI_L1SS_CTL1_L1_2_MASK, 0);
540 }
541
542 /* Program T_POWER_ON times in both ports */
543 pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
544 pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
545
546 /* Program Common_Mode_Restore_Time in upstream device */
547 pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
548 PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
549
550 /* Program LTR_L1.2_THRESHOLD time in both ports */
551 pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
552 PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
553 PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
554 pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
555 PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
556 PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
557
558 if (pl1_2_enables || cl1_2_enables) {
559 pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
560 pl1_2_enables);
561 pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
562 cl1_2_enables);
563 }
564 }
565
566 static void aspm_l1ss_init(struct pcie_link_state *link)
567 {
568 struct pci_dev *child = link->downstream, *parent = link->pdev;
569 u32 parent_l1ss_cap, child_l1ss_cap;
570 u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
571
572 if (!parent->l1ss || !child->l1ss)
573 return;
574
575 /* Setup L1 substate */
576 pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
577 &parent_l1ss_cap);
578 pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
579 &child_l1ss_cap);
580
581 if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
582 parent_l1ss_cap = 0;
583 if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
584 child_l1ss_cap = 0;
585
586 /*
587 * If we don't have LTR for the entire path from the Root Complex
588 * to this device, we can't use ASPM L1.2 because it relies on the
589 * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
590 */
591 if (!child->ltr_path)
592 child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
593
594 if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
595 link->aspm_support |= ASPM_STATE_L1_1;
596 if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
597 link->aspm_support |= ASPM_STATE_L1_2;
598 if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
599 link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
600 if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
601 link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
602
603 if (parent_l1ss_cap)
604 pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
605 &parent_l1ss_ctl1);
606 if (child_l1ss_cap)
607 pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
608 &child_l1ss_ctl1);
609
610 if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
611 link->aspm_enabled |= ASPM_STATE_L1_1;
612 if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
613 link->aspm_enabled |= ASPM_STATE_L1_2;
614 if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
615 link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
616 if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
617 link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
618
619 if (link->aspm_support & ASPM_STATE_L1SS)
620 aspm_calc_l1ss_info(link, parent_l1ss_cap, child_l1ss_cap);
621 }
622
623 static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
624 {
625 struct pci_dev *child = link->downstream, *parent = link->pdev;
626 u32 parent_lnkcap, child_lnkcap;
627 u16 parent_lnkctl, child_lnkctl;
628 struct pci_bus *linkbus = parent->subordinate;
629
630 if (blacklist) {
631 /* Set both the enabled and disable masks so that ASPM is disabled later */
632 link->aspm_enabled = ASPM_STATE_ALL;
633 link->aspm_disable = ASPM_STATE_ALL;
634 return;
635 }
636
637 /*
638 * If ASPM not supported, don't mess with the clocks and link,
639 * bail out now.
640 */
641 pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
642 pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
643 if (!(parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPMS))
644 return;
645
646 /* Configure common clock before checking latencies */
647 pcie_aspm_configure_common_clock(link);
648
649 /*
650 * Re-read upstream/downstream components' register state after
651 * clock configuration. L0s & L1 exit latencies in the otherwise
652 * read-only Link Capabilities may change depending on common clock
653 * configuration (PCIe r5.0, sec 7.5.3.6).
654 */
655 pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
656 pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
657 pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
658 pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);
659
660 /*
661 * Setup L0s state
662 *
663 * Note that we must not enable L0s in either direction on a
664 * given link unless components on both sides of the link each
665 * support L0s.
666 */
667 if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
668 link->aspm_support |= ASPM_STATE_L0S;
669
670 if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
671 link->aspm_enabled |= ASPM_STATE_L0S_UP;
672 if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
673 link->aspm_enabled |= ASPM_STATE_L0S_DW;
674
675 /* Setup L1 state */
676 if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
677 link->aspm_support |= ASPM_STATE_L1;
678
679 if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
680 link->aspm_enabled |= ASPM_STATE_L1;
681
682 aspm_l1ss_init(link);
683
684 /* Save default state */
685 link->aspm_default = link->aspm_enabled;
686
687 /* Setup initial capable state. Will be updated later */
688 link->aspm_capable = link->aspm_support;
689
690 /* Get and check endpoint acceptable latencies */
691 list_for_each_entry(child, &linkbus->devices, bus_list) {
692 if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
693 pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
694 continue;
695
696 pcie_aspm_check_latency(child);
697 }
698 }
699
700 /* Configure the ASPM L1 substates */
701 static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
702 {
703 u32 val, enable_req;
704 struct pci_dev *child = link->downstream, *parent = link->pdev;
705
706 enable_req = (link->aspm_enabled ^ state) & state;
707
708 /*
709 * Here are the rules specified in the PCIe spec for enabling L1SS:
710 * - When enabling L1.x, enable bit at parent first, then at child
711 * - When disabling L1.x, disable bit at child first, then at parent
712 * - When enabling ASPM L1.x, need to disable L1
713 * (at child followed by parent).
714 * - The ASPM/PCIPM L1.2 must be disabled while programming timing
715 * parameters
716 *
717 * To keep it simple, disable all L1SS bits first, and later enable
718 * what is needed.
719 */
720
721 /* Disable all L1 substates */
722 pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
723 PCI_L1SS_CTL1_L1SS_MASK, 0);
724 pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
725 PCI_L1SS_CTL1_L1SS_MASK, 0);
726 /*
727 * If needed, disable L1, and it gets enabled later
728 * in pcie_config_aspm_link().
729 */
730 if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
731 pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
732 PCI_EXP_LNKCTL_ASPM_L1, 0);
733 pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
734 PCI_EXP_LNKCTL_ASPM_L1, 0);
735 }
736
737 val = 0;
738 if (state & ASPM_STATE_L1_1)
739 val |= PCI_L1SS_CTL1_ASPM_L1_1;
740 if (state & ASPM_STATE_L1_2)
741 val |= PCI_L1SS_CTL1_ASPM_L1_2;
742 if (state & ASPM_STATE_L1_1_PCIPM)
743 val |= PCI_L1SS_CTL1_PCIPM_L1_1;
744 if (state & ASPM_STATE_L1_2_PCIPM)
745 val |= PCI_L1SS_CTL1_PCIPM_L1_2;
746
747 /* Enable what we need to enable */
748 pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
749 PCI_L1SS_CTL1_L1SS_MASK, val);
750 pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
751 PCI_L1SS_CTL1_L1SS_MASK, val);
752 }
753
754 static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
755 {
756 pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
757 PCI_EXP_LNKCTL_ASPMC, val);
758 }
759
760 static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
761 {
762 u32 upstream = 0, dwstream = 0;
763 struct pci_dev *child = link->downstream, *parent = link->pdev;
764 struct pci_bus *linkbus = parent->subordinate;
765
766 /* Enable only the states that were not explicitly disabled */
767 state &= (link->aspm_capable & ~link->aspm_disable);
768
769 /* Can't enable any substates if L1 is not enabled */
770 if (!(state & ASPM_STATE_L1))
771 state &= ~ASPM_STATE_L1SS;
772
773 /* Spec says both ports must be in D0 before enabling PCI PM substates */
774 if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
775 state &= ~ASPM_STATE_L1_SS_PCIPM;
776 state |= (link->aspm_enabled & ASPM_STATE_L1_SS_PCIPM);
777 }
778
779 /* Nothing to do if the link is already in the requested state */
780 if (link->aspm_enabled == state)
781 return;
782 /* Convert ASPM state to upstream/downstream ASPM register state */
783 if (state & ASPM_STATE_L0S_UP)
784 dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
785 if (state & ASPM_STATE_L0S_DW)
786 upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
787 if (state & ASPM_STATE_L1) {
788 upstream |= PCI_EXP_LNKCTL_ASPM_L1;
789 dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
790 }
791
792 if (link->aspm_capable & ASPM_STATE_L1SS)
793 pcie_config_aspm_l1ss(link, state);
794
795 /*
796 * PCIe spec 2.0 suggests that all functions be configured with the
797 * same ASPM setting. ASPM L1 should be enabled in the upstream
798 * component first and then in the downstream component, and vice
799 * versa for disabling ASPM L1. The spec doesn't mention L0s.
800 */
801 if (state & ASPM_STATE_L1)
802 pcie_config_aspm_dev(parent, upstream);
803 list_for_each_entry(child, &linkbus->devices, bus_list)
804 pcie_config_aspm_dev(child, dwstream);
805 if (!(state & ASPM_STATE_L1))
806 pcie_config_aspm_dev(parent, upstream);
807
808 link->aspm_enabled = state;
809 }
810
811 static void pcie_config_aspm_path(struct pcie_link_state *link)
812 {
813 while (link) {
814 pcie_config_aspm_link(link, policy_to_aspm_state(link));
815 link = link->parent;
816 }
817 }
818
819 static void free_link_state(struct pcie_link_state *link)
820 {
821 link->pdev->link_state = NULL;
822 kfree(link);
823 }
824
825 static int pcie_aspm_sanity_check(struct pci_dev *pdev)
826 {
827 struct pci_dev *child;
828 u32 reg32;
829
830 /*
831 * Strangely, some functions in a slot might not be PCIe
832 * functions at all. Disable ASPM for the whole slot.
833 */
834 list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
835 if (!pci_is_pcie(child))
836 return -EINVAL;
837
838 /*
839 * If ASPM is disabled then we're not going to change
840 * the BIOS state. It's safe to continue even if it's a
841 * pre-1.1 device
842 */
843
844 if (aspm_disabled)
845 continue;
846
847 /*
848 * Disable ASPM for pre-1.1 PCIe devices. Like Windows, we use the
849 * RBER bit to determine whether a function is a 1.1 device.
850 */
851 pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
852 if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
853 pci_info(child, "disabling ASPM on pre-1.1 PCIe device. You can enable it with 'pcie_aspm=force'\n");
854 return -EINVAL;
855 }
856 }
857 return 0;
858 }
859
860 static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
861 {
862 struct pcie_link_state *link;
863
864 link = kzalloc(sizeof(*link), GFP_KERNEL);
865 if (!link)
866 return NULL;
867
868 INIT_LIST_HEAD(&link->sibling);
869 link->pdev = pdev;
870 link->downstream = pci_function_0(pdev->subordinate);
871
872 /*
873 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
874 * hierarchies. Note that some PCIe host implementations omit
875 * the root ports entirely, in which case a downstream port on
876 * a switch may become the root of the link state chain for all
877 * its subordinate endpoints.
878 */
879 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
880 pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
881 !pdev->bus->parent->self) {
882 link->root = link;
883 } else {
884 struct pcie_link_state *parent;
885
886 parent = pdev->bus->parent->self->link_state;
887 if (!parent) {
888 kfree(link);
889 return NULL;
890 }
891
892 link->parent = parent;
893 link->root = link->parent->root;
894 }
895
896 list_add(&link->sibling, &link_list);
897 pdev->link_state = link;
898 return link;
899 }
900
901 static void pcie_aspm_update_sysfs_visibility(struct pci_dev *pdev)
902 {
903 struct pci_dev *child;
904
905 list_for_each_entry(child, &pdev->subordinate->devices, bus_list)
906 sysfs_update_group(&child->dev.kobj, &aspm_ctrl_attr_group);
907 }
908
909 /*
910 * pcie_aspm_init_link_state: Initialize the PCIe link state.
911 * Called after the PCIe device and its child devices have been scanned.
912 * @pdev: the root port or switch downstream port
913 */
914 void pcie_aspm_init_link_state(struct pci_dev *pdev)
915 {
916 struct pcie_link_state *link;
917 int blacklist = !!pcie_aspm_sanity_check(pdev);
918
919 if (!aspm_support_enabled)
920 return;
921
922 if (pdev->link_state)
923 return;
924
925 /*
926 * We allocate pcie_link_state for the component on the upstream
927 * end of a Link, so there's nothing to do unless this device is
928 * a downstream port.
929 */
930 if (!pcie_downstream_port(pdev))
931 return;
932
933 /* VIA has a strange chipset, root port is under a bridge */
934 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT &&
935 pdev->bus->self)
936 return;
937
938 down_read(&pci_bus_sem);
939 if (list_empty(&pdev->subordinate->devices))
940 goto out;
941
942 mutex_lock(&aspm_lock);
943 link = alloc_pcie_link_state(pdev);
944 if (!link)
945 goto unlock;
946 /*
947 * Setup initial ASPM state. Note that we need to configure
948 * upstream links as well, because their capable state can be
949 * updated through pcie_aspm_cap_init().
950 */
951 pcie_aspm_cap_init(link, blacklist);
952
953 /* Setup initial Clock PM state */
954 pcie_clkpm_cap_init(link, blacklist);
955
956 /*
957 * At this stage drivers haven't had an opportunity to change the
958 * link policy setting. Enabling ASPM on broken hardware can cripple
959 * it even before the driver has had a chance to disable ASPM, so
960 * default to a safe level right now. If we're enabling ASPM beyond
961 * the BIOS's expectation, we'll do so once pci_enable_device() is
962 * called.
963 */
964 if (aspm_policy != POLICY_POWERSAVE &&
965 aspm_policy != POLICY_POWER_SUPERSAVE) {
966 pcie_config_aspm_path(link);
967 pcie_set_clkpm(link, policy_to_clkpm_state(link));
968 }
969
970 pcie_aspm_update_sysfs_visibility(pdev);
971
972 unlock:
973 mutex_unlock(&aspm_lock);
974 out:
975 up_read(&pci_bus_sem);
976 }
977
978 /* Recheck latencies and update aspm_capable for links under the root */
979 static void pcie_update_aspm_capable(struct pcie_link_state *root)
980 {
981 struct pcie_link_state *link;
982 BUG_ON(root->parent);
983 list_for_each_entry(link, &link_list, sibling) {
984 if (link->root != root)
985 continue;
986 link->aspm_capable = link->aspm_support;
987 }
988 list_for_each_entry(link, &link_list, sibling) {
989 struct pci_dev *child;
990 struct pci_bus *linkbus = link->pdev->subordinate;
991 if (link->root != root)
992 continue;
993 list_for_each_entry(child, &linkbus->devices, bus_list) {
994 if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
995 (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
996 continue;
997 pcie_aspm_check_latency(child);
998 }
999 }
1000 }
1001
1002 /* @pdev: the endpoint device */
1003 void pcie_aspm_exit_link_state(struct pci_dev *pdev)
1004 {
1005 struct pci_dev *parent = pdev->bus->self;
1006 struct pcie_link_state *link, *root, *parent_link;
1007
1008 if (!parent || !parent->link_state)
1009 return;
1010
1011 down_read(&pci_bus_sem);
1012 mutex_lock(&aspm_lock);
1013 /*
1014 * All PCIe functions are in one slot; removing one function removes
1015 * the whole slot, so just wait until we are the last function left.
1016 */
1017 if (!list_empty(&parent->subordinate->devices))
1018 goto out;
1019
1020 link = parent->link_state;
1021 root = link->root;
1022 parent_link = link->parent;
1023
1024 /* All functions are removed, so just disable ASPM for the link */
1025 pcie_config_aspm_link(link, 0);
1026 list_del(&link->sibling);
1027 /* Clock PM is for endpoint device */
1028 free_link_state(link);
1029
1030 /* Recheck latencies and configure upstream links */
1031 if (parent_link) {
1032 pcie_update_aspm_capable(root);
1033 pcie_config_aspm_path(parent_link);
1034 }
1035 out:
1036 mutex_unlock(&aspm_lock);
1037 up_read(&pci_bus_sem);
1038 }
1039
1040 void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
1041 {
1042 struct pcie_link_state *link = pdev->link_state;
1043
1044 if (aspm_disabled || !link)
1045 return;
1046
1047 if (aspm_policy != POLICY_POWERSAVE &&
1048 aspm_policy != POLICY_POWER_SUPERSAVE)
1049 return;
1050
1051 down_read(&pci_bus_sem);
1052 mutex_lock(&aspm_lock);
1053 pcie_config_aspm_path(link);
1054 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1055 mutex_unlock(&aspm_lock);
1056 up_read(&pci_bus_sem);
1057 }
1058
1059 static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
1060 {
1061 struct pci_dev *bridge;
1062
1063 if (!pci_is_pcie(pdev))
1064 return NULL;
1065
1066 bridge = pci_upstream_bridge(pdev);
1067 if (!bridge || !pci_is_pcie(bridge))
1068 return NULL;
1069
1070 return bridge->link_state;
1071 }
1072
1073 static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
1074 {
1075 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1076
1077 if (!link)
1078 return -EINVAL;
1079 /*
1080 * A driver requested that ASPM be disabled on this device, but
1081 * if we don't have permission to manage ASPM (e.g., on ACPI
1082 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
1083 * the _OSC method), we can't honor that request. Windows has
1084 * a similar mechanism using "PciASPMOptOut", which is also
1085 * ignored in this situation.
1086 */
1087 if (aspm_disabled) {
1088 pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
1089 return -EPERM;
1090 }
1091
1092 if (sem)
1093 down_read(&pci_bus_sem);
1094 mutex_lock(&aspm_lock);
1095 if (state & PCIE_LINK_STATE_L0S)
1096 link->aspm_disable |= ASPM_STATE_L0S;
1097 if (state & PCIE_LINK_STATE_L1)
1098 /* L1 PM substates require L1 */
1099 link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
1100 if (state & PCIE_LINK_STATE_L1_1)
1101 link->aspm_disable |= ASPM_STATE_L1_1;
1102 if (state & PCIE_LINK_STATE_L1_2)
1103 link->aspm_disable |= ASPM_STATE_L1_2;
1104 if (state & PCIE_LINK_STATE_L1_1_PCIPM)
1105 link->aspm_disable |= ASPM_STATE_L1_1_PCIPM;
1106 if (state & PCIE_LINK_STATE_L1_2_PCIPM)
1107 link->aspm_disable |= ASPM_STATE_L1_2_PCIPM;
1108 pcie_config_aspm_link(link, policy_to_aspm_state(link));
1109
1110 if (state & PCIE_LINK_STATE_CLKPM)
1111 link->clkpm_disable = 1;
1112 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1113 mutex_unlock(&aspm_lock);
1114 if (sem)
1115 up_read(&pci_bus_sem);
1116
1117 return 0;
1118 }
1119
1120 int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1121 {
1122 return __pci_disable_link_state(pdev, state, false);
1123 }
1124 EXPORT_SYMBOL(pci_disable_link_state_locked);
1125
1126 /**
1127 * pci_disable_link_state - Disable device's link state, so the link will
1128 * never enter specific states. Note that if the BIOS didn't grant ASPM
1129 * control to the OS, this does nothing because we can't touch the LNKCTL
1130 * register. Returns 0 or a negative errno.
1131 *
1132 * @pdev: PCI device
1133 * @state: ASPM link state to disable
1134 */
1135 int pci_disable_link_state(struct pci_dev *pdev, int state)
1136 {
1137 return __pci_disable_link_state(pdev, state, true);
1138 }
1139 EXPORT_SYMBOL(pci_disable_link_state);
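/*
 * Illustrative (hypothetical) driver usage: a device that cannot tolerate
 * L1 exit latency could call
 *
 *	pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
 *
 * from its probe path before starting I/O.
 */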
1140
1141 /**
1142 * pci_enable_link_state - Clear and set the default device link state so that
1143 * the link may be allowed to enter the specified states. Note that if the
1144 * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
1145 * touch the LNKCTL register. Also note that this does not enable states
1146 * disabled by pci_disable_link_state(). Return 0 or a negative errno.
1147 *
1148 * @pdev: PCI device
1149 * @state: Mask of ASPM link states to enable
1150 */
1151 int pci_enable_link_state(struct pci_dev *pdev, int state)
1152 {
1153 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1154
1155 if (!link)
1156 return -EINVAL;
1157 /*
1158 * A driver requested that ASPM be enabled on this device, but
1159 * if we don't have permission to manage ASPM (e.g., on ACPI
1160 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
1161 * the _OSC method), we can't honor that request.
1162 */
1163 if (aspm_disabled) {
1164 pci_warn(pdev, "can't override BIOS ASPM; OS doesn't have ASPM control\n");
1165 return -EPERM;
1166 }
1167
1168 down_read(&pci_bus_sem);
1169 mutex_lock(&aspm_lock);
1170 link->aspm_default = 0;
1171 if (state & PCIE_LINK_STATE_L0S)
1172 link->aspm_default |= ASPM_STATE_L0S;
1173 if (state & PCIE_LINK_STATE_L1)
1174 /* L1 PM substates require L1 */
1175 link->aspm_default |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
1176 if (state & PCIE_LINK_STATE_L1_1)
1177 link->aspm_default |= ASPM_STATE_L1_1;
1178 if (state & PCIE_LINK_STATE_L1_2)
1179 link->aspm_default |= ASPM_STATE_L1_2;
1180 if (state & PCIE_LINK_STATE_L1_1_PCIPM)
1181 link->aspm_default |= ASPM_STATE_L1_1_PCIPM;
1182 if (state & PCIE_LINK_STATE_L1_2_PCIPM)
1183 link->aspm_default |= ASPM_STATE_L1_2_PCIPM;
1184 pcie_config_aspm_link(link, policy_to_aspm_state(link));
1185
1186 link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
1187 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1188 mutex_unlock(&aspm_lock);
1189 up_read(&pci_bus_sem);
1190
1191 return 0;
1192 }
1193 EXPORT_SYMBOL(pci_enable_link_state);
1194
1195 static int pcie_aspm_set_policy(const char *val,
1196 const struct kernel_param *kp)
1197 {
1198 int i;
1199 struct pcie_link_state *link;
1200
1201 if (aspm_disabled)
1202 return -EPERM;
1203 i = sysfs_match_string(policy_str, val);
1204 if (i < 0)
1205 return i;
1206 if (i == aspm_policy)
1207 return 0;
1208
1209 down_read(&pci_bus_sem);
1210 mutex_lock(&aspm_lock);
1211 aspm_policy = i;
1212 list_for_each_entry(link, &link_list, sibling) {
1213 pcie_config_aspm_link(link, policy_to_aspm_state(link));
1214 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1215 }
1216 mutex_unlock(&aspm_lock);
1217 up_read(&pci_bus_sem);
1218 return 0;
1219 }
1220
1221 static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
1222 {
1223 int i, cnt = 0;
1224 for (i = 0; i < ARRAY_SIZE(policy_str); i++)
1225 if (i == aspm_policy)
1226 cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
1227 else
1228 cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
1229 cnt += sprintf(buffer + cnt, "\n");
1230 return cnt;
1231 }
1232
1233 module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
1234 NULL, 0644);
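/*
 * This exposes the policy knob as /sys/module/pcie_aspm/parameters/policy
 * (writable by root), in addition to the "pcie_aspm.policy=" boot option.
 */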
1235
1236 /**
1237 * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
1238 * @pdev: Target device.
1239 *
1240 * Relies on the upstream bridge's link_state being valid. The link_state
1241 * is deallocated only when the last child of the bridge (i.e., @pdev or a
1242 * sibling) is removed, and the caller should be holding a reference to
1243 * @pdev, so this should be safe.
1244 */
1245 bool pcie_aspm_enabled(struct pci_dev *pdev)
1246 {
1247 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1248
1249 if (!link)
1250 return false;
1251
1252 return link->aspm_enabled;
1253 }
1254 EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
1255
1256 static ssize_t aspm_attr_show_common(struct device *dev,
1257 struct device_attribute *attr,
1258 char *buf, u8 state)
1259 {
1260 struct pci_dev *pdev = to_pci_dev(dev);
1261 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1262
1263 return sysfs_emit(buf, "%d\n", (link->aspm_enabled & state) ? 1 : 0);
1264 }
1265
1266 static ssize_t aspm_attr_store_common(struct device *dev,
1267 struct device_attribute *attr,
1268 const char *buf, size_t len, u8 state)
1269 {
1270 struct pci_dev *pdev = to_pci_dev(dev);
1271 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1272 bool state_enable;
1273
1274 if (kstrtobool(buf, &state_enable) < 0)
1275 return -EINVAL;
1276
1277 down_read(&pci_bus_sem);
1278 mutex_lock(&aspm_lock);
1279
1280 if (state_enable) {
1281 link->aspm_disable &= ~state;
1282 /* need to enable L1 for substates */
1283 if (state & ASPM_STATE_L1SS)
1284 link->aspm_disable &= ~ASPM_STATE_L1;
1285 } else {
1286 link->aspm_disable |= state;
1287 }
1288
1289 pcie_config_aspm_link(link, policy_to_aspm_state(link));
1290
1291 mutex_unlock(&aspm_lock);
1292 up_read(&pci_bus_sem);
1293
1294 return len;
1295 }
1296
1297 #define ASPM_ATTR(_f, _s) \
1298 static ssize_t _f##_show(struct device *dev, \
1299 struct device_attribute *attr, char *buf) \
1300 { return aspm_attr_show_common(dev, attr, buf, ASPM_STATE_##_s); } \
1301 \
1302 static ssize_t _f##_store(struct device *dev, \
1303 struct device_attribute *attr, \
1304 const char *buf, size_t len) \
1305 { return aspm_attr_store_common(dev, attr, buf, len, ASPM_STATE_##_s); }
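/*
 * For example, ASPM_ATTR(l0s_aspm, L0S) expands to l0s_aspm_show() and
 * l0s_aspm_store() wrappers that pass ASPM_STATE_L0S to the common
 * helpers above; DEVICE_ATTR_RW(l0s_aspm) below then wires them up as
 * the sysfs "l0s_aspm" attribute.
 */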
1306
1307 ASPM_ATTR(l0s_aspm, L0S)
1308 ASPM_ATTR(l1_aspm, L1)
1309 ASPM_ATTR(l1_1_aspm, L1_1)
1310 ASPM_ATTR(l1_2_aspm, L1_2)
1311 ASPM_ATTR(l1_1_pcipm, L1_1_PCIPM)
1312 ASPM_ATTR(l1_2_pcipm, L1_2_PCIPM)
1313
1314 static ssize_t clkpm_show(struct device *dev,
1315 struct device_attribute *attr, char *buf)
1316 {
1317 struct pci_dev *pdev = to_pci_dev(dev);
1318 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1319
1320 return sysfs_emit(buf, "%d\n", link->clkpm_enabled);
1321 }
1322
1323 static ssize_t clkpm_store(struct device *dev,
1324 struct device_attribute *attr,
1325 const char *buf, size_t len)
1326 {
1327 struct pci_dev *pdev = to_pci_dev(dev);
1328 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1329 bool state_enable;
1330
1331 if (kstrtobool(buf, &state_enable) < 0)
1332 return -EINVAL;
1333
1334 down_read(&pci_bus_sem);
1335 mutex_lock(&aspm_lock);
1336
1337 link->clkpm_disable = !state_enable;
1338 pcie_set_clkpm(link, policy_to_clkpm_state(link));
1339
1340 mutex_unlock(&aspm_lock);
1341 up_read(&pci_bus_sem);
1342
1343 return len;
1344 }
1345
1346 static DEVICE_ATTR_RW(clkpm);
1347 static DEVICE_ATTR_RW(l0s_aspm);
1348 static DEVICE_ATTR_RW(l1_aspm);
1349 static DEVICE_ATTR_RW(l1_1_aspm);
1350 static DEVICE_ATTR_RW(l1_2_aspm);
1351 static DEVICE_ATTR_RW(l1_1_pcipm);
1352 static DEVICE_ATTR_RW(l1_2_pcipm);
1353
1354 static struct attribute *aspm_ctrl_attrs[] = {
1355 &dev_attr_clkpm.attr,
1356 &dev_attr_l0s_aspm.attr,
1357 &dev_attr_l1_aspm.attr,
1358 &dev_attr_l1_1_aspm.attr,
1359 &dev_attr_l1_2_aspm.attr,
1360 &dev_attr_l1_1_pcipm.attr,
1361 &dev_attr_l1_2_pcipm.attr,
1362 NULL
1363 };
1364
1365 static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
1366 struct attribute *a, int n)
1367 {
1368 struct device *dev = kobj_to_dev(kobj);
1369 struct pci_dev *pdev = to_pci_dev(dev);
1370 struct pcie_link_state *link = pcie_aspm_get_link(pdev);
1371 static const u8 aspm_state_map[] = {
1372 ASPM_STATE_L0S,
1373 ASPM_STATE_L1,
1374 ASPM_STATE_L1_1,
1375 ASPM_STATE_L1_2,
1376 ASPM_STATE_L1_1_PCIPM,
1377 ASPM_STATE_L1_2_PCIPM,
1378 };
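/*
 * Note: aspm_state_map[] must stay in the same order as
 * aspm_ctrl_attrs[] above, offset by one because index 0 there is
 * the clkpm attribute.
 */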
1379
1380 if (aspm_disabled || !link)
1381 return 0;
1382
1383 if (n == 0)
1384 return link->clkpm_capable ? a->mode : 0;
1385
1386 return link->aspm_capable & aspm_state_map[n - 1] ? a->mode : 0;
1387 }
1388
1389 const struct attribute_group aspm_ctrl_attr_group = {
1390 .name = "link",
1391 .attrs = aspm_ctrl_attrs,
1392 .is_visible = aspm_ctrl_attrs_are_visible,
1393 };
1394
1395 static int __init pcie_aspm_disable(char *str)
1396 {
1397 if (!strcmp(str, "off")) {
1398 aspm_policy = POLICY_DEFAULT;
1399 aspm_disabled = 1;
1400 aspm_support_enabled = false;
1401 printk(KERN_INFO "PCIe ASPM is disabled\n");
1402 } else if (!strcmp(str, "force")) {
1403 aspm_force = 1;
1404 printk(KERN_INFO "PCIe ASPM is forcibly enabled\n");
1405 }
1406 return 1;
1407 }
1408
1409 __setup("pcie_aspm=", pcie_aspm_disable);
1410
1411 void pcie_no_aspm(void)
1412 {
1413 /*
1414 * Disabling ASPM is intended to prevent the kernel from modifying
1415 * existing hardware state, not to clear existing state. To that end:
1416 * (a) set policy to POLICY_DEFAULT in order to avoid changing state
1417 * (b) prevent userspace from changing policy
1418 */
1419 if (!aspm_force) {
1420 aspm_policy = POLICY_DEFAULT;
1421 aspm_disabled = 1;
1422 }
1423 }
1424
1425 bool pcie_aspm_support_enabled(void)
1426 {
1427 return aspm_support_enabled;
1428 }
1429