// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2008-2014 Freescale Semiconductor, Inc.
 * Copyright 2021 NXP
 */

/*
 * Generic driver for Freescale DDR/DDR2/DDR3 memory controller.
 * Based on code from spd_sdram.c
 * Author: James Yang [at freescale.com]
 */

#include <common.h>
#include <display_options.h>
#include <dm.h>
#include <i2c.h>
#include <fsl_ddr_sdram.h>
#include <fsl_ddr.h>
#include <init.h>
#include <log.h>
#include <asm/bitops.h>

/*
 * CFG_SYS_FSL_DDR_SDRAM_BASE_PHY is the physical address of DDR SDRAM as
 * seen by the DDR controllers. It is the same as CFG_SYS_DDR_SDRAM_BASE
 * for all Power SoCs, but it can differ on ARM SoCs. For example,
 * fsl_lsch3 has a mapping mechanism that maps DDR memory to these ranges
 * (in order):
 * 0x00_8000_0000 ~ 0x00_ffff_ffff
 * 0x80_8000_0000 ~ 0xff_ffff_ffff
 */
#ifndef CFG_SYS_FSL_DDR_SDRAM_BASE_PHY
#ifdef CONFIG_MPC83xx
#define CFG_SYS_FSL_DDR_SDRAM_BASE_PHY CFG_SYS_SDRAM_BASE
#else
#define CFG_SYS_FSL_DDR_SDRAM_BASE_PHY CFG_SYS_DDR_SDRAM_BASE
#endif
#endif

#ifdef CONFIG_PPC
#include <asm/fsl_law.h>

void fsl_ddr_set_lawbar(
		const common_timing_params_t *memctl_common_params,
		unsigned int memctl_interleaved,
		unsigned int ctrl_num);
#endif

void fsl_ddr_set_intl3r(const unsigned int granule_size);
#if defined(SPD_EEPROM_ADDRESS) || \
	defined(SPD_EEPROM_ADDRESS1) || defined(SPD_EEPROM_ADDRESS2) || \
	defined(SPD_EEPROM_ADDRESS3) || defined(SPD_EEPROM_ADDRESS4)
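/*
 * spd_i2c_addr[ctrl][slot] gives the SPD EEPROM I2C address for each DIMM
 * slot; the table below is built to match the controller/slot combination
 * selected by the board configuration.
 */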
#if (CONFIG_SYS_NUM_DDR_CTLRS == 1) && (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
u8 spd_i2c_addr[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_DIMM_SLOTS_PER_CTLR] = {
	[0][0] = SPD_EEPROM_ADDRESS,
};
#elif (CONFIG_SYS_NUM_DDR_CTLRS == 1) && (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
u8 spd_i2c_addr[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_DIMM_SLOTS_PER_CTLR] = {
	[0][0] = SPD_EEPROM_ADDRESS1,	/* controller 1 */
	[0][1] = SPD_EEPROM_ADDRESS2,	/* controller 1 */
};
#elif (CONFIG_SYS_NUM_DDR_CTLRS == 2) && (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
u8 spd_i2c_addr[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_DIMM_SLOTS_PER_CTLR] = {
	[0][0] = SPD_EEPROM_ADDRESS1,	/* controller 1 */
	[1][0] = SPD_EEPROM_ADDRESS2,	/* controller 2 */
};
#elif (CONFIG_SYS_NUM_DDR_CTLRS == 2) && (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
u8 spd_i2c_addr[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_DIMM_SLOTS_PER_CTLR] = {
	[0][0] = SPD_EEPROM_ADDRESS1,	/* controller 1 */
	[0][1] = SPD_EEPROM_ADDRESS2,	/* controller 1 */
	[1][0] = SPD_EEPROM_ADDRESS3,	/* controller 2 */
	[1][1] = SPD_EEPROM_ADDRESS4,	/* controller 2 */
};
#elif (CONFIG_SYS_NUM_DDR_CTLRS == 3) && (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
u8 spd_i2c_addr[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_DIMM_SLOTS_PER_CTLR] = {
	[0][0] = SPD_EEPROM_ADDRESS1,	/* controller 1 */
	[1][0] = SPD_EEPROM_ADDRESS2,	/* controller 2 */
	[2][0] = SPD_EEPROM_ADDRESS3,	/* controller 3 */
};
#elif (CONFIG_SYS_NUM_DDR_CTLRS == 3) && (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
u8 spd_i2c_addr[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_DIMM_SLOTS_PER_CTLR] = {
	[0][0] = SPD_EEPROM_ADDRESS1,	/* controller 1 */
	[0][1] = SPD_EEPROM_ADDRESS2,	/* controller 1 */
	[1][0] = SPD_EEPROM_ADDRESS3,	/* controller 2 */
	[1][1] = SPD_EEPROM_ADDRESS4,	/* controller 2 */
	[2][0] = SPD_EEPROM_ADDRESS5,	/* controller 3 */
	[2][1] = SPD_EEPROM_ADDRESS6,	/* controller 3 */
};

#endif

#if CONFIG_IS_ENABLED(DM_I2C)
#define DEV_TYPE	struct udevice
#else
/* Local udevice */
struct ludevice {
	u8 chip;
};

#define DEV_TYPE	struct ludevice

#endif

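/*
 * DDR4 SPD EEPROMs expose 512 bytes as two 256-byte pages; the page is
 * selected with a dummy write to one of these fixed bus addresses
 * (Set Page Address 0/1 per JEDEC Standard No. 21-C).
 */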
#define SPD_SPA0_ADDRESS	0x36
#define SPD_SPA1_ADDRESS	0x37

static int ddr_i2c_read(DEV_TYPE *dev, unsigned int addr,
			int alen, uint8_t *buf, int len)
{
	int ret;

#if CONFIG_IS_ENABLED(DM_I2C)
	ret = dm_i2c_read(dev, 0, buf, len);
#else
	ret = i2c_read(dev->chip, addr, alen, buf, len);
#endif

	return ret;
}

#ifdef CONFIG_SYS_FSL_DDR4
static int ddr_i2c_dummy_write(unsigned int chip_addr)
{
	uint8_t buf = 0;

#if CONFIG_IS_ENABLED(DM_I2C)
	struct udevice *dev;
	int ret;

	ret = i2c_get_chip_for_busnum(CONFIG_SYS_SPD_BUS_NUM, chip_addr,
				      1, &dev);
	if (ret) {
		printf("%s: Cannot find udev for a bus %d\n", __func__,
		       CONFIG_SYS_SPD_BUS_NUM);
		return ret;
	}

	return dm_i2c_write(dev, 0, &buf, 1);
#else
	return i2c_write(chip_addr, 0, 1, &buf, 1);
#endif
}
#endif

static void __get_spd(generic_spd_eeprom_t *spd, u8 i2c_address)
{
	int ret;
	DEV_TYPE *dev;

#if CONFIG_IS_ENABLED(DM_I2C)
	ret = i2c_get_chip_for_busnum(CONFIG_SYS_SPD_BUS_NUM, i2c_address,
				      1, &dev);
	if (ret) {
		printf("%s: Cannot find udev for a bus %d\n", __func__,
		       CONFIG_SYS_SPD_BUS_NUM);
		return;
	}
#else /* Non-DM I2C support - will be removed */
	struct ludevice ldev = {
		.chip = i2c_address,
	};
	dev = &ldev;

	i2c_set_bus_num(CONFIG_SYS_SPD_BUS_NUM);
#endif

#ifdef CONFIG_SYS_FSL_DDR4
	/*
	 * A DDR4 SPD has 384 to 512 bytes.
	 * To access the lower 256 bytes, set the EE page address to 0;
	 * to access the upper 256 bytes, set the EE page address to 1.
	 * See JEDEC Standard No. 21-C for details.
	 */
	ddr_i2c_dummy_write(SPD_SPA0_ADDRESS);
	ret = ddr_i2c_read(dev, 0, 1, (uchar *)spd, 256);
	if (!ret) {
		ddr_i2c_dummy_write(SPD_SPA1_ADDRESS);
		ret = ddr_i2c_read(dev, 0, 1, (uchar *)((ulong)spd + 256),
				   min(256,
				       (int)sizeof(generic_spd_eeprom_t)
				       - 256));
	}

#else
	ret = ddr_i2c_read(dev, 0, 1, (uchar *)spd,
			   sizeof(generic_spd_eeprom_t));
#endif

	if (ret) {
		if (i2c_address ==
#ifdef SPD_EEPROM_ADDRESS
		    SPD_EEPROM_ADDRESS
#elif defined(SPD_EEPROM_ADDRESS1)
		    SPD_EEPROM_ADDRESS1
#endif
		    ) {
			printf("DDR: failed to read SPD from address %u\n",
			       i2c_address);
		} else {
			debug("DDR: failed to read SPD from address %u\n",
			      i2c_address);
		}
		memset(spd, 0, sizeof(generic_spd_eeprom_t));
	}
}

__attribute__((weak, alias("__get_spd")))
void get_spd(generic_spd_eeprom_t *spd, u8 i2c_address);

/* This function allows boards to update the SPD address */
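/*
 * Boards with a muxed or board-specific SPD layout can override the weak
 * default below. A minimal sketch of such an override (hypothetical, not
 * taken from any board file) would be:
 *
 *	void update_spd_address(unsigned int ctrl_num, unsigned int slot,
 *				unsigned int *addr)
 *	{
 *		if (ctrl_num == 1)
 *			*addr = SPD_EEPROM_ADDRESS2 + slot;
 *	}
 */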
__weak void update_spd_address(unsigned int ctrl_num,
			       unsigned int slot,
			       unsigned int *addr)
{
}

void fsl_ddr_get_spd(generic_spd_eeprom_t *ctrl_dimms_spd,
		     unsigned int ctrl_num, unsigned int dimm_slots_per_ctrl)
{
	unsigned int i;
	unsigned int i2c_address = 0;

	if (ctrl_num >= CONFIG_SYS_NUM_DDR_CTLRS) {
		printf("%s unexpected ctrl_num = %u\n", __func__, ctrl_num);
		return;
	}

	for (i = 0; i < dimm_slots_per_ctrl; i++) {
		i2c_address = spd_i2c_addr[ctrl_num][i];
		update_spd_address(ctrl_num, i, &i2c_address);
		get_spd(&(ctrl_dimms_spd[i]), i2c_address);
	}
}
#else
void fsl_ddr_get_spd(generic_spd_eeprom_t *ctrl_dimms_spd,
		     unsigned int ctrl_num, unsigned int dimm_slots_per_ctrl)
{
}
#endif /* SPD_EEPROM_ADDRESSx */

/*
 * ASSUMPTIONS:
 * - Same number of CONFIG_DIMM_SLOTS_PER_CTLR on each controller
 * - Same memory data bus width on all controllers
 *
 * NOTES:
 *
 * The memory controller and associated documentation use confusing
 * terminology when referring to the organization of DRAM.
 *
 * Here is a terminology translation table:
 *
 * memory controller/documentation |industry |this code |signals
 * --------------------------------|---------|----------|-----------------
 * physical bank/bank              |rank     |rank      |chip select (CS)
 * logical bank/sub-bank           |bank     |bank      |bank address (BA)
 * page/row                        |row      |page      |row address
 * ???                             |column   |column    |column address
 *
 * The naming confusion is further exacerbated by the descriptions of the
 * memory controller interleaving feature, where accesses are interleaved
 * _BETWEEN_ two separate memory controllers. This is configured only in
 * CS0_CONFIG[INTLV_CTL] of each memory controller.
 *
 * memory controller documentation | number of chip selects
 *                                 | per memory controller supported
 * --------------------------------|-----------------------------------------
 * cache line interleaving         | 1 (CS0 only)
 * page interleaving               | 1 (CS0 only)
 * bank interleaving               | 1 (CS0 only)
 * superbank interleaving          | depends on bank (chip select)
 *                                 |   interleaving [rank interleaving]
 *                                 |   mode used on every memory controller
 *
 * Even further confusing is the existence of the interleaving feature
 * _WITHIN_ each memory controller. The feature is referred to in
 * documentation as chip select interleaving or bank interleaving,
 * although it is configured in the DDR_SDRAM_CFG field.
 *
 * Name of field                 | documentation name    | this code
 * ------------------------------|-----------------------|------------------
 * DDR_SDRAM_CFG[BA_INTLV_CTL]   | Bank (chip select)    | rank interleaving
 *                               |  interleaving
 */

const char *step_string_tbl[] = {
	"STEP_GET_SPD",
	"STEP_COMPUTE_DIMM_PARMS",
	"STEP_COMPUTE_COMMON_PARMS",
	"STEP_GATHER_OPTS",
	"STEP_ASSIGN_ADDRESSES",
	"STEP_COMPUTE_REGS",
	"STEP_PROGRAM_REGS",
	"STEP_ALL"
};

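/*
 * Map a single STEP_* bit to its printable name; any value that is not a
 * power of two (i.e. several steps combined) is reported as "STEP_ALL".
 */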
const char *step_to_string(unsigned int step)
{
	unsigned int s = __ilog2(step);

	if (s <= 31) {
		if ((1 << s) != step)
			return step_string_tbl[7];
	} else {
		if ((1 << (s - 32)) != step)
			return step_string_tbl[7];
	}
	if (s >= ARRAY_SIZE(step_string_tbl)) {
		printf("Error for the step in %s\n", __func__);
		s = 0;
	}

	return step_string_tbl[s];
}

static unsigned long long __step_assign_addresses(fsl_ddr_info_t *pinfo,
						  unsigned int dbw_cap_adj[])
{
	unsigned int i, j;
	unsigned long long total_mem, current_mem_base, total_ctlr_mem;
	unsigned long long rank_density, ctlr_density = 0;
	unsigned int first_ctrl = pinfo->first_ctrl;
	unsigned int last_ctrl = first_ctrl + pinfo->num_ctrls - 1;

	/*
	 * If a reduced data width is requested, but the SPD
	 * specifies a physically wider device, adjust the
	 * computed dimm capacities accordingly before
	 * assigning addresses.
	 */
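	/*
	 * dbw_cap_adj[i] is a right-shift applied to the DIMM capacities:
	 * 1 halves them (32-bit bus on a 64-bit DIMM) and 2 quarters them
	 * (16-bit bus on a 64-bit DIMM).
	 */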
	for (i = first_ctrl; i <= last_ctrl; i++) {
		unsigned int found = 0;

		switch (pinfo->memctl_opts[i].data_bus_width) {
		case 2:
			/* 16-bit */
			for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) {
				unsigned int dw;

				if (!pinfo->dimm_params[i][j].n_ranks)
					continue;
				dw = pinfo->dimm_params[i][j].primary_sdram_width;
				if (dw == 72 || dw == 64) {
					dbw_cap_adj[i] = 2;
					break;
				} else if (dw == 40 || dw == 32) {
					dbw_cap_adj[i] = 1;
					break;
				}
			}
			break;

		case 1:
			/* 32-bit */
			for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) {
				unsigned int dw;

				dw = pinfo->dimm_params[i][j].data_width;
				if (pinfo->dimm_params[i][j].n_ranks
				    && (dw == 72 || dw == 64)) {
					/*
					 * FIXME: can't really do it
					 * like this because this just
					 * further reduces the memory
					 */
					found = 1;
					break;
				}
			}
			if (found)
				dbw_cap_adj[i] = 1;
			break;

		case 0:
			/* 64-bit */
			break;

		default:
			printf("unexpected data bus width "
			       "specified controller %u\n", i);
			return 1;
		}
		debug("dbw_cap_adj[%d]=%d\n", i, dbw_cap_adj[i]);
	}

	current_mem_base = pinfo->mem_base;
	total_mem = 0;
	if (pinfo->memctl_opts[first_ctrl].memctl_interleaving) {
		rank_density = pinfo->dimm_params[first_ctrl][0].rank_density >>
					dbw_cap_adj[first_ctrl];
		switch (pinfo->memctl_opts[first_ctrl].ba_intlv_ctl &
					FSL_DDR_CS0_CS1_CS2_CS3) {
		case FSL_DDR_CS0_CS1_CS2_CS3:
			ctlr_density = 4 * rank_density;
			break;
		case FSL_DDR_CS0_CS1:
		case FSL_DDR_CS0_CS1_AND_CS2_CS3:
			ctlr_density = 2 * rank_density;
			break;
		case FSL_DDR_CS2_CS3:
		default:
			ctlr_density = rank_density;
			break;
		}
		debug("rank density is 0x%llx, ctlr density is 0x%llx\n",
		      rank_density, ctlr_density);
		for (i = first_ctrl; i <= last_ctrl; i++) {
			if (pinfo->memctl_opts[i].memctl_interleaving) {
				switch (pinfo->memctl_opts[i].memctl_interleaving_mode) {
				case FSL_DDR_256B_INTERLEAVING:
				case FSL_DDR_CACHE_LINE_INTERLEAVING:
				case FSL_DDR_PAGE_INTERLEAVING:
				case FSL_DDR_BANK_INTERLEAVING:
				case FSL_DDR_SUPERBANK_INTERLEAVING:
					total_ctlr_mem = 2 * ctlr_density;
					break;
				case FSL_DDR_3WAY_1KB_INTERLEAVING:
				case FSL_DDR_3WAY_4KB_INTERLEAVING:
				case FSL_DDR_3WAY_8KB_INTERLEAVING:
					total_ctlr_mem = 3 * ctlr_density;
					break;
				case FSL_DDR_4WAY_1KB_INTERLEAVING:
				case FSL_DDR_4WAY_4KB_INTERLEAVING:
				case FSL_DDR_4WAY_8KB_INTERLEAVING:
					total_ctlr_mem = 4 * ctlr_density;
					break;
				default:
					panic("Unknown interleaving mode");
				}
				pinfo->common_timing_params[i].base_address =
							current_mem_base;
				pinfo->common_timing_params[i].total_mem =
							total_ctlr_mem;
				total_mem = current_mem_base + total_ctlr_mem;
				debug("ctrl %d base 0x%llx\n", i, current_mem_base);
				debug("ctrl %d total 0x%llx\n", i, total_ctlr_mem);
			} else {
				/* when 3rd controller not interleaved */
				current_mem_base = total_mem;
				total_ctlr_mem = 0;
				pinfo->common_timing_params[i].base_address =
							current_mem_base;
				for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) {
					unsigned long long cap =
						pinfo->dimm_params[i][j].capacity >>
							dbw_cap_adj[i];
					pinfo->dimm_params[i][j].base_address =
						current_mem_base;
					debug("ctrl %d dimm %d base 0x%llx\n",
					      i, j, current_mem_base);
					current_mem_base += cap;
					total_ctlr_mem += cap;
				}
				debug("ctrl %d total 0x%llx\n", i,
				      total_ctlr_mem);
				pinfo->common_timing_params[i].total_mem =
							total_ctlr_mem;
				total_mem += total_ctlr_mem;
			}
		}
	} else {
		/*
		 * Simple linear assignment if memory
		 * controllers are not interleaved.
		 */
		for (i = first_ctrl; i <= last_ctrl; i++) {
			total_ctlr_mem = 0;
			pinfo->common_timing_params[i].base_address =
						current_mem_base;
			for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) {
				/* Compute DIMM base addresses. */
				unsigned long long cap =
					pinfo->dimm_params[i][j].capacity >>
						dbw_cap_adj[i];
				pinfo->dimm_params[i][j].base_address =
					current_mem_base;
				debug("ctrl %d dimm %d base 0x%llx\n",
				      i, j, current_mem_base);
				current_mem_base += cap;
				total_ctlr_mem += cap;
			}
			debug("ctrl %d total 0x%llx\n", i, total_ctlr_mem);
			pinfo->common_timing_params[i].total_mem =
						total_ctlr_mem;
			total_mem += total_ctlr_mem;
		}
	}
	debug("Total mem by %s is 0x%llx\n", __func__, total_mem);

	return total_mem;
}

/* Use a weak alias to allow the board file to override address assignment */
__attribute__((weak, alias("__step_assign_addresses")))
unsigned long long step_assign_addresses(fsl_ddr_info_t *pinfo,
					 unsigned int dbw_cap_adj[]);

unsigned long long
fsl_ddr_compute(fsl_ddr_info_t *pinfo, unsigned int start_step,
		unsigned int size_only)
{
	unsigned int i, j;
	unsigned long long total_mem = 0;
	int assert_reset = 0;
	unsigned int first_ctrl = pinfo->first_ctrl;
	unsigned int last_ctrl = first_ctrl + pinfo->num_ctrls - 1;
	__maybe_unused int retval;
	__maybe_unused bool goodspd = false;
	__maybe_unused int dimm_slots_per_ctrl = pinfo->dimm_slots_per_ctrl;

	fsl_ddr_cfg_regs_t *ddr_reg = pinfo->fsl_ddr_config_reg;
	common_timing_params_t *timing_params = pinfo->common_timing_params;

	if (pinfo->board_need_mem_reset)
		assert_reset = pinfo->board_need_mem_reset();

	/* data bus width capacity adjust shift amount */
	unsigned int dbw_capacity_adjust[CONFIG_SYS_NUM_DDR_CTLRS];

	for (i = first_ctrl; i <= last_ctrl; i++)
		dbw_capacity_adjust[i] = 0;

	debug("starting at step %u (%s)\n",
	      start_step, step_to_string(start_step));

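	/*
	 * Each case below intentionally falls through to the next one, so
	 * starting at a given step also runs every later step.
	 */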
	switch (start_step) {
	case STEP_GET_SPD:
#if defined(CONFIG_DDR_SPD) || defined(CONFIG_SPD_EEPROM)
		/* STEP 1: Gather all DIMM SPD data */
		for (i = first_ctrl; i <= last_ctrl; i++) {
			fsl_ddr_get_spd(pinfo->spd_installed_dimms[i], i,
					dimm_slots_per_ctrl);
		}

	case STEP_COMPUTE_DIMM_PARMS:
		/* STEP 2: Compute DIMM parameters from SPD data */

		for (i = first_ctrl; i <= last_ctrl; i++) {
			for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) {
				generic_spd_eeprom_t *spd =
					&(pinfo->spd_installed_dimms[i][j]);
				dimm_params_t *pdimm =
					&(pinfo->dimm_params[i][j]);
				retval = compute_dimm_parameters(
							i, spd, pdimm, j);
#ifdef CONFIG_SYS_DDR_RAW_TIMING
				if (!j && retval) {
					printf("SPD error on controller %d! "
					       "Trying fallback to raw timing "
					       "calculation\n", i);
					retval = fsl_ddr_get_dimm_params(pdimm,
									 i, j);
				}
#else
				if (retval == 2) {
					printf("Error: compute_dimm_parameters"
					       " returned FATAL value "
					       "for memctl=%u dimm=%u\n", i, j);
					return 0;
				}
#endif
				if (retval) {
					debug("Warning: compute_dimm_parameters"
					      " non-zero return value for memctl=%u "
					      "dimm=%u\n", i, j);
				} else {
					goodspd = true;
				}
			}
		}
		if (!goodspd) {
			/*
			 * No valid SPD found.
			 * Throw an error if this is for main memory, i.e.
			 * first_ctrl == 0. Otherwise, silently return 0
			 * as the memory size.
			 */
			if (first_ctrl == 0)
				printf("Error: No valid SPD detected.\n");

			return 0;
		}
#elif defined(CONFIG_SYS_DDR_RAW_TIMING)
	case STEP_COMPUTE_DIMM_PARMS:
		for (i = first_ctrl; i <= last_ctrl; i++) {
			for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) {
				dimm_params_t *pdimm =
					&(pinfo->dimm_params[i][j]);
				fsl_ddr_get_dimm_params(pdimm, i, j);
			}
		}
		debug("Filling dimm parameters from board specific file\n");
#endif
	case STEP_COMPUTE_COMMON_PARMS:
		/*
		 * STEP 3: Compute a common set of timing parameters
		 * suitable for all of the DIMMs on each memory controller
		 */
		for (i = first_ctrl; i <= last_ctrl; i++) {
			debug("Computing lowest common DIMM"
			      " parameters for memctl=%u\n", i);
			compute_lowest_common_dimm_parameters
				(i,
				 pinfo->dimm_params[i],
				 &timing_params[i],
				 CONFIG_DIMM_SLOTS_PER_CTLR);
		}

	case STEP_GATHER_OPTS:
		/* STEP 4: Gather configuration requirements from user */
		for (i = first_ctrl; i <= last_ctrl; i++) {
			debug("Reloading memory controller "
			      "configuration options for memctl=%u\n", i);
			/*
			 * This "reloads" the memory controller options
			 * to defaults. If the user "edits" an option,
			 * next_step points to the step after this,
			 * which is currently STEP_ASSIGN_ADDRESSES.
			 */
			populate_memctl_options(
					&timing_params[i],
					&pinfo->memctl_opts[i],
					pinfo->dimm_params[i], i);
			/*
			 * For RDIMMs, the JEDEC spec requires clocks to be
			 * stable before the reset signal is deasserted.
			 * For boards using fixed parameters, this function
			 * should be called from the board init file.
			 */
			if (timing_params[i].all_dimms_registered)
				assert_reset = 1;
		}
		if (assert_reset && !size_only) {
			if (pinfo->board_mem_reset) {
				debug("Asserting mem reset\n");
				pinfo->board_mem_reset();
			} else {
				debug("Asserting mem reset missing\n");
			}
		}

	case STEP_ASSIGN_ADDRESSES:
		/* STEP 5: Assign addresses to chip selects */
		check_interleaving_options(pinfo);
		total_mem = step_assign_addresses(pinfo, dbw_capacity_adjust);
		debug("Total mem %llu assigned\n", total_mem);

	case STEP_COMPUTE_REGS:
		/* STEP 6: compute controller register values */
		debug("FSL Memory ctrl register computation\n");
		for (i = first_ctrl; i <= last_ctrl; i++) {
			if (timing_params[i].ndimms_present == 0) {
				memset(&ddr_reg[i], 0,
				       sizeof(fsl_ddr_cfg_regs_t));
				continue;
			}

			compute_fsl_memctl_config_regs
				(i,
				 &pinfo->memctl_opts[i],
				 &ddr_reg[i], &timing_params[i],
				 pinfo->dimm_params[i],
				 dbw_capacity_adjust[i],
				 size_only);
		}

	default:
		break;
	}

	{
		/*
		 * Compute the amount of memory available just by
		 * looking for the highest valid CSn_BNDS value.
		 * This allows us to also experiment with using
		 * only CS0 when using dual-rank DIMMs.
		 */
		unsigned int max_end = 0;

		for (i = first_ctrl; i <= last_ctrl; i++) {
			for (j = 0; j < CONFIG_CHIP_SELECTS_PER_CTRL; j++) {
				fsl_ddr_cfg_regs_t *reg = &ddr_reg[i];

				if (reg->cs[j].config & 0x80000000) {
					unsigned int end;
					/*
					 * 0xffffffff is a special value we
					 * put for unused bnds
					 */
					if (reg->cs[j].bnds == 0xffffffff)
						continue;
					end = reg->cs[j].bnds & 0xffff;
					if (end > max_end)
						max_end = end;
				}
			}
		}

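		/*
		 * The BNDS ending-address field is in 16 MB units, so the
		 * last byte covered is (max_end << 24) | 0xFFFFFF; add 1 and
		 * subtract the base address to get the size.
		 */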
		total_mem = 1 + (((unsigned long long)max_end << 24ULL) |
				 0xFFFFFFULL) - pinfo->mem_base;
	}

	return total_mem;
}

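/*
 * Run the full compute-and-program sequence for the controllers described
 * by pinfo and return the amount of memory configured, in bytes.
 */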
phys_size_t __fsl_ddr_sdram(fsl_ddr_info_t *pinfo)
{
	unsigned int i, first_ctrl, last_ctrl;
#ifdef CONFIG_PPC
	unsigned int law_memctl = LAW_TRGT_IF_DDR_1;
#endif
	unsigned long long total_memory;
	int deassert_reset = 0;

	first_ctrl = pinfo->first_ctrl;
	last_ctrl = first_ctrl + pinfo->num_ctrls - 1;

	/* Compute it once normally. */
#ifdef CONFIG_FSL_DDR_INTERACTIVE
	if (tstc() && (getchar() == 'd')) {	/* we got a key press of 'd' */
		total_memory = fsl_ddr_interactive(pinfo, 0);
	} else if (fsl_ddr_interactive_env_var_exists()) {
		total_memory = fsl_ddr_interactive(pinfo, 1);
	} else
#endif
		total_memory = fsl_ddr_compute(pinfo, STEP_GET_SPD, 0);

	/* setup 3-way interleaving before enabling DDRC */
	switch (pinfo->memctl_opts[first_ctrl].memctl_interleaving_mode) {
	case FSL_DDR_3WAY_1KB_INTERLEAVING:
	case FSL_DDR_3WAY_4KB_INTERLEAVING:
	case FSL_DDR_3WAY_8KB_INTERLEAVING:
		fsl_ddr_set_intl3r(
			pinfo->memctl_opts[first_ctrl].
			memctl_interleaving_mode);
		break;
	default:
		break;
	}

	/*
	 * Program configuration registers.
	 * The JEDEC spec requires clocks to be stable before deasserting
	 * reset for RDIMMs. Clocks start after chip select is enabled and
	 * the clock control register is set. During step 1, all controllers
	 * have their registers set but not enabled. Step 2 proceeds after
	 * deasserting reset through board FPGA or GPIO.
	 * For non-registered DIMMs, initialization can go through, but it is
	 * also OK to follow the same flow.
	 */
	if (pinfo->board_need_mem_reset)
		deassert_reset = pinfo->board_need_mem_reset();
	for (i = first_ctrl; i <= last_ctrl; i++) {
		if (pinfo->common_timing_params[i].all_dimms_registered)
			deassert_reset = 1;
	}
	for (i = first_ctrl; i <= last_ctrl; i++) {
		debug("Programming controller %u\n", i);
		if (pinfo->common_timing_params[i].ndimms_present == 0) {
			debug("No dimms present on controller %u; "
			      "skipping programming\n", i);
			continue;
		}
		/*
		 * The following call with step = 1 returns before enabling
		 * the controller. It has to finish with step = 2 later.
		 */
		fsl_ddr_set_memctl_regs(&(pinfo->fsl_ddr_config_reg[i]), i,
					deassert_reset ? 1 : 0);
	}
	if (deassert_reset) {
		/* Use board FPGA or GPIO to deassert reset signal */
		if (pinfo->board_mem_de_reset) {
			debug("Deasserting mem reset\n");
			pinfo->board_mem_de_reset();
		} else {
			debug("Deasserting mem reset missing\n");
		}
		for (i = first_ctrl; i <= last_ctrl; i++) {
			/* Call with step = 2 to continue initialization */
			fsl_ddr_set_memctl_regs(&(pinfo->fsl_ddr_config_reg[i]),
						i, 2);
		}
	}

#ifdef CONFIG_FSL_DDR_SYNC_REFRESH
	fsl_ddr_sync_memctl_refresh(first_ctrl, last_ctrl);
#endif

#ifdef CONFIG_PPC
	/* program LAWs */
	for (i = first_ctrl; i <= last_ctrl; i++) {
		if (pinfo->memctl_opts[i].memctl_interleaving) {
			switch (pinfo->memctl_opts[i].
				memctl_interleaving_mode) {
			case FSL_DDR_CACHE_LINE_INTERLEAVING:
			case FSL_DDR_PAGE_INTERLEAVING:
			case FSL_DDR_BANK_INTERLEAVING:
			case FSL_DDR_SUPERBANK_INTERLEAVING:
				if (i % 2)
					break;
				if (i == 0) {
					law_memctl = LAW_TRGT_IF_DDR_INTRLV;
					fsl_ddr_set_lawbar(
						&pinfo->common_timing_params[i],
						law_memctl, i);
				}
#if CONFIG_SYS_NUM_DDR_CTLRS > 3
				else if (i == 2) {
					law_memctl = LAW_TRGT_IF_DDR_INTLV_34;
					fsl_ddr_set_lawbar(
						&pinfo->common_timing_params[i],
						law_memctl, i);
				}
#endif
				break;
			case FSL_DDR_3WAY_1KB_INTERLEAVING:
			case FSL_DDR_3WAY_4KB_INTERLEAVING:
			case FSL_DDR_3WAY_8KB_INTERLEAVING:
				law_memctl = LAW_TRGT_IF_DDR_INTLV_123;
				if (i == 0) {
					fsl_ddr_set_lawbar(
						&pinfo->common_timing_params[i],
						law_memctl, i);
				}
				break;
			case FSL_DDR_4WAY_1KB_INTERLEAVING:
			case FSL_DDR_4WAY_4KB_INTERLEAVING:
			case FSL_DDR_4WAY_8KB_INTERLEAVING:
				law_memctl = LAW_TRGT_IF_DDR_INTLV_1234;
				if (i == 0)
					fsl_ddr_set_lawbar(
						&pinfo->common_timing_params[i],
						law_memctl, i);
				/* placeholder for future 4-way interleaving */
				break;
			default:
				break;
			}
		} else {
			switch (i) {
			case 0:
				law_memctl = LAW_TRGT_IF_DDR_1;
				break;
			case 1:
				law_memctl = LAW_TRGT_IF_DDR_2;
				break;
			case 2:
				law_memctl = LAW_TRGT_IF_DDR_3;
				break;
			case 3:
				law_memctl = LAW_TRGT_IF_DDR_4;
				break;
			default:
				break;
			}
			fsl_ddr_set_lawbar(&pinfo->common_timing_params[i],
					   law_memctl, i);
		}
	}
#endif

	debug("total_memory by %s = %llu\n", __func__, total_memory);

#if !defined(CONFIG_PHYS_64BIT)
	/*
	 * Show a warning about big DDR modules. But avoid the warning for
	 * 4 GB modules when this U-Boot supports RAM of at most 4 GB minus
	 * one byte.
	 */
	if ((first_ctrl == 0) && (total_memory - 1 > (phys_size_t)~0ULL)) {
		puts("Detected ");
		print_size(total_memory, " of memory\n");
#ifndef CONFIG_SPL_BUILD
		puts(" ");	/* re-align to match init_dram print */
#endif
		puts("This U-Boot only supports <= ");
		print_size((unsigned long long)((phys_size_t)~0ULL) + 1,
			   " of DDR\n");
#ifndef CONFIG_SPL_BUILD
		puts(" ");	/* re-align to match init_dram print */
#endif
		puts("You could rebuild it with CONFIG_PHYS_64BIT\n");
#ifndef CONFIG_SPL_BUILD
		puts(" ");	/* re-align to match init_dram print */
#endif
	}
#endif

	/* Ensure that total_memory does not overflow on return */
	if (total_memory > (phys_size_t)~0ULL)
		total_memory = (phys_size_t)~0ULL;

	return total_memory;
}

/*
 * fsl_ddr_sdram(void) -- this is the main function to be called by
 * dram_init() in the board file.
 *
 * It returns the amount of memory configured, in bytes.
 */
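/*
 * A typical caller is the board's dram_init() (sketch only, following the
 * usual U-Boot convention rather than any specific board file):
 *
 *	int dram_init(void)
 *	{
 *		gd->ram_size = fsl_ddr_sdram();
 *
 *		return 0;
 *	}
 */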
phys_size_t fsl_ddr_sdram(void)
{
	fsl_ddr_info_t info;

	/* Reset info structure. */
	memset(&info, 0, sizeof(fsl_ddr_info_t));
	info.mem_base = CFG_SYS_FSL_DDR_SDRAM_BASE_PHY;
	info.first_ctrl = 0;
	info.num_ctrls = CONFIG_SYS_FSL_DDR_MAIN_NUM_CTRLS;
	info.dimm_slots_per_ctrl = CONFIG_DIMM_SLOTS_PER_CTLR;
	info.board_need_mem_reset = board_need_mem_reset;
	info.board_mem_reset = board_assert_mem_reset;
	info.board_mem_de_reset = board_deassert_mem_reset;
	remove_unused_controllers(&info);

	return __fsl_ddr_sdram(&info);
}

#ifdef CONFIG_SYS_FSL_OTHER_DDR_NUM_CTRLS
phys_size_t fsl_other_ddr_sdram(unsigned long long base,
				unsigned int first_ctrl,
				unsigned int num_ctrls,
				unsigned int dimm_slots_per_ctrl,
				int (*board_need_reset)(void),
				void (*board_reset)(void),
				void (*board_de_reset)(void))
{
	fsl_ddr_info_t info;

	/* Reset info structure. */
	memset(&info, 0, sizeof(fsl_ddr_info_t));
	info.mem_base = base;
	info.first_ctrl = first_ctrl;
	info.num_ctrls = num_ctrls;
	info.dimm_slots_per_ctrl = dimm_slots_per_ctrl;
	info.board_need_mem_reset = board_need_reset;
	info.board_mem_reset = board_reset;
	info.board_mem_de_reset = board_de_reset;

	return __fsl_ddr_sdram(&info);
}
#endif

/*
 * fsl_ddr_sdram_size() - This function only returns the size of the total
 * memory without setting any DDR control registers.
 */
phys_size_t
fsl_ddr_sdram_size(void)
{
	fsl_ddr_info_t info;
	unsigned long long total_memory = 0;

	memset(&info, 0, sizeof(fsl_ddr_info_t));
	info.mem_base = CFG_SYS_FSL_DDR_SDRAM_BASE_PHY;
	info.first_ctrl = 0;
	info.num_ctrls = CONFIG_SYS_FSL_DDR_MAIN_NUM_CTRLS;
	info.dimm_slots_per_ctrl = CONFIG_DIMM_SLOTS_PER_CTLR;
	info.board_need_mem_reset = NULL;
	remove_unused_controllers(&info);

	/* Compute it once normally. */
	total_memory = fsl_ddr_compute(&info, STEP_GET_SPD, 1);

	/* Ensure that total_memory does not overflow on return */
	if (total_memory > (phys_size_t)~0ULL)
		total_memory = (phys_size_t)~0ULL;

	return total_memory;
}