1 /*
2 * Copyright (c) 2024-2025, Altera Corporation. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <errno.h>
8 #include <stdio.h>
9 #include <string.h>
10
11 #include <common/debug.h>
12 #include <drivers/delay_timer.h>
13
14 #include "agilex5_iossm_mailbox.h"
15
/* Names of the DDR technologies the IOSSM firmware can report (index = type ID) */
static const char *ddr_type_list[7] = {
	"DDR4",
	"DDR5",
	"DDR5_RDIMM",
	"LPDDR4",
	"LPDDR5",
	"QDRIV",
	"UNKNOWN"
};
20
/*
 * Poll a 32-bit MMIO register until (value & mask) matches the expected state.
 *
 * reg        - MMIO register address to poll.
 * mask       - bit mask to test.
 * set        - true: wait until all mask bits are set; false: all clear.
 * timeout_ms - total time to keep polling, in milliseconds.
 *
 * Returns 0 on success, -ETIMEDOUT when the condition is not met in time.
 */
static inline int wait_for_bit(const void *reg,
			       const uint32_t mask,
			       const bool set,
			       const unsigned int timeout_ms)
{
	uint32_t val;
	/*
	 * Poll in one-second steps; round up so a timeout shorter than
	 * 1000 ms still yields at least one poll. The previous truncating
	 * division made sub-second timeouts fail instantly without ever
	 * sampling the register.
	 */
	uint32_t timeout_sec = (timeout_ms + 999U) / 1000U;

	while (timeout_sec > 0U) {
		val = mmio_read_32((uintptr_t)reg);

		INFO("IOSSM: timeout_sec %u, val %x\n", timeout_sec, val);

		/* Invert so the test below always checks for "all bits set". */
		if (!set) {
			val = ~val;
		}

		if ((val & mask) == mask) {
			INFO("IOSSM: %s, success\n", __func__);
			return 0;
		}

		/* one second delay */
		mdelay(1000);

		timeout_sec--;
	}

	ERROR("IOSSM: %s, failed, time out\n", __func__);
	return -ETIMEDOUT;
}
52
/*
 * Send a single mailbox command request to the IO96B IOSSM controller and
 * collect its response.
 *
 * io96b_csr_addr  - base address of the IO96B CSR region.
 * ip_type         - memory interface IP type (CMD_REQ bits [31:29]).
 * instance_id     - memory interface instance ID (CMD_REQ bits [28:24]).
 * usr_cmd_type    - command type field (CMD_REQ bits [23:16]).
 * usr_cmd_opcode  - command opcode field (CMD_REQ bits [15:0]).
 * cmd_param_0..6  - optional parameters; only non-zero values are written
 *                   to the corresponding CMD_PARAM_* register.
 * resp_data_len   - number of CMD_RESPONSE_DATA_* words to read back (0-3).
 * resp            - output: response status and data words.
 *
 * Returns 0 on success, -1 when the mailbox never released CMD_REQ.
 */
int io96b_mb_req(phys_addr_t io96b_csr_addr, uint32_t ip_type, uint32_t instance_id,
		 uint32_t usr_cmd_type, uint32_t usr_cmd_opcode, uint32_t cmd_param_0,
		 uint32_t cmd_param_1, uint32_t cmd_param_2, uint32_t cmd_param_3,
		 uint32_t cmd_param_4, uint32_t cmd_param_5, uint32_t cmd_param_6,
		 uint32_t resp_data_len, struct io96b_mb_resp *resp)
{
	uint32_t i;
	int ret;
	uint32_t cmd_req, cmd_resp;
	const uint32_t cmd_param[7] = {
		cmd_param_0, cmd_param_1, cmd_param_2, cmd_param_3,
		cmd_param_4, cmd_param_5, cmd_param_6
	};
	const uint32_t cmd_param_offset[7] = {
		IOSSM_CMD_PARAM_0_OFFSET, IOSSM_CMD_PARAM_1_OFFSET,
		IOSSM_CMD_PARAM_2_OFFSET, IOSSM_CMD_PARAM_3_OFFSET,
		IOSSM_CMD_PARAM_4_OFFSET, IOSSM_CMD_PARAM_5_OFFSET,
		IOSSM_CMD_PARAM_6_OFFSET
	};

	/* Initialized zeros for responses */
	resp->cmd_resp_status = 0;
	resp->cmd_resp_data_0 = 0;
	resp->cmd_resp_data_1 = 0;
	resp->cmd_resp_data_2 = 0;

	/* Ensure CMD_REQ is cleared before write any command request */
	ret = wait_for_bit((const void *)(io96b_csr_addr + IOSSM_CMD_REQ_OFFSET),
			   GENMASK(31, 0), 0, IOSSM_TIMEOUT_MS);
	if (ret != 0) {
		ERROR("%s: CMD_REQ not ready\n", __func__);
		return -1;
	}

	/*
	 * Write CMD_PARAM_0 .. CMD_PARAM_6. The previous switch-based loop
	 * only iterated i < 6, so cmd_param_6 was silently never written.
	 */
	for (i = 0U; i < 7U; i++) {
		if (cmd_param[i] != 0U) {
			mmio_write_32(io96b_csr_addr + cmd_param_offset[i], cmd_param[i]);
		}
	}

	/* Write CMD_REQ (IP_TYPE, IP_INSTANCE_ID, CMD_TYPE and CMD_OPCODE) */
	cmd_req = (usr_cmd_opcode << 0) | (usr_cmd_type << 16) | (instance_id << 24) |
		  (ip_type << 29);
	mmio_write_32(io96b_csr_addr + IOSSM_CMD_REQ_OFFSET, cmd_req);
	INFO("IOSSM: %s: Write 0x%x to IOSSM_CMD_REQ_OFFSET 0x%llx\n",
	     __func__, cmd_req, io96b_csr_addr + IOSSM_CMD_REQ_OFFSET);

	/* Read CMD_RESPONSE_READY in CMD_RESPONSE_STATUS */
	ret = wait_for_bit((const void *)(io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET),
			   IOSSM_STATUS_COMMAND_RESPONSE_READY, 1, IOSSM_TIMEOUT_MS);
	if (ret != 0) {
		ERROR("%s: CMD_RESPONSE ERROR:\n", __func__);
		/*
		 * Read the status register contents to decode the error
		 * fields; the previous code printed the register *address*
		 * instead of its value.
		 */
		cmd_resp = mmio_read_32(io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET);
		ERROR("%s: STATUS_GENERAL_ERROR: 0x%x\n", __func__, (cmd_resp >> 1) & 0xF);
		ERROR("%s: STATUS_CMD_RESPONSE_ERROR: 0x%x\n", __func__, (cmd_resp >> 5) & 0x7);
	}

	/* read CMD_RESPONSE_STATUS */
	resp->cmd_resp_status = mmio_read_32(io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET);
	INFO("IOSSM: %s: CMD_RESPONSE_STATUS 0x%llx: 0x%x\n",
	     __func__, io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET, resp->cmd_resp_status);

	/* read CMD_RESPONSE_DATA_* */
	for (i = 0U; i < resp_data_len; i++) {
		switch (i) {
		case 0:
			resp->cmd_resp_data_0 =
				mmio_read_32(io96b_csr_addr + IOSSM_CMD_RESPONSE_DATA_0_OFFSET);
			break;
		case 1:
			resp->cmd_resp_data_1 =
				mmio_read_32(io96b_csr_addr + IOSSM_CMD_RESPONSE_DATA_1_OFFSET);
			break;
		case 2:
			resp->cmd_resp_data_2 =
				mmio_read_32(io96b_csr_addr + IOSSM_CMD_RESPONSE_DATA_2_OFFSET);
			break;
		default:
			ERROR("%s: Invalid response data\n", __func__);
		}
	}

	resp->cmd_resp_status = mmio_read_32(io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET);
	INFO("IOSSM: %s: CMD_RESPONSE_STATUS 0x%llx: 0x%x\n",
	     __func__, io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET, resp->cmd_resp_status);

	/* write CMD_RESPONSE_READY = 0 to acknowledge the response */
	mmio_clrbits_32(io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET,
			IOSSM_STATUS_COMMAND_RESPONSE_READY);

	resp->cmd_resp_status = mmio_read_32(io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET);
	INFO("IOSSM: %s: CMD_RESPONSE_READY 0x%llx: 0x%x\n",
	     __func__, io96b_csr_addr + IOSSM_CMD_RESPONSE_STATUS_OFFSET, resp->cmd_resp_status);

	return 0;
}
187
188 /*
189 * Initial function to be called to set memory interface IP type and instance ID
190 * IP type and instance ID need to be determined before sending mailbox command
191 */
io96b_mb_init(struct io96b_info * io96b_ctrl)192 void io96b_mb_init(struct io96b_info *io96b_ctrl)
193 {
194 uint8_t ip_type_ret, instance_id_ret;
195 int i, j, k;
196 uint32_t mem_interface_0, mem_interface_1;
197
198 for (i = 0; i < io96b_ctrl->num_instance; i++) {
199 struct io96b_instance *inst = (i == 0) ?
200 &io96b_ctrl->io96b_0 : &io96b_ctrl->io96b_1;
201 phys_addr_t base = inst->io96b_csr_addr;
202
203 j = 0;
204 inst->mb_ctrl.num_mem_interface = 0;
205
206 mem_interface_0 = mmio_read_32(base + IOSSM_MEM_INTF_INFO_0_OFFSET);
207 mem_interface_1 = mmio_read_32(base + IOSSM_MEM_INTF_INFO_1_OFFSET);
208
209 for (k = 0; k < MAX_MEM_INTERFACES_SUPPORTED; k++) {
210 switch (k) {
211 case 0:
212 ip_type_ret = FIELD_GET(INTF_IP_TYPE_MASK, mem_interface_0);
213 instance_id_ret = FIELD_GET(INTF_INSTANCE_ID_MASK, mem_interface_0);
214 break;
215 case 1:
216 ip_type_ret = FIELD_GET(INTF_IP_TYPE_MASK, mem_interface_1);
217 instance_id_ret = FIELD_GET(INTF_INSTANCE_ID_MASK, mem_interface_1);
218 break;
219 }
220
221 if (ip_type_ret != 0) {
222 inst->mb_ctrl.ip_type[j] = ip_type_ret;
223 inst->mb_ctrl.ip_instance_id[j] = instance_id_ret;
224 inst->mb_ctrl.num_mem_interface++;
225 j++;
226 }
227 }
228 }
229 }
230
/* Halt the boot flow permanently after an unrecoverable IOSSM error. */
static inline void hang(void)
{
	ERROR("IOSSM: %s: system is going to die :(\n", __func__);
	for (;;) {
	}
}
237
/*
 * Check the calibration outcome of all EMIF instances behind one IO96B.
 * Returns 0 when calibration completed successfully, -EBUSY otherwise.
 */
int io96b_cal_status(phys_addr_t addr)
{
	phys_addr_t status_addr = addr + IOSSM_STATUS_OFFSET;
	int ret;

	/* Bail out early if any EMIF instance raises the failure bit. */
	ret = wait_for_bit((const void *)status_addr, IOSSM_STATUS_CAL_FAIL,
			   false, 15000);
	if (ret != 0) {
		ERROR("IOSSM: One or more EMIF instances are failed with calibration\n");
		return -EBUSY;
	}

	/* Then wait for the success bit to assert. */
	NOTICE("IOSSM: Calibration success status check...\n");
	ret = wait_for_bit((const void *)status_addr, IOSSM_STATUS_CAL_SUCCESS,
			   true, 15000);
	if (ret != 0) {
		ERROR("IOSSM: One/more EMIF instances either failed to calibrate/not completed\n");
		return -EBUSY;
	}

	NOTICE("IOSSM: All EMIF instances within the IO96 have calibrated successfully!\n");
	return 0;
}
263
init_mem_cal(struct io96b_info * io96b_ctrl)264 void init_mem_cal(struct io96b_info *io96b_ctrl)
265 {
266 int count = 0;
267 int ret;
268
269 io96b_ctrl->overall_cal_status = false;
270
271 for (int i = 0; i < io96b_ctrl->num_instance; i++) {
272 struct io96b_instance *inst = (i == 0) ?
273 &io96b_ctrl->io96b_0 : &io96b_ctrl->io96b_1;
274
275 ret = io96b_cal_status(inst->io96b_csr_addr);
276 if (ret != 0) {
277 inst->cal_status = false;
278 ERROR("IOSSM: %s: Initial DDR calibration IO96B failed %d\n",
279 __func__, i);
280 continue;
281 }
282
283 inst->cal_status = true;
284 INFO("IOSSM: %s: Initial DDR calibration IO96B_%d succeeded\n", __func__, i);
285 count++;
286 }
287
288 if (count == io96b_ctrl->num_instance)
289 io96b_ctrl->overall_cal_status = true;
290 }
291
/*
 * Trying 3 times re-calibration if the initial calibration failed.
 *
 * For every IO96B instance whose cal_status is false, and for each of its
 * two memory interfaces, the mailbox registers are driven directly:
 *   1. wait for CMD_REQ to become free,
 *   2. issue GET_MEM_CAL_STATUS,
 *   3. read the per-interface response word (status in bits [2:0]),
 *   4. while the status indicates failure (>= 0x2), issue TRIG_MEM_CAL and
 *      re-query, up to 3 attempts.
 * Any mailbox timeout or exhausted retry budget calls hang() and does not
 * return.
 *
 * NOTE(review): the interface loop is hard-coded to 2 rather than using
 * mb_ctrl.num_mem_interface - confirm both interfaces always exist here.
 * NOTE(review): the final overall_cal_status check requires both io96b_0
 * and io96b_1 to be calibrated even when num_instance == 1 - verify
 * against callers.
 *
 * Always returns 0 (all failure paths hang instead of returning).
 */
int trig_mem_cal(struct io96b_info *io96b_ctrl)
{
	bool recal_success;
	uint8_t cal_stat;
	uint32_t cmd_req;
	phys_addr_t base;
	int iface;

	for (int inst = 0; inst < io96b_ctrl->num_instance; inst++) {
		struct io96b_instance *inst_ctrl = (inst == 0) ?
			&io96b_ctrl->io96b_0 : &io96b_ctrl->io96b_1;
		/* Skip instances that already calibrated at boot. */
		if (inst_ctrl->cal_status)
			continue;

		base = inst_ctrl->io96b_csr_addr;

		for (iface = 0; iface < 2; iface++) {
			// 1. Wait for CMD_REQ to be 0
			if (wait_for_bit((void *)(base + IOSSM_CMD_REQ_OFFSET),
					 GENMASK(31, 0), 0, IOSSM_TIMEOUT_MS) != 0) {
				ERROR("%s: CMD_REQ not ready\n", __func__);
				hang();
			}

			// 2. Write CMD_REQ: GET_MEM_CAL_STATUS
			cmd_req = (GET_MEM_CAL_STATUS << 0) |
				  (CMD_TRIG_MEM_CAL_OP << 16) |
				  (0 << 24) | (0 << 29);
			mmio_write_32(base + IOSSM_CMD_REQ_OFFSET, cmd_req);

			// 3. Wait for response ready
			if (wait_for_bit((void *)(base + IOSSM_CMD_RESPONSE_STATUS_OFFSET),
					 IOSSM_STATUS_COMMAND_RESPONSE_READY, 1, IOSSM_TIMEOUT_MS) != 0) {
				ERROR("%s: Response timeout\n", __func__);
				hang();
			}

			// 4. Read response (DATA_0 for iface 0, DATA_1 for iface 1)
			uint32_t resp = mmio_read_32(base + (iface == 0 ?
					IOSSM_CMD_RESPONSE_DATA_0_OFFSET :
					IOSSM_CMD_RESPONSE_DATA_1_OFFSET));

			/* Calibration status lives in bits [2:0] of the response. */
			cal_stat = resp & GENMASK(2, 0);
			recal_success = false;

			// 5. Recalibrate if necessary (status >= 0x2 means failed)
			for (int retry = 0; retry < 3 && cal_stat >= 0x2; retry++) {
				// Wait for CMD_REQ clear
				if (wait_for_bit((void *)(base + IOSSM_CMD_REQ_OFFSET),
						 GENMASK(31, 0), 0, IOSSM_TIMEOUT_MS) != 0) {
					ERROR("%s: CMD_REQ not ready (retry)\n", __func__);
					hang();
				}

				// Send TRIG_MEM_CAL
				cmd_req = (TRIG_MEM_CAL << 0) |
					  (CMD_TRIG_MEM_CAL_OP << 16) |
					  (inst_ctrl->mb_ctrl.ip_instance_id[iface] << 24) |
					  (inst_ctrl->mb_ctrl.ip_type[iface] << 29);
				mmio_write_32(base + IOSSM_CMD_REQ_OFFSET, cmd_req);

				/* Give the firmware time to run the calibration. */
				mdelay(1000);

				// Send GET_MEM_CAL_STATUS again
				if (wait_for_bit((void *)(base + IOSSM_CMD_REQ_OFFSET),
						 GENMASK(31, 0), 0, IOSSM_TIMEOUT_MS) != 0) {
					ERROR("%s: CMD_REQ not ready (post-cal)\n", __func__);
					hang();
				}
				cmd_req = (GET_MEM_CAL_STATUS << 0) |
					  (CMD_TRIG_MEM_CAL_OP << 16) |
					  (0 << 24) | (0 << 29);
				mmio_write_32(base + IOSSM_CMD_REQ_OFFSET, cmd_req);

				if (wait_for_bit((void *)(base + IOSSM_CMD_RESPONSE_STATUS_OFFSET),
						 IOSSM_STATUS_COMMAND_RESPONSE_READY, 1,
						 IOSSM_TIMEOUT_MS) != 0) {
					ERROR("%s: Response timeout (post-cal)\n", __func__);
					hang();
				}

				resp = mmio_read_32(base + (iface == 0 ?
					IOSSM_CMD_RESPONSE_DATA_0_OFFSET :
					IOSSM_CMD_RESPONSE_DATA_1_OFFSET));
				cal_stat = resp & GENMASK(2, 0);

				if (cal_stat < 0x2) {
					recal_success = true;
					break;
				}
			}

			if (!recal_success) {
				ERROR("IOSSM: SDRAM calibration failed (inst %d iface %d)\n",
				      inst, iface);
				hang();
			}

			// Clear CMD_RESPONSE_READY
			mmio_clrbits_32(base + IOSSM_CMD_RESPONSE_STATUS_OFFSET,
					IOSSM_STATUS_COMMAND_RESPONSE_READY);
		}

		inst_ctrl->cal_status = true;
	}

	if (io96b_ctrl->io96b_0.cal_status && io96b_ctrl->io96b_1.cal_status) {
		INFO("IOSSM: %s: Overall SDRAM calibration success\n", __func__);
		io96b_ctrl->overall_cal_status = true;
	}

	return 0;
}
409
get_mem_technology(struct io96b_info * io96b_ctrl)410 int get_mem_technology(struct io96b_info *io96b_ctrl)
411 {
412 int inst_idx, iface_idx;
413 phys_addr_t base;
414 uint32_t cmd_req, resp;
415 uint8_t ddr_type_ret;
416 int iface_count;
417 uint32_t *ip_type;
418 uint32_t *ip_inst;
419
420 /* Default to UNKNOWN */
421 io96b_ctrl->ddr_type = ddr_type_list[6]; // "UNKNOWN"
422
423 for (inst_idx = 0; inst_idx < io96b_ctrl->num_instance; inst_idx++) {
424 struct io96b_instance *inst_ctrl = (inst_idx == 0) ?
425 &io96b_ctrl->io96b_0 : &io96b_ctrl->io96b_1;
426
427 base = inst_ctrl->io96b_csr_addr;
428 iface_count = inst_ctrl->mb_ctrl.num_mem_interface;
429 ip_type = inst_ctrl->mb_ctrl.ip_type;
430 ip_inst = inst_ctrl->mb_ctrl.ip_instance_id;
431
432 for (iface_idx = 0; iface_idx < iface_count; iface_idx++) {
433 /* Ensure CMD_REQ is available */
434 if (wait_for_bit((void *)(base + IOSSM_CMD_REQ_OFFSET),
435 GENMASK(31, 0), 0, IOSSM_TIMEOUT_MS) != 0) {
436 ERROR("%s: CMD_REQ not ready (IO96B_%d iface %d)\n",
437 __func__, inst_idx, iface_idx);
438 return -ETIMEDOUT;
439 }
440
441 /* Compose and send CMD_REQ */
442 cmd_req = (GET_MEM_TECHNOLOGY << 0) |
443 (CMD_GET_MEM_INFO << 16) |
444 (ip_inst[iface_idx] << 24) |
445 (ip_type[iface_idx] << 29);
446 mmio_write_32(base + IOSSM_CMD_REQ_OFFSET, cmd_req);
447
448 /* Wait for response */
449 if (wait_for_bit((void *)(base + IOSSM_CMD_RESPONSE_STATUS_OFFSET),
450 IOSSM_STATUS_COMMAND_RESPONSE_READY, 1, IOSSM_TIMEOUT_MS) != 0) {
451 ERROR("%s: CMD_RESPONSE timeout (IO96B_%d iface %d)\n",
452 __func__, inst_idx, iface_idx);
453 return -ETIMEDOUT;
454 }
455
456 /* Extract and interpret DDR type */
457 resp = mmio_read_32(base + IOSSM_CMD_RESPONSE_STATUS_OFFSET);
458 ddr_type_ret = IOSSM_CMD_RESPONSE_DATA_SHORT(resp) & GENMASK(2, 0);
459
460 if (ddr_type_ret > 6) {
461 ERROR("%s: Invalid DDR type ID: %u\n", __func__, ddr_type_ret);
462 return -EINVAL;
463 }
464
465 if (strcmp(io96b_ctrl->ddr_type, "UNKNOWN") == 0) {
466 io96b_ctrl->ddr_type = ddr_type_list[ddr_type_ret];
467 } else if (strcmp(ddr_type_list[ddr_type_ret], io96b_ctrl->ddr_type) != 0) {
468 ERROR("IOSSM: DDR type mismatch on IO96B_%d iface %d: %s != %s\n",
469 inst_idx, iface_idx,
470 io96b_ctrl->ddr_type, ddr_type_list[ddr_type_ret]);
471 return -ENOEXEC;
472 }
473
474 INFO("IOSSM: DDR type on IO96B_%d iface %d is %s\n",
475 inst_idx, iface_idx, io96b_ctrl->ddr_type);
476
477 /* Clear CMD_RESPONSE_READY */
478 mmio_clrbits_32(base + IOSSM_CMD_RESPONSE_STATUS_OFFSET,
479 IOSSM_STATUS_COMMAND_RESPONSE_READY);
480 }
481 }
482
483 return 0;
484 }
485
get_mem_width_info(struct io96b_info * io96b_ctrl)486 int get_mem_width_info(struct io96b_info *io96b_ctrl)
487 {
488 int i, j;
489 phys_size_t memory_size = 0U;
490 phys_size_t total_memory_size = 0U;
491 uint32_t mem_width_info;
492 uint32_t mem_total_capacity_intf_offset[MAX_MEM_INTERFACE_SUPPORTED] = {
493 IOSSM_MEM_TOTAL_CAPACITY_INTF0_OFFSET,
494 IOSSM_MEM_TOTAL_CAPACITY_INTF1_OFFSET
495 };
496
497 for (i = 0; i < io96b_ctrl->num_instance; i++) {
498 struct io96b_instance *instance = (i == 0) ?
499 &io96b_ctrl->io96b_0 : &io96b_ctrl->io96b_1;
500
501 memory_size = 0;
502
503 for (j = 0; j < instance->mb_ctrl.num_mem_interface; j++) {
504 mem_width_info = mmio_read_32(instance->io96b_csr_addr +
505 mem_total_capacity_intf_offset[j]);
506
507 instance->mb_ctrl.memory_size[j] =
508 FIELD_GET(INTF_CAPACITY_GBITS_MASK, mem_width_info) * SZ_1G / SZ_8;
509
510 if (instance->mb_ctrl.memory_size[j] != 0)
511 memory_size += instance->mb_ctrl.memory_size[j];
512 }
513
514 if (memory_size == 0U) {
515 ERROR("IOSSM: %s: Failed to get valid memory size on IO96B_%d\n",
516 __func__, i);
517 return -ENOEXEC;
518 }
519
520 total_memory_size += memory_size;
521 }
522
523 if (total_memory_size == 0U) {
524 ERROR("IOSSM: %s: Total memory size is zero\n", __func__);
525 return -ENOEXEC;
526 }
527
528 io96b_ctrl->overall_size = total_memory_size;
529 return 0;
530 }
531
/*
 * Log the ECC enablement configuration reported by one ECC_ENABLE
 * interface register: enable type in bits [1:0], in-line vs out-of-band
 * selection in bit 8.
 */
static inline void print_ecc_enable_status(uint32_t ecc_status)
{
	/* Bits [1:0] index directly into this table (range 0-3). */
	static const char *const ecc_en_desc[4] = {
		"Disabled",
		"Enabled without error correction or detection",
		"Enabled with error correction, without detection",
		"Enabled with error correction and detection"
	};
	uint8_t ecc_en_type = ecc_status & GENMASK(1, 0);

	INFO("DDR: ECC enable type: %s\n", ecc_en_desc[ecc_en_type]);

	if (ecc_en_type != 0) {
		INFO("DDR: ECC type: %s\n",
		     (ecc_status & BIT(8)) != 0U ? "In-line" : "Out-of-Band");
	}
}
546
ecc_enable_status(struct io96b_info * io96b_ctrl)547 int ecc_enable_status(struct io96b_info *io96b_ctrl)
548 {
549 int i, j;
550 bool ecc_stat_set = false;
551 bool ecc_stat, is_inline_ecc;
552 uint32_t ecc_enable_interface;
553 uint32_t ecc_enable_intf_offset[MAX_MEM_INTERFACE_SUPPORTED] = {
554 IOSSM_ECC_ENABLE_INTF0_OFFSET,
555 IOSSM_ECC_ENABLE_INTF1_OFFSET
556 };
557
558 /* Initialize ECC status */
559 io96b_ctrl->ecc_status = false;
560 io96b_ctrl->is_inline_ecc = false;
561
562 for (i = 0; i < io96b_ctrl->num_instance; i++) {
563 struct io96b_instance *inst_ctrl = (i == 0) ?
564 &io96b_ctrl->io96b_0 : &io96b_ctrl->io96b_1;
565
566 for (j = 0; j < inst_ctrl->mb_ctrl.num_mem_interface; j++) {
567 ecc_enable_interface = mmio_read_32(inst_ctrl->io96b_csr_addr +
568 ecc_enable_intf_offset[j]);
569
570 print_ecc_enable_status(ecc_enable_interface);
571
572 ecc_stat = FIELD_GET(INTF_ECC_ENABLE_TYPE_MASK, ecc_enable_interface) != 0;
573 is_inline_ecc = FIELD_GET(INTF_ECC_TYPE_MASK, ecc_enable_interface);
574
575 if (!ecc_stat_set) {
576 io96b_ctrl->ecc_status = ecc_stat;
577 if (ecc_stat) {
578 io96b_ctrl->is_inline_ecc = is_inline_ecc;
579 }
580 ecc_stat_set = true;
581 }
582
583 if (ecc_stat != io96b_ctrl->ecc_status) {
584 ERROR("IOSSM: %s: Mismatch ECC status on IO96B_%d INT%d\n",
585 __func__, i, j);
586 return -ENOEXEC;
587 }
588 }
589 }
590
591 NOTICE("DDR: ECC is %s%s\n",
592 io96b_ctrl->ecc_status ? "enabled" : "disabled",
593 io96b_ctrl->ecc_status && io96b_ctrl->is_inline_ecc ? " (inline ECC)" : "");
594
595 return 0;
596 }
597
is_double_bit_error(enum ecc_error_type err_type)598 static bool is_double_bit_error(enum ecc_error_type err_type)
599 {
600 switch (err_type) {
601 case DOUBLE_BIT_ERROR:
602 case MULTIPLE_DOUBLE_BIT_ERRORS:
603 case WRITE_LINK_DOUBLE_BIT_ERROR:
604 case READ_LINK_DOUBLE_BIT_ERROR:
605 case READ_MODIFY_WRITE_DOUBLE_BIT_ERROR:
606 return true;
607
608 default:
609 return false;
610 }
611 }
612
get_ecc_dbe_status(struct io96b_info * io96b_ctrl)613 bool get_ecc_dbe_status(struct io96b_info *io96b_ctrl)
614 {
615 uint32_t ecc_err_status;
616 uint16_t ecc_err_counter;
617 bool ecc_dbe_err_flag = false;
618
619 struct io96b_instance *instances[] = {
620 &io96b_ctrl->io96b_0,
621 &io96b_ctrl->io96b_1
622 };
623
624 for (uint32_t i = 0; i < io96b_ctrl->num_instance; i++) {
625 struct io96b_instance *curr_inst = instances[i];
626 uint32_t base = curr_inst->io96b_csr_addr;
627
628 uint32_t instance_id = *curr_inst->mb_ctrl.ip_instance_id;
629 uint32_t ip_type = *curr_inst->mb_ctrl.ip_type;
630
631 /* Read ECC error status register */
632 ecc_err_status = mmio_read_32(base + IOSSM_ECC_ERR_STATUS_OFFSET);
633 ecc_err_counter = FIELD_GET(ECC_ERR_COUNTER_MASK, ecc_err_status);
634
635 INFO("DDR: ECC error status register on IO96B_%d: 0x%x\n", i, ecc_err_status);
636
637 if (ecc_err_counter == 0)
638 continue;
639
640 uint32_t address = base + IOSSM_ECC_ERR_DATA_START_OFFSET;
641
642 for (uint32_t j = 0; j < ecc_err_counter && j < MAX_ECC_ERR_COUNT; j++) {
643 uint32_t ecc_err_data = mmio_read_32(address);
644 uint32_t ecc_err_data_lo = mmio_read_32(address + sizeof(uint32_t));
645
646 INFO("DDR: ECC error details, buffer entry[%d]:", j);
647 INFO("- err info addr: 0x%x", address);
648 INFO("- err ip type: %lu", FIELD_GET(ECC_ERR_IP_TYPE_MASK, ecc_err_data));
649 INFO("- err instance id: %lu",
650 FIELD_GET(ECC_ERR_INSTANCE_ID_MASK, ecc_err_data));
651 INFO("- err source id: %lu",
652 FIELD_GET(ECC_ERR_SOURCE_ID_MASK, ecc_err_data));
653 INFO("- err type: %lu", FIELD_GET(ECC_ERR_TYPE_MASK, ecc_err_data));
654 INFO("- err addr upper: 0x%lx",
655 FIELD_GET(ECC_ERR_ADDR_UPPER_MASK, ecc_err_data));
656 INFO("- err addr lower: 0x%x", ecc_err_data_lo);
657
658 if (is_double_bit_error(FIELD_GET(ECC_ERR_TYPE_MASK, ecc_err_data))) {
659 ecc_dbe_err_flag = true;
660 }
661
662 address += sizeof(uint32_t) * 2;
663 }
664
665 NOTICE("DDR: ECC error count value %d", ecc_err_counter);
666 NOTICE("DDR: ECC error overflow field 0x%lx",
667 FIELD_GET(ECC_ERR_OVERFLOW_MASK, ecc_err_status));
668
669 /* Clear the ECC error buffer using MMIO write (replacing io96b_mb_req) */
670 uint32_t cmd_req = (IOSSM_ECC_CLEAR_ERR_BUFFER << 0) |
671 (CMD_TRIG_CONTROLLER_OP << 16) |
672 (instance_id << 24) |
673 (ip_type << 29);
674
675 mmio_write_32(base + IOSSM_CMD_REQ_OFFSET, cmd_req);
676 mmio_write_32(base + IOSSM_CONTROLLER_TRIGGER_OFFSET, IOSSM_ECC_CLEAR_ERR_BUFFER);
677
678 /* Read back to confirm reset */
679 uint32_t curr_status = mmio_read_32(base + IOSSM_ECC_ERR_STATUS_OFFSET);
680
681 INFO("DDR: %s: Post reset, ECC error count on IO96B_%d: %lu",
682 __func__, i, FIELD_GET(ECC_ERR_COUNTER_MASK, curr_status));
683 }
684
685 return ecc_dbe_err_flag;
686 }
687
out_of_band_ecc_bist_mem_init(struct io96b_info * io96b_ctrl)688 static int out_of_band_ecc_bist_mem_init(struct io96b_info *io96b_ctrl)
689 {
690 struct io96b_mb_resp usr_resp;
691 int i, j;
692 bool bist_start, bist_success;
693 uint32_t read_count = 0;
694 uint32_t read_interval_ms = 500U;
695
696 const uint32_t mem_init_status_offset[MAX_MEM_INTERFACE_SUPPORTED] = {
697 IOSSM_MEM_INIT_STATUS_INTF0_OFFSET,
698 IOSSM_MEM_INIT_STATUS_INTF1_OFFSET
699 };
700
701 for (i = 0; i < io96b_ctrl->num_instance; i++) {
702 struct io96b_instance *inst = (i == 0) ?
703 &io96b_ctrl->io96b_0 : &io96b_ctrl->io96b_1;
704
705 for (j = 0; j < inst->mb_ctrl.num_mem_interface; j++) {
706 bist_start = false;
707 bist_success = false;
708
709 /* Start memory initialization BIST on full memory address */
710 io96b_mb_req(inst->io96b_csr_addr,
711 inst->mb_ctrl.ip_type[j],
712 inst->mb_ctrl.ip_instance_id[j],
713 CMD_TRIG_CONTROLLER_OP, BIST_MEM_INIT_START, 0x40,
714 0, 0, 0, 0, 0, 0, 0, &usr_resp);
715
716 bist_start = IOSSM_CMD_RESPONSE_DATA_SHORT(usr_resp.cmd_resp_status) & 1;
717
718 if (!bist_start)
719 return -ENOEXEC;
720
721 read_count = TIMEOUT / read_interval_ms;
722
723 /* Polling for memory initialization BIST status */
724 while (!bist_success) {
725 uint32_t status = mmio_read_32(inst->io96b_csr_addr +
726 mem_init_status_offset[j]);
727
728 bist_success = FIELD_GET(INTF_BIST_STATUS_MASK, status);
729
730 if (!bist_success && read_count == 0U) {
731 ERROR("IOSSM: %s: Timeout init memory on IO96B_%d\n",
732 __func__, i);
733 ERROR("IOSSM: %s: BIST_MEM_INIT_STATUS Err code 0x%x\n",
734 __func__, (IOSSM_CMD_RESPONSE_DATA_SHORT
735 (usr_resp.cmd_resp_status) & GENMASK(2, 1)) > 0x1);
736 return -ETIMEDOUT;
737 }
738 read_count--;
739 mdelay(read_interval_ms);
740 }
741 }
742 NOTICE("IOSSM: Memory initialized successfully on IO96B\n");
743 }
744 return 0;
745 }
746
inline_ecc_bist_mem_init(struct io96b_info * io96b_ctrl)747 static int inline_ecc_bist_mem_init(struct io96b_info *io96b_ctrl)
748 {
749 int i, j;
750 struct io96b_mb_resp usr_resp;
751 bool bist_start, bist_success;
752 phys_size_t mem_size, chunk_size;
753 uint32_t mem_exp, cmd_param_0, cmd_param_1, cmd_param_2;
754 uint32_t read_count, read_interval_ms = 500U;
755
756 const uint32_t mem_init_status_offset[] = { 0x260, 0x2E0 };
757
758 for (i = 0; i < io96b_ctrl->num_instance; i++) {
759 struct io96b_instance *inst = (i == 0) ?
760 &io96b_ctrl->io96b_0 : &io96b_ctrl->io96b_1;
761
762 for (j = 0; j < inst->mb_ctrl.num_mem_interface; j++) {
763 mem_size = inst->mb_ctrl.memory_size[j];
764 chunk_size = mem_size;
765 mem_exp = 0;
766 bist_start = false;
767 bist_success = false;
768
769 if (mem_size == 0 || (mem_size & (mem_size - 1)) != 0) {
770 ERROR("IOSSM: %s: Wrong memory size - not power of 2!\n", __func__);
771 return -ENOEXEC;
772 }
773
774 while (chunk_size >>= 1)
775 mem_exp++;
776
777 cmd_param_0 = FIELD_PREP(BIST_START_ADDR_SPACE_MASK, mem_exp);
778 cmd_param_1 = FIELD_GET(BIST_START_ADDR_LOW_MASK, 0);
779 cmd_param_2 = FIELD_GET(BIST_START_ADDR_HIGH_MASK, 0);
780
781 io96b_mb_req(inst->io96b_csr_addr,
782 inst->mb_ctrl.ip_type[j],
783 inst->mb_ctrl.ip_instance_id[j],
784 CMD_TRIG_CONTROLLER_OP, BIST_MEM_INIT_START,
785 cmd_param_0, cmd_param_1, cmd_param_2,
786 0, 0, 0, 0, 0, &usr_resp);
787
788 bist_start = IOSSM_CMD_RESPONSE_DATA_SHORT(usr_resp.cmd_resp_status) & 1;
789
790 if (!bist_start) {
791 ERROR("IOSSM: %s: Failed to initialize memory on IO96B_%d\n",
792 __func__, i);
793 ERROR("IOSSM: %s: BIST_MEM_INIT_START Error code 0x%x\n",
794 __func__,
795 (IOSSM_CMD_RESPONSE_DATA_SHORT(usr_resp.cmd_resp_status)
796 & GENMASK(2, 1)) > 0x1);
797 return -ENOEXEC;
798 }
799
800 read_count = read_interval_ms / TIMEOUT;
801
802 while (!bist_success) {
803 uint32_t status = mmio_read_32(inst->io96b_csr_addr +
804 mem_init_status_offset[j]);
805
806 bist_success = status & BIT(0);
807
808 if ((!bist_success) && (read_count == 0U)) {
809 ERROR("IOSSM: %s: Timeout init memory on IO96B_%d\n",
810 __func__, i);
811 ERROR("IOSSM: %s: BIST_MEM_INIT_STATUS raw status = 0x%x\n",
812 __func__, status);
813 return -ETIMEDOUT;
814 }
815 read_count--;
816 mdelay(read_interval_ms);
817 }
818 }
819
820 NOTICE("IOSSM: %s: Memory initialized successfully on IO96B_%d\n", __func__, i);
821 }
822 return 0;
823 }
824
825 /*
826 * Memory initialization BIST (Built-In Self-Test) start function.
827 * This function will call either inline ECC or out-of-band BIST memory init
828 * based on the ECC type status.
829 */
bist_mem_init_start(struct io96b_info * io96b_ctrl)830 int bist_mem_init_start(struct io96b_info *io96b_ctrl)
831 {
832 if (io96b_ctrl->is_inline_ecc)
833 return inline_ecc_bist_mem_init(io96b_ctrl);
834 else
835 return out_of_band_ecc_bist_mem_init(io96b_ctrl);
836 }
837