xref: /arm-trusted-firmware/plat/imx/imx8m/ddr/ddr4_dvfs.c (revision 91f16700b400a8c0651d24a598fc48ee2997a0d7)
1*91f16700Schasinglulu /*
2*91f16700Schasinglulu  * Copyright 2018-2023 NXP
3*91f16700Schasinglulu  *
4*91f16700Schasinglulu  * SPDX-License-Identifier: BSD-3-Clause
5*91f16700Schasinglulu  */
6*91f16700Schasinglulu 
7*91f16700Schasinglulu #include <drivers/delay_timer.h>
8*91f16700Schasinglulu #include <lib/mmio.h>
9*91f16700Schasinglulu 
10*91f16700Schasinglulu #include <dram.h>
11*91f16700Schasinglulu 
/*
 * Perform one software-driven mode-register (MR) transaction through the
 * uMCTL2 MRCTRL0/MRCTRL1 interface, applying BA/address-bit mirroring when
 * the controller has mirroring enabled (DIMMCTL) and rank 2 is targeted.
 *
 * mr:        mode register number (placed in MRCTRL0[15:12])
 * data:      MR payload written to MRCTRL1 (used for MR writes)
 * mr_type:   MRCTRL0 mr_type field (low bits of MRCTRL0)
 * rank:      rank select value, written to MRCTRL0[5:4]
 * dram_type: DDRC_DDR4 selects the DDR4-specific data-bit swap table
 */
void ddr4_mr_write(uint32_t mr, uint32_t data, uint32_t mr_type,
	uint32_t rank, uint32_t dram_type)
{
	uint32_t val, mr_mirror, data_mirror;

	/*
	 * 1. Poll MRSTAT.mr_wr_busy until it is 0 to make sure
	 * that there is no outstanding MR transaction.
	 */

	/*
	 * ERR050712 workaround:
	 * When performing a software driven MR access, the following sequence
	 * must be done before performing other APB register accesses:
	 * 1. Set MRCTRL0.mr_wr=1
	 * 2. Check for MRSTAT.mr_wr_busy=0. If not, go to step (2)
	 * 3. Check for MRSTAT.mr_wr_busy=0 again (for the second time). If not, go to step (2)
	 */
	mmio_setbits_32(DDRC_MRCTRL0(0), BIT(31)); /* MRCTRL0.mr_wr = 1 */

	do {
		/* First poll: spin while MRSTAT.mr_wr_busy (bit 0) is set. */
		while (mmio_read_32(DDRC_MRSTAT(0)) & 0x1) {
			;
		}

		/* Second poll per the erratum; restart if busy re-asserted. */
	} while (mmio_read_32(DDRC_MRSTAT(0)) & 0x1);

	/*
	 * 2. Write the MRCTRL0.mr_type, MRCTRL0.mr_addr, MRCTRL0.mr_rank
	 * and (for MRWs) MRCTRL1.mr_data to define the MR transaction.
	 */
	/* DIMMCTL bit 1: presumably dimm_addr_mirr_en — confirm vs databook. */
	val = mmio_read_32(DDRC_DIMMCTL(0));
	if ((val & 0x2) && (rank == 0x2)) {
		mr_mirror = (mr & 0x4) | ((mr & 0x1) << 1) | ((mr & 0x2) >> 1); /* BA0, BA1 swap */
		if (dram_type == DDRC_DDR4) {
			/* DDR4: swap A3/A4, A5/A6, A7/A8 and A11/A13. */
			data_mirror = (data & 0x1607) | ((data & 0x8) << 1) | ((data & 0x10) >> 1) |
				((data & 0x20) << 1) | ((data & 0x40) >> 1) | ((data & 0x80) << 1) |
				((data & 0x100) >> 1) | ((data & 0x800) << 2) | ((data & 0x2000) >> 2) ;
		} else {
			/* Non-DDR4: swap A3/A4, A5/A6 and A7/A8 only. */
			data_mirror = (data & 0xfe07) | ((data & 0x8) << 1) | ((data & 0x10) >> 1) |
				 ((data & 0x20) << 1) | ((data & 0x40) >> 1) | ((data & 0x80) << 1) |
				 ((data & 0x100) >> 1);
		}
	} else {
		/* No mirroring required: pass address and data through. */
		mr_mirror = mr;
		data_mirror = data;
	}

	mmio_write_32(DDRC_MRCTRL0(0), mr_type | (mr_mirror << 12) | (rank << 4));
	mmio_write_32(DDRC_MRCTRL1(0), data_mirror);

	/*
	 * 3. In a separate APB transaction, write the MRCTRL0.mr_wr to 1.
	 * This bit is self-clearing, and triggers the MR transaction.
	 * The uMCTL2 then asserts the MRSTAT.mr_wr_busy while it performs
	 * the MR transaction to SDRAM, and no further accesses can be
	 * initiated until it is deasserted.
	 */
	mmio_setbits_32(DDRC_MRCTRL0(0), BIT(31));

	/* Wait for the transaction to complete: MRSTAT reads back as 0. */
	while (mmio_read_32(DDRC_MRSTAT(0))) {
		;
	}
}
76*91f16700Schasinglulu 
77*91f16700Schasinglulu void dram_cfg_all_mr(struct dram_info *info, uint32_t pstate)
78*91f16700Schasinglulu {
79*91f16700Schasinglulu 	uint32_t num_rank = info->num_rank;
80*91f16700Schasinglulu 	uint32_t dram_type = info->dram_type;
81*91f16700Schasinglulu 	/*
82*91f16700Schasinglulu 	 * 15. Perform MRS commands as required to re-program
83*91f16700Schasinglulu 	 * timing registers in the SDRAM for the new frequency
84*91f16700Schasinglulu 	 * (in particular, CL, CWL and WR may need to be changed).
85*91f16700Schasinglulu 	 */
86*91f16700Schasinglulu 
87*91f16700Schasinglulu 	for (int i = 1; i <= num_rank; i++) {
88*91f16700Schasinglulu 		for (int j = 0; j < 6; j++) {
89*91f16700Schasinglulu 			ddr4_mr_write(j, info->mr_table[pstate][j], 0, i, dram_type);
90*91f16700Schasinglulu 		}
91*91f16700Schasinglulu 		ddr4_mr_write(6, info->mr_table[pstate][7], 0, i, dram_type);
92*91f16700Schasinglulu 	}
93*91f16700Schasinglulu }
94*91f16700Schasinglulu 
/*
 * Switch the controller to a new performance state while the core is in
 * self-refresh: select the new register set, propagate refresh settings,
 * re-run DFI initialization around the PHY clock change, then exit
 * self-refresh.
 *
 * pstate: target frequency-set-point index (programmed via MSTR2/DFIMISC)
 * drate:  target data rate passed to dram_clock_switch()
 */
void sw_pstate(uint32_t pstate, uint32_t drate)
{
	uint32_t val;

	/* Open quasi-dynamic register programming (SWCTL.sw_done = 0). */
	mmio_write_32(DDRC_SWCTL(0), 0x0);

	/*
	 * Update any registers which may be required to
	 * change for the new frequency.
	 */
	mmio_write_32(DDRC_MSTR2(0), pstate);
	/* MSTR bit 29: presumably frequency-mode enable — confirm vs databook. */
	mmio_setbits_32(DDRC_MSTR(0), (0x1 << 29));

	/*
	 * Toggle RFSHCTL3.refresh_update_level to allow the
	 * new refresh-related register values to propagate
	 * to the refresh logic.
	 */
	val = mmio_read_32(DDRC_RFSHCTL3(0));
	if (val & 0x2) {
		mmio_write_32(DDRC_RFSHCTL3(0), val & 0xFFFFFFFD);
	} else {
		mmio_write_32(DDRC_RFSHCTL3(0), val | 0x2);
	}

	/*
	 * 19. If required, trigger the initialization in the PHY.
	 * If using the gen2 multiPHY, PLL initialization should
	 * be triggered at this point. See the PHY databook for
	 * details about the frequency change procedure.
	 */
	mmio_write_32(DDRC_DFIMISC(0), 0x00000000 | (pstate << 8));
	mmio_write_32(DDRC_DFIMISC(0), 0x00000020 | (pstate << 8));

	/* wait DFISTAT.dfi_init_complete to 0 */
	while (mmio_read_32(DDRC_DFISTAT(0)) & 0x1) {
		;
	}

	/* change the clock to the target frequency */
	dram_clock_switch(drate, false);

	/* Clear dfi_init_start again to let DFI initialization finish. */
	mmio_write_32(DDRC_DFIMISC(0), 0x00000000 | (pstate << 8));

	/* wait DFISTAT.dfi_init_complete to 1 */
	while (!(mmio_read_32(DDRC_DFISTAT(0)) & 0x1)) {
		;
	}

	/*
	 * When changing frequencies the controller may violate the JEDEC
	 * requirement that no more than 16 refreshes should be issued within
	 * 2*tREFI. These extra refreshes are not expected to cause a problem
	 * in the SDRAM. This issue can be avoided by waiting for at least 2*tREFI
	 * before exiting self-refresh in step 19.
	 */
	udelay(14);

	/* 14. Exit the self-refresh state by setting PWRCTL.selfref_sw = 0. */
	mmio_clrbits_32(DDRC_PWRCTL(0), (1 << 5));

	/* Wait until STAT no longer reports self-refresh (0x23). */
	while ((mmio_read_32(DDRC_STAT(0)) & 0x3f) == 0x23) {
		;
	}
}
160*91f16700Schasinglulu 
/*
 * Software-driven fast frequency change (SWFFC) for DDR4: quiesce the AXI
 * ports and HIF, enter self-refresh, perform the clock/register-set switch
 * via sw_pstate(), re-program the SDRAM mode registers, then restore
 * normal traffic.
 *
 * info:   DRAM configuration (timing/fsp table, rank count, DRAM type)
 * pstate: index into info->timing_info->fsp_table selecting the new rate
 */
void ddr4_swffc(struct dram_info *info, unsigned int pstate)
{
	uint32_t drate = info->timing_info->fsp_table[pstate];

	/*
	 * 1. set SWCTL.sw_done to disable quasi-dynamic register
	 * programming outside reset.
	 */
	mmio_write_32(DDRC_SWCTL(0), 0x0);

	/*
	 * 2. Write 0 to PCTRL_n.port_en. This blocks AXI port(s)
	 * from taking any transaction (blocks traffic on AXI ports).
	 */
	mmio_write_32(DDRC_PCTRL_0(0), 0x0);

	/*
	 * 3. Poll PSTAT.rd_port_busy_n=0 and PSTAT.wr_port_busy_n=0.
	 * Wait until all AXI ports are idle (the uMCTL2 core has to
	 * be idle).
	 */
	while (mmio_read_32(DDRC_PSTAT(0)) & 0x10001) {
		;
	}

	/*
	 * 4. Write 0 to SBRCTL.scrub_en. Disable SBR, required only if
	 * SBR instantiated.
	 * 5. Poll SBRSTAT.scrub_busy=0.
	 * 6. Set DERATEEN.derate_enable = 0, if DERATEEN.derate_enable = 1
	 * and the read latency (RL) value needs to change after the frequency
	 * change (LPDDR2/3/4 only).
	 * 7. Set DBG1.dis_hif=1 so that no new commands will be accepted by the uMCTL2.
	 */
	mmio_setbits_32(DDRC_DBG1(0), (0x1 << 1));

	/*
	 * 8. Poll DBGCAM.dbg_wr_q_empty and DBGCAM.dbg_rd_q_empty to ensure
	 * that write and read data buffers are empty.
	 */
	while ((mmio_read_32(DDRC_DBGCAM(0)) & 0x06000000) != 0x06000000) {
		;
	}

	/*
	 * 9. For DDR4, update MR6 with the new tDLLK value via the Mode
	 * Register Write signals
	 * 10. Set DFILPCFG0.dfi_lp_en_sr = 0, if DFILPCFG0.dfi_lp_en_sr = 1,
	 * and wait until DFISTAT.dfi_lp_ack
	 * 11. If DFI PHY Master interface is active in uMCTL2, then disable it
	 * 12. Wait until STAT.operating_mode[1:0]!=11 indicating that the
	 * controller is not in self-refresh mode.
	 */
	if ((mmio_read_32(DDRC_STAT(0)) & 0x3) == 0x3) {
		VERBOSE("DRAM is in Self Refresh\n");
	}

	/*
	 * 13. Assert PWRCTL.selfref_sw for the DWC_ddr_umctl2 core to enter
	 * the self-refresh mode.
	 */
	mmio_setbits_32(DDRC_PWRCTL(0), (1 << 5));

	/*
	 * 14. Wait until STAT.operating_mode[1:0]==11 indicating that the
	 * controller core is in self-refresh mode.
	 */
	while ((mmio_read_32(DDRC_STAT(0)) & 0x3f) != 0x23) {
		;
	}

	/* Switch pstate/clock, then re-program the SDRAM mode registers. */
	sw_pstate(pstate, drate);
	dram_cfg_all_mr(info, pstate);

	/* 23. Enable HIF commands by setting DBG1.dis_hif=0. */
	mmio_clrbits_32(DDRC_DBG1(0), (0x1 << 1));

	/*
	 * 24. Reset DERATEEN.derate_enable = 1 if DERATEEN.derate_enable
	 * has been set to 0 in step 6.
	 * 25. If DFI PHY Master interface was active before step 11 then
	 * enable it back by programming DFIPHYMSTR.phymstr_en = 1'b1.
	 * 26. Write 1 to PCTRL_n.port_en. AXI port(s) are no longer blocked
	 * from taking transactions (Re-enable traffic on AXI ports)
	 */
	mmio_write_32(DDRC_PCTRL_0(0), 0x1);

	/*
	 * 27. Write 1 to SBRCTL.scrub_en. Enable SBR if desired, only
	 * required if SBR instantiated.
	 */

	/*
	 * set SWCTL.sw_done to enable quasi-dynamic register programming
	 * outside reset.
	 */
	mmio_write_32(DDRC_SWCTL(0), 0x1);

	/* wait SWSTAT.sw_done_ack to 1 */
	while (!(mmio_read_32(DDRC_SWSTAT(0)) & 0x1)) {
		;
	}
}
263*91f16700Schasinglulu }
264