/*
 *
 * Clock initialization for OMAP4
 *
 * (C) Copyright 2010
 * Texas Instruments, <www.ti.com>
 *
 * Aneesh V <aneesh@ti.com>
 *
 * Based on previous work by:
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *	Rajendra Nayak <rnayak@ti.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>
#include <i2c.h>
#include <asm/omap_common.h>
#include <asm/gpio.h>
#include <asm/arch/clock.h>
#include <asm/arch/sys_proto.h>
#include <asm/utils.h>
#include <asm/omap_gpio.h>
#include <asm/emif.h>

#ifndef CONFIG_SPL_BUILD
/*
 * printing to console doesn't work unless
 * this code is executed from SPL
 */
#define printf(fmt, args...)
#define puts(s)
#endif

const u32 sys_clk_array[8] = {
	12000000,	/* 12 MHz */
	20000000,	/* 20 MHz */
	16800000,	/* 16.8 MHz */
	19200000,	/* 19.2 MHz */
	26000000,	/* 26 MHz */
	27000000,	/* 27 MHz */
	38400000,	/* 38.4 MHz */
};

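/*
 * sys_clk_array is indexed by (CM_SYS_CLKSEL - 1): register value 1
 * selects 12 MHz and value 7 selects 38.4 MHz (see __get_sys_clk_index()
 * below).
 */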
static inline u32 __get_sys_clk_index(void)
{
	s8 ind;
	/*
	 * For ES1 the ROM code calibration of the sys clock is not reliable
	 * due to a hw issue. So, use a hard-coded value. If this value is
	 * not correct for any board, override this function in the board
	 * file. From ES2.0 onwards this information is available from
	 * CM_SYS_CLKSEL.
	 */
	if (omap_revision() == OMAP4430_ES1_0)
		ind = OMAP_SYS_CLK_IND_38_4_MHZ;
	else {
		/* SYS_CLKSEL - 1 to match the dpll param array indices */
		ind = (readl((*prcm)->cm_sys_clksel) &
		       CM_SYS_CLKSEL_SYS_CLKSEL_MASK) - 1;
	}
	return ind;
}

u32 get_sys_clk_index(void)
	__attribute__ ((weak, alias("__get_sys_clk_index")));

u32 get_sys_clk_freq(void)
{
	u8 index = get_sys_clk_index();
	return sys_clk_array[index];
}

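/*
 * Post-divider fields in struct dpll_params use -1 as a "not used"
 * sentinel: only dividers with a value >= 0 are written below, so a
 * DPLL's unused M/Hxx outputs are left untouched.
 */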
void setup_post_dividers(u32 const base, const struct dpll_params *params)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	/* Setup post-dividers */
	if (params->m2 >= 0)
		writel(params->m2, &dpll_regs->cm_div_m2_dpll);
	if (params->m3 >= 0)
		writel(params->m3, &dpll_regs->cm_div_m3_dpll);
	if (params->m4_h11 >= 0)
		writel(params->m4_h11, &dpll_regs->cm_div_m4_h11_dpll);
	if (params->m5_h12 >= 0)
		writel(params->m5_h12, &dpll_regs->cm_div_m5_h12_dpll);
	if (params->m6_h13 >= 0)
		writel(params->m6_h13, &dpll_regs->cm_div_m6_h13_dpll);
	if (params->m7_h14 >= 0)
		writel(params->m7_h14, &dpll_regs->cm_div_m7_h14_dpll);
	if (params->h21 >= 0)
		writel(params->h21, &dpll_regs->cm_div_h21_dpll);
	if (params->h22 >= 0)
		writel(params->h22, &dpll_regs->cm_div_h22_dpll);
	if (params->h23 >= 0)
		writel(params->h23, &dpll_regs->cm_div_h23_dpll);
	if (params->h24 >= 0)
		writel(params->h24, &dpll_regs->cm_div_h24_dpll);
}

static inline void do_bypass_dpll(u32 const base)
{
	struct dpll_regs *dpll_regs = (struct dpll_regs *)base;

	clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
			CM_CLKMODE_DPLL_DPLL_EN_MASK,
			DPLL_EN_FAST_RELOCK_BYPASS <<
			CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_bypass(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!wait_on_value(ST_DPLL_CLK_MASK, 0, &dpll_regs->cm_idlest_dpll,
			   LDELAY)) {
		printf("Bypassing DPLL failed %x\n", base);
	}
}

static inline void do_lock_dpll(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
			CM_CLKMODE_DPLL_DPLL_EN_MASK,
			DPLL_EN_LOCK << CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_lock(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!wait_on_value(ST_DPLL_CLK_MASK, ST_DPLL_CLK_MASK,
			   &dpll_regs->cm_idlest_dpll, LDELAY)) {
		printf("DPLL locking failed for %x\n", base);
		hang();
	}
}

inline u32 check_for_lock(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;
	u32 lock = readl(&dpll_regs->cm_idlest_dpll) & ST_DPLL_CLK_MASK;

	return lock;
}

const struct dpll_params *get_mpu_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->mpu[sysclk_ind];
}

const struct dpll_params *get_core_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->core[sysclk_ind];
}

const struct dpll_params *get_per_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->per[sysclk_ind];
}

const struct dpll_params *get_iva_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->iva[sysclk_ind];
}

const struct dpll_params *get_usb_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->usb[sysclk_ind];
}

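/*
 * The ABE DPLL parameters are indexed by the sys clock only when
 * CONFIG_SYS_OMAP_ABE_SYSCK is set; otherwise a single parameter set,
 * independent of the sys clock frequency, is used.
 */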
const struct dpll_params *get_abe_dpll_params(struct dplls const *dpll_data)
{
#ifdef CONFIG_SYS_OMAP_ABE_SYSCK
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->abe[sysclk_ind];
#else
	return dpll_data->abe;
#endif
}

static const struct dpll_params *get_ddr_dpll_params
			(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();

	if (!dpll_data->ddr)
		return NULL;
	return &dpll_data->ddr[sysclk_ind];
}

#ifdef CONFIG_DRIVER_TI_CPSW
static const struct dpll_params *get_gmac_dpll_params
			(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();

	if (!dpll_data->gmac)
		return NULL;
	return &dpll_data->gmac[sysclk_ind];
}
#endif

static void do_setup_dpll(u32 const base, const struct dpll_params *params,
			  u8 lock, char *dpll)
{
	u32 temp, M, N;
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!params)
		return;

	temp = readl(&dpll_regs->cm_clksel_dpll);

	if (check_for_lock(base)) {
		/*
		 * The DPLL has already been locked by ROM code using CH.
		 * Check if M and N match the ideal nominal OPP values.
		 * If they match, skip the rest; otherwise relock.
		 */
		M = (temp & CM_CLKSEL_DPLL_M_MASK) >> CM_CLKSEL_DPLL_M_SHIFT;
		N = (temp & CM_CLKSEL_DPLL_N_MASK) >> CM_CLKSEL_DPLL_N_SHIFT;
		if ((M != (params->m)) || (N != (params->n))) {
			debug("\n %s DPLL locked, but not for ideal M = %d, "
			      "N = %d values, current values are M = %d, "
			      "N = %d", dpll, params->m, params->n,
			      M, N);
		} else {
			/* DPLL locked with ideal values for nominal OPPs. */
			debug("\n %s DPLL already locked with ideal "
			      "nominal OPP values", dpll);
			goto setup_post_dividers;
		}
	}

	bypass_dpll(base);

	/* Set M & N */
	temp &= ~CM_CLKSEL_DPLL_M_MASK;
	temp |= (params->m << CM_CLKSEL_DPLL_M_SHIFT) & CM_CLKSEL_DPLL_M_MASK;

	temp &= ~CM_CLKSEL_DPLL_N_MASK;
	temp |= (params->n << CM_CLKSEL_DPLL_N_SHIFT) & CM_CLKSEL_DPLL_N_MASK;

	writel(temp, &dpll_regs->cm_clksel_dpll);

	/* Lock */
	if (lock)
		do_lock_dpll(base);

setup_post_dividers:
	setup_post_dividers(base, params);

	/* Wait till the DPLL locks */
	if (lock)
		wait_for_lock(base);
}

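/*
 * The locked DPLL output used below follows Fdpll = 2 * M * sys_clk /
 * (N + 1); the DDR rate is then derived from Fdpll through the M2
 * post-divider. A rough worked example with hypothetical values: for a
 * 38.4 MHz sys clock with M = 125, N = 5 and M2 = 1, Fdpll = 1600 MHz
 * and the OMAP4 DDR clock comes out as 1600 / 4 / 1 = 400 MHz.
 */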
u32 omap_ddr_clk(void)
{
	u32 ddr_clk, sys_clk_khz, omap_rev, divider;
	const struct dpll_params *core_dpll_params;

	omap_rev = omap_revision();
	sys_clk_khz = get_sys_clk_freq() / 1000;

	core_dpll_params = get_core_dpll_params(*dplls_data);

	debug("sys_clk %d\n ", sys_clk_khz * 1000);

	/* Find Core DPLL locked frequency first */
	ddr_clk = sys_clk_khz * 2 * core_dpll_params->m /
		  (core_dpll_params->n + 1);

	if (omap_rev < OMAP5430_ES1_0) {
		/*
		 * DDR frequency is PHY_ROOT_CLK/2
		 * PHY_ROOT_CLK = Fdpll/2/M2
		 */
		divider = 4;
	} else {
		/*
		 * DDR frequency is PHY_ROOT_CLK
		 * PHY_ROOT_CLK = Fdpll/2/M2
		 */
		divider = 2;
	}

	ddr_clk = ddr_clk / divider / core_dpll_params->m2;
	ddr_clk *= 1000;	/* convert to Hz */
	debug("ddr_clk %d\n ", ddr_clk);

	return ddr_clk;
}

/*
 * Lock MPU dpll
 *
 * Resulting MPU frequencies:
 * 4430 ES1.0	: 600 MHz
 * 4430 ES2.x	: 792 MHz (OPP Turbo)
 * 4460		: 920 MHz (OPP Turbo) - DCC disabled
 */
void configure_mpu_dpll(void)
{
	const struct dpll_params *params;
	struct dpll_regs *mpu_dpll_regs;
	u32 omap_rev;
	omap_rev = omap_revision();

	/*
	 * DCC and clock divider settings for 4460.
	 * DCC is required if the MPU is to run above a certain frequency:
	 * above 1 GHz on the 4460 and above 1.4 GHz on the 5430.
	 */
	if ((omap_rev >= OMAP4460_ES1_0) && (omap_rev < OMAP5430_ES1_0)) {
		mpu_dpll_regs =
			(struct dpll_regs *)((*prcm)->cm_clkmode_dpll_mpu);
		bypass_dpll((*prcm)->cm_clkmode_dpll_mpu);
		clrbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
			     MPU_CLKCTRL_CLKSEL_EMIF_DIV_MODE_MASK);
		setbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
			     MPU_CLKCTRL_CLKSEL_ABE_DIV_MODE_MASK);
		clrbits_le32(&mpu_dpll_regs->cm_clksel_dpll,
			     CM_CLKSEL_DCC_EN_MASK);
	}

	params = get_mpu_dpll_params(*dplls_data);

	do_setup_dpll((*prcm)->cm_clkmode_dpll_mpu, params, DPLL_LOCK, "mpu");
	debug("MPU DPLL locked\n");
}

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP)
static void setup_usb_dpll(void)
{
	const struct dpll_params *params;
	u32 sys_clk_khz, sd_div, num, den;

	sys_clk_khz = get_sys_clk_freq() / 1000;
	/*
	 * USB:
	 * USB dpll is J-type. Need to set DPLL_SD_DIV for jitter correction
	 * DPLL_SD_DIV = CEILING ([DPLL_MULT/(DPLL_DIV+1)] * CLKINP / 250)
	 * - where CLKINP is sys_clk in MHz
	 * Use CLKINP in kHz and adjust the denominator accordingly so
	 * that we have enough accuracy and at the same time no overflow
	 */
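	/*
	 * The rounding below (num += den - 1 before the division) is an
	 * integer ceiling. Hypothetical example: with a 19.2 MHz sys clock,
	 * M = 400 and N = 15, num = 400 * 19200 = 7680000 and
	 * den = 16 * 250 * 1000 = 4000000, so sd_div = ceil(1.92) = 2.
	 */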
	params = get_usb_dpll_params(*dplls_data);
	num = params->m * sys_clk_khz;
	den = (params->n + 1) * 250 * 1000;
	num += den - 1;
	sd_div = num / den;
	clrsetbits_le32((*prcm)->cm_clksel_dpll_usb,
			CM_CLKSEL_DPLL_DPLL_SD_DIV_MASK,
			sd_div << CM_CLKSEL_DPLL_DPLL_SD_DIV_SHIFT);

	/* Now setup the dpll with the regular function */
	do_setup_dpll((*prcm)->cm_clkmode_dpll_usb, params, DPLL_LOCK, "usb");
}
#endif

static void setup_dplls(void)
{
	u32 temp;
	const struct dpll_params *params;

	debug("setup_dplls\n");

	/* CORE dpll */
	params = get_core_dpll_params(*dplls_data);	/* default - safest */
	/*
	 * Do not lock the core DPLL now. Just set it up.
	 * Core DPLL will be locked after setting up EMIF
	 * using the FREQ_UPDATE method (freq_update_core()).
	 */
	if (emif_sdram_type() == EMIF_SDRAM_TYPE_LPDDR2)
		do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
			      DPLL_NO_LOCK, "core");
	else
		do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
			      DPLL_LOCK, "core");
	/* Set the ratios for CORE_CLK, L3_CLK, L4_CLK */
	temp = (CLKSEL_CORE_X2_DIV_1 << CLKSEL_CORE_SHIFT) |
	       (CLKSEL_L3_CORE_DIV_2 << CLKSEL_L3_SHIFT) |
	       (CLKSEL_L4_L3_DIV_2 << CLKSEL_L4_SHIFT);
	writel(temp, (*prcm)->cm_clksel_core);
	debug("Core DPLL configured\n");

	/* lock PER dpll */
	params = get_per_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_per,
		      params, DPLL_LOCK, "per");
	debug("PER DPLL locked\n");

	/* MPU dpll */
	configure_mpu_dpll();

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP)
	setup_usb_dpll();
#endif
	params = get_ddr_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_ddrphy,
		      params, DPLL_LOCK, "ddr");

#ifdef CONFIG_DRIVER_TI_CPSW
	params = get_gmac_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_gmac, params,
		      DPLL_LOCK, "gmac");
#endif
}

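/*
 * get_offset_code() converts a requested voltage (in uV) into the PMIC
 * register code, rounding up to the next step above base_offset.
 * Hypothetical example: with base_offset = 500000 uV, step = 10000 uV
 * and start_code = 6, a request of 1060000 uV gives
 * (560000 + 9999) / 10000 = 56, so the returned code is 62.
 */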
u32 get_offset_code(u32 volt_offset, struct pmic_data *pmic)
{
	u32 offset_code;

	volt_offset -= pmic->base_offset;

	offset_code = (volt_offset + pmic->step - 1) / pmic->step;

	/*
	 * Offset codes 1-6 all give the base voltage in Palmas;
	 * offset code 0 switches OFF the SMPS.
	 */
	return offset_code + pmic->start_code;
}

void do_scale_vcore(u32 vcore_reg, u32 volt_mv, struct pmic_data *pmic)
{
	u32 offset_code;
	u32 offset = volt_mv;
#ifndef CONFIG_DRA7XX
	int ret = 0;
#endif

	if (!volt_mv)
		return;

	pmic->pmic_bus_init();
#ifndef CONFIG_DRA7XX
	/* See if we can first get the GPIO if needed */
	if (pmic->gpio_en)
		ret = gpio_request(pmic->gpio, "PMIC_GPIO");

	if (ret < 0) {
		printf("%s: gpio %d request failed %d\n", __func__,
		       pmic->gpio, ret);
		return;
	}

	/* Pull the GPIO low to select SET0 register, while we program SET1 */
	if (pmic->gpio_en)
		gpio_direction_output(pmic->gpio, 0);
#endif
	/* convert to uV for better accuracy in the calculations */
	offset *= 1000;

	offset_code = get_offset_code(offset, pmic);

	debug("do_scale_vcore: volt - %d offset_code - 0x%x\n", volt_mv,
	      offset_code);

	if (pmic->pmic_write(pmic->i2c_slave_addr, vcore_reg, offset_code))
		printf("Scaling voltage failed for 0x%x\n", vcore_reg);
#ifndef CONFIG_DRA7XX
	if (pmic->gpio_en)
		gpio_direction_output(pmic->gpio, 1);
#endif
}

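/*
 * optimize_vcore_voltage() returns the per-device trimmed voltage read
 * from the efuse register, when one is configured and non-zero, and
 * otherwise falls back to the nominal value from the volts struct.
 */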
static u32 optimize_vcore_voltage(struct volts const *v)
{
	u32 val;

	if (!v->value)
		return 0;
	if (!v->efuse.reg)
		return v->value;

	switch (v->efuse.reg_bits) {
	case 16:
		val = readw(v->efuse.reg);
		break;
	case 32:
		val = readl(v->efuse.reg);
		break;
	default:
		printf("Error: efuse 0x%08x bits=%d unknown\n",
		       v->efuse.reg, v->efuse.reg_bits);
		return v->value;
	}

	if (!val) {
		printf("Error: efuse 0x%08x bits=%d val=0, using %d\n",
		       v->efuse.reg, v->efuse.reg_bits, v->value);
		return v->value;
	}

	debug("%s:efuse 0x%08x bits=%d Vnom=%d, using efuse value %d\n",
	      __func__, v->efuse.reg, v->efuse.reg_bits, v->value, val);
	return val;
}

/*
 * Setup the voltages for the main SoC core power domains.
 * We start with the maximum voltages allowed here, as set in the corresponding
 * vcores_data struct, and then scale (usually down) to the fused values that
 * are retrieved from the SoC. The scaling happens only if the efuse.reg fields
 * are initialised.
 * Rail grouping is supported for the DRA7xx SoCs only, therefore the code is
 * compiled conditionally. Note that the new code writes the scaled (or zeroed)
 * values back to the vcores_data struct for eventual reuse. Zero values mean
 * that the corresponding rails are not controlled separately, and are not sent
 * to the PMIC.
 */
void scale_vcores(struct vcores_data const *vcores)
{
#if defined(CONFIG_DRA7XX)
	int i;
	struct volts *pv = (struct volts *)vcores;
	struct volts *px;

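	/*
	 * The walk below treats struct vcores_data as a flat array of
	 * struct volts entries (every member is a struct volts), so rails
	 * sharing the same PMIC SMPS can be grouped: the first occurrence
	 * keeps the highest requested voltage and the duplicates are zeroed.
	 */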
	for (i = 0; i < (sizeof(struct vcores_data) / sizeof(struct volts)); i++) {
		debug("%d -> ", pv->value);
		if (pv->value) {
			/* Handle non-empty members only */
			pv->value = optimize_vcore_voltage(pv);
			px = (struct volts *)vcores;
			while (px < pv) {
				/*
				 * Scan already handled non-empty members to
				 * see if we have a group and find the max
				 * voltage, which is set to the first
				 * occurrence of the particular SMPS; the
				 * other group voltages are zeroed.
				 */
				if (px->value) {
					if ((pv->pmic->i2c_slave_addr ==
					     px->pmic->i2c_slave_addr) &&
					    (pv->addr == px->addr)) {
						/* Same PMIC, same SMPS */
						if (pv->value > px->value)
							px->value = pv->value;

						pv->value = 0;
					}
				}
				px++;
			}
		}
		debug("%d\n", pv->value);
		pv++;
	}

	debug("cor: %d\n", vcores->core.value);
	do_scale_vcore(vcores->core.addr, vcores->core.value,
		       vcores->core.pmic);
	debug("mpu: %d\n", vcores->mpu.value);
	do_scale_vcore(vcores->mpu.addr, vcores->mpu.value, vcores->mpu.pmic);
	/* Configure MPU ABB LDO after scale */
	abb_setup((*ctrl)->control_std_fuse_opp_vdd_mpu_2,
		  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_mpu_setup,
		  (*prcm)->prm_abbldo_mpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu_2,
		  OMAP_ABB_MPU_TXDONE_MASK,
		  OMAP_ABB_FAST_OPP);

	/* The .mm member is not used for the DRA7xx */

	debug("gpu: %d\n", vcores->gpu.value);
	do_scale_vcore(vcores->gpu.addr, vcores->gpu.value, vcores->gpu.pmic);
	debug("eve: %d\n", vcores->eve.value);
	do_scale_vcore(vcores->eve.addr, vcores->eve.value, vcores->eve.pmic);
	debug("iva: %d\n", vcores->iva.value);
	do_scale_vcore(vcores->iva.addr, vcores->iva.value, vcores->iva.pmic);
	/* Might need udelay(1000) here if debug is enabled to see all prints */
#else
	u32 val;

	val = optimize_vcore_voltage(&vcores->core);
	do_scale_vcore(vcores->core.addr, val, vcores->core.pmic);

	val = optimize_vcore_voltage(&vcores->mpu);
	do_scale_vcore(vcores->mpu.addr, val, vcores->mpu.pmic);

	/* Configure MPU ABB LDO after scale */
	abb_setup((*ctrl)->control_std_fuse_opp_vdd_mpu_2,
		  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_mpu_setup,
		  (*prcm)->prm_abbldo_mpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu_2,
		  OMAP_ABB_MPU_TXDONE_MASK,
		  OMAP_ABB_FAST_OPP);

	val = optimize_vcore_voltage(&vcores->mm);
	do_scale_vcore(vcores->mm.addr, val, vcores->mm.pmic);

	val = optimize_vcore_voltage(&vcores->gpu);
	do_scale_vcore(vcores->gpu.addr, val, vcores->gpu.pmic);

	val = optimize_vcore_voltage(&vcores->eve);
	do_scale_vcore(vcores->eve.addr, val, vcores->eve.pmic);

	val = optimize_vcore_voltage(&vcores->iva);
	do_scale_vcore(vcores->iva.addr, val, vcores->iva.pmic);
#endif
}

static inline void enable_clock_domain(u32 const clkctrl_reg, u32 enable_mode)
{
	clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
			enable_mode << CD_CLKCTRL_CLKTRCTRL_SHIFT);
	debug("Enable clock domain - %x\n", clkctrl_reg);
}

static inline void wait_for_clk_enable(u32 clkctrl_addr)
{
	u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_DISABLED;
	u32 bound = LDELAY;

	while ((idlest == MODULE_CLKCTRL_IDLEST_DISABLED) ||
	       (idlest == MODULE_CLKCTRL_IDLEST_TRANSITIONING)) {
		clkctrl = readl(clkctrl_addr);
		idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
			 MODULE_CLKCTRL_IDLEST_SHIFT;
		if (--bound == 0) {
			printf("Clock enable failed for 0x%x idlest 0x%x\n",
			       clkctrl_addr, clkctrl);
			return;
		}
	}
}

static inline void enable_clock_module(u32 const clkctrl_addr, u32 enable_mode,
				       u32 wait_for_enable)
{
	clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
			enable_mode << MODULE_CLKCTRL_MODULEMODE_SHIFT);
	debug("Enable clock module - %x\n", clkctrl_addr);
	if (wait_for_enable)
		wait_for_clk_enable(clkctrl_addr);
}
void freq_update_core(void)
{
	u32 freq_config1 = 0;
	const struct dpll_params *core_dpll_params;
	u32 omap_rev = omap_revision();

	core_dpll_params = get_core_dpll_params(*dplls_data);
	/* Put EMIF clock domain in sw wakeup mode */
	enable_clock_domain((*prcm)->cm_memif_clkstctrl,
			    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
	wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);

	freq_config1 = SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK |
		       SHADOW_FREQ_CONFIG1_DLL_RESET_MASK;

	freq_config1 |= (DPLL_EN_LOCK << SHADOW_FREQ_CONFIG1_DPLL_EN_SHIFT) &
			SHADOW_FREQ_CONFIG1_DPLL_EN_MASK;

	freq_config1 |= (core_dpll_params->m2 <<
			 SHADOW_FREQ_CONFIG1_M2_DIV_SHIFT) &
			 SHADOW_FREQ_CONFIG1_M2_DIV_MASK;

	writel(freq_config1, (*prcm)->cm_shadow_freq_config1);
	if (!wait_on_value(SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK, 0,
			   (u32 *) (*prcm)->cm_shadow_freq_config1, LDELAY)) {
		puts("FREQ UPDATE procedure failed!!");
		hang();
	}

	/*
	 * Putting EMIF in HW_AUTO is seen to cause issues with
	 * the EMIF clocks and the master DLL. Keep EMIF in SW_WKUP
	 * on OMAP5430 ES1.0 silicon.
	 */
	if (omap_rev != OMAP5430_ES1_0) {
		/* Put EMIF clock domain back in hw auto mode */
		enable_clock_domain((*prcm)->cm_memif_clkstctrl,
				    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
		wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
		wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);
	}
}

void bypass_dpll(u32 const base)
{
	do_bypass_dpll(base);
	wait_for_bypass(base);
}

void lock_dpll(u32 const base)
{
	do_lock_dpll(base);
	wait_for_lock(base);
}

void setup_clocks_for_console(void)
{
	/* Do not add any spl_debug prints in this function */
	clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_SW_WKUP <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);

	/* Enable all UARTs - console will be on one of them */
	clrsetbits_le32((*prcm)->cm_l4per_uart1_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart2_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart3_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart4_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_HW_AUTO <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);
}

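/*
 * do_enable_clocks() takes zero-terminated arrays of clock domain and
 * module CLKCTRL register addresses (at most 100 entries each): the
 * domains are first forced to SW_WKUP, the modules are then enabled in
 * HW_AUTO or SW_EXPLICIT_EN mode, and finally the domains are returned
 * to HW_AUTO.
 */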
void do_enable_clocks(u32 const *clk_domains,
		      u32 const *clk_modules_hw_auto,
		      u32 const *clk_modules_explicit_en,
		      u8 wait_for_enable)
{
	u32 i, max = 100;

	/* Put the clock domains in SW_WKUP mode */
	for (i = 0; (i < max) && clk_domains[i]; i++) {
		enable_clock_domain(clk_domains[i],
				    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	}

	/* Clock modules that need to be put in HW_AUTO */
	for (i = 0; (i < max) && clk_modules_hw_auto[i]; i++) {
		enable_clock_module(clk_modules_hw_auto[i],
				    MODULE_CLKCTRL_MODULEMODE_HW_AUTO,
				    wait_for_enable);
	}

	/* Clock modules that need to be put in SW_EXPLICIT_EN mode */
	for (i = 0; (i < max) && clk_modules_explicit_en[i]; i++) {
		enable_clock_module(clk_modules_explicit_en[i],
				    MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN,
				    wait_for_enable);
	}

	/* Put the clock domains in HW_AUTO mode now */
	for (i = 0; (i < max) && clk_domains[i]; i++) {
		enable_clock_domain(clk_domains[i],
				    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
	}
}

void prcm_init(void)
{
	switch (omap_hw_init_context()) {
	case OMAP_INIT_CONTEXT_SPL:
	case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
	case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
		enable_basic_clocks();
		timer_init();
		scale_vcores(*omap_vcores);
		setup_dplls();
		setup_warmreset_time();
		break;
	default:
		break;
	}

	if (OMAP_INIT_CONTEXT_SPL != omap_hw_init_context())
		enable_basic_uboot_clocks();
}

void gpi2c_init(void)
{
	static int gpi2c = 1;

	if (gpi2c) {
		i2c_init(CONFIG_SYS_OMAP24_I2C_SPEED,
			 CONFIG_SYS_OMAP24_I2C_SLAVE);
		gpi2c = 0;
	}
}