blob: d19760ce5dfcd199bec6e8f35f0c30814d1af337 [file] [log] [blame]
Googler9398cc32022-12-02 17:21:52 +08001// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Amlogic AXI PCIe host controller driver
4 *
5 * Copyright (c) 2016 Amlogic, Inc.
6 *
7 */
8
9#include <linux/clk.h>
10#include <linux/delay.h>
11#include <linux/gpio/consumer.h>
12#include <linux/of_pci.h>
13#include <linux/phy/phy.h>
14#include <linux/platform_device.h>
15#include <linux/reset.h>
16#include <linux/of_gpio.h>
17
18#include "pcie-amlogic-v3.h"
19
20int amlogic_pcie_get_reset(struct amlogic_pcie *amlogic)
21{
22 struct device *dev = amlogic->dev;
23 struct platform_device *pdev = to_platform_device(dev);
24 struct device_node *node = dev->of_node;
25 struct resource *rst_regs;
26 int ret = 0;
27
28 /*m31phy_pcie_wrapper_rst*/
29 amlogic->m31phy_rst = devm_reset_control_get_exclusive(dev,
30 "m31phy_rst");
31 if (IS_ERR(amlogic->m31phy_rst)) {
32 if (PTR_ERR(amlogic->m31phy_rst) != -EPROBE_DEFER)
33 goto get_rst_reg;
34 }
35
36 /*pcie_gen3_l0_rst*/
37 amlogic->gen3_l0_rst = devm_reset_control_get_exclusive(dev,
38 "gen3_lo_rst");
39 if (IS_ERR(amlogic->gen3_l0_rst)) {
40 if (PTR_ERR(amlogic->gen3_l0_rst) != -EPROBE_DEFER)
41 dev_err(dev, "gen3_l0_rst reset property in node\n");
42 return PTR_ERR(amlogic->gen3_l0_rst);
43 }
44
45 amlogic->pcie_apb_rst = devm_reset_control_get_exclusive(dev,
46 "apb_rst");
47 if (IS_ERR(amlogic->pcie_apb_rst)) {
48 if (PTR_ERR(amlogic->pcie_apb_rst) != -EPROBE_DEFER)
49 dev_err(dev, "pcie_apb_rstreset property in node\n");
50 return PTR_ERR(amlogic->pcie_apb_rst);
51 }
52
53 amlogic->pcie_phy_rst = devm_reset_control_get_exclusive(dev,
54 "phy_rst");
55 if (IS_ERR(amlogic->pcie_phy_rst)) {
56 if (PTR_ERR(amlogic->pcie_phy_rst) != -EPROBE_DEFER)
57 dev_err(dev, "pcie_phy_rst property in node\n");
58 return PTR_ERR(amlogic->pcie_phy_rst);
59 }
60
61 amlogic->pcie_a_rst = devm_reset_control_get_exclusive(dev,
62 "pcie_a_rst");
63 if (IS_ERR(amlogic->pcie_a_rst)) {
64 if (PTR_ERR(amlogic->pcie_a_rst) != -EPROBE_DEFER)
65 dev_err(dev, "pcie_a_rst reset property in node\n");
66 return PTR_ERR(amlogic->pcie_a_rst);
67 }
68
69 amlogic->pcie_rst0 = devm_reset_control_get_exclusive(dev, "pcie_rst0");
70 if (IS_ERR(amlogic->pcie_rst0)) {
71 if (PTR_ERR(amlogic->pcie_rst0) != -EPROBE_DEFER)
72 dev_err(dev, "pcie_rst0 property in node\n");
73 return PTR_ERR(amlogic->pcie_rst0);
74 }
75
76 amlogic->pcie_rst1 = devm_reset_control_get_exclusive(dev, "pcie_rst1");
77 if (IS_ERR(amlogic->pcie_rst1)) {
78 if (PTR_ERR(amlogic->pcie_rst1) != -EPROBE_DEFER)
79 dev_err(dev, "pcie_rst1 reset property in node\n");
80 return PTR_ERR(amlogic->pcie_rst1);
81 }
82
83 amlogic->pcie_rst2 = devm_reset_control_get_exclusive(dev, "pcie_rst2");
84 if (IS_ERR(amlogic->pcie_rst2)) {
85 if (PTR_ERR(amlogic->pcie_rst2) != -EPROBE_DEFER)
86 dev_err(dev, "pcie_rst2 reset property in node\n");
87 return PTR_ERR(amlogic->pcie_rst2);
88 }
89
90 amlogic->pcie_rst3 = devm_reset_control_get_exclusive(dev, "pcie_rst3");
91 if (IS_ERR(amlogic->pcie_rst3)) {
92 if (PTR_ERR(amlogic->pcie_rst3) != -EPROBE_DEFER)
93 dev_err(dev, "pcie_rst3 reset property in node\n");
94 return PTR_ERR(amlogic->pcie_rst3);
95 }
96
97 amlogic->pcie_rst4 = devm_reset_control_get_exclusive(dev, "pcie_rst4");
98 if (IS_ERR(amlogic->pcie_rst4)) {
99 if (PTR_ERR(amlogic->pcie_rst4) != -EPROBE_DEFER)
100 dev_err(dev, "pcie_rst4 reset property in node\n");
101 return PTR_ERR(amlogic->pcie_rst4);
102 }
103
104 amlogic->pcie_rst5 = devm_reset_control_get_exclusive(dev, "pcie_rst5");
105 if (IS_ERR(amlogic->pcie_rst5)) {
106 if (PTR_ERR(amlogic->pcie_rst5) != -EPROBE_DEFER)
107 dev_err(dev, "pcie_rst5 reset property in node\n");
108 return PTR_ERR(amlogic->pcie_rst5);
109 }
110
111 amlogic->pcie_rst6 = devm_reset_control_get_exclusive(dev, "pcie_rst6");
112 if (IS_ERR(amlogic->pcie_rst6)) {
113 if (PTR_ERR(amlogic->pcie_rst6) != -EPROBE_DEFER)
114 dev_err(dev, "pcie_rst6 reset property in node\n");
115 return PTR_ERR(amlogic->pcie_rst6);
116 }
117
118 amlogic->pcie_rst7 = devm_reset_control_get_exclusive(dev, "pcie_rst7");
119 if (IS_ERR(amlogic->pcie_rst7)) {
120 if (PTR_ERR(amlogic->pcie_rst7) != -EPROBE_DEFER)
121 dev_err(dev, "pcie_rst7 reset property in node\n");
122 return PTR_ERR(amlogic->pcie_rst7);
123 }
124
125 return 0;
126
127get_rst_reg:
128 rst_regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
129 "reset-base");
130 amlogic->rst_base = devm_ioremap(dev, rst_regs->start,
131 resource_size(rst_regs));
132 if (IS_ERR(amlogic->rst_base)) {
133 dev_err(dev, "failed to request rst_base\n");
134 return PTR_ERR(amlogic->rst_base);
135 }
136
137 ret = of_property_read_u32(node, "pcie-m31phy-rst-bit",
138 &amlogic->m31phy_rst_bit);
139 if (ret) {
140 dev_err(dev, "failed to request m31phy_rst_bit\n");
141 return ret;
142 }
143
144 ret = of_property_read_u32(node, "pcie-gen3-l0-rst-bit",
145 &amlogic->gen3_l0_rst_bit);
146 if (ret) {
147 dev_err(dev, "failed to request gen3_l0_rst_bit\n");
148 return ret;
149 }
150
151 ret = of_property_read_u32(node, "pcie-apb-rst-bit",
152 &amlogic->apb_rst_bit);
153 if (ret) {
154 dev_err(dev, "failed to request apb_rst_bit\n");
155 return ret;
156 }
157
158 ret = of_property_read_u32(node, "pcie-phy-rst-bit",
159 &amlogic->phy_rst_bit);
160 if (ret) {
161 dev_err(dev, "failed to request phy_rst_bit\n");
162 return ret;
163 }
164
165 ret = of_property_read_u32(node, "pcie-a-rst-bit",
166 &amlogic->pcie_a_rst_bit);
167 if (ret) {
168 dev_err(dev, "failed to request pcie_a_rst_bit\n");
169 return ret;
170 }
171
172 ret = of_property_read_u32(node, "pcie-rst-bit",
173 &amlogic->pcie_rst_bit);
174 if (ret) {
175 dev_err(dev, "failed to request pcie_rst_bit\n");
176 return ret;
177 }
178
179 ret = of_property_read_u32(node, "pcie-rst-mask",
180 &amlogic->pcie_rst_mask);
181 if (ret) {
182 dev_err(dev, "failed to request pcie_rst_size\n");
183 return ret;
184 }
185
186 return 0;
187}
188
189int amlogic_pcie_set_reset(struct amlogic_pcie *amlogic, bool set)
190{
191 struct device *dev = amlogic->dev;
192 int err = 0, val = 0;
193 int regs = 0;
194
195 if (amlogic->rst_base)
196 goto set_rst_reg;
197
198 err = reset_control_deassert(amlogic->m31phy_rst);
199 if (err < 0) {
200 dev_err(dev, "deassert m31phy_rst err %d\n", err);
201 return err;
202 }
203
204 err = reset_control_assert(amlogic->m31phy_rst);
205 if (err < 0) {
206 dev_err(dev, "assert m31phy_rst err %d\n", err);
207 return err;
208 }
209
210 err = reset_control_deassert(amlogic->gen3_l0_rst);
211 if (err < 0) {
212 dev_err(dev, "deassert gen3_l0_rst err %d\n", err);
213 return err;
214 }
215
216 err = reset_control_assert(amlogic->gen3_l0_rst);
217 if (err < 0) {
218 dev_err(dev, "assert gen3_l0_rst err %d\n", err);
219 return err;
220 }
221
222 err = reset_control_deassert(amlogic->pcie_apb_rst);
223 if (err < 0) {
224 dev_err(dev, "deassert pcie_apb_rst err %d\n", err);
225 return err;
226 }
227
228 err = reset_control_assert(amlogic->pcie_apb_rst);
229 if (err < 0) {
230 dev_err(dev, "assert pcie_apb_rst err %d\n", err);
231 return err;
232 }
233
234 err = reset_control_deassert(amlogic->pcie_phy_rst);
235 if (err) {
236 dev_err(dev, "deassert pcie_phy_rst err %d\n", err);
237 return err;
238 }
239
240 err = reset_control_assert(amlogic->pcie_phy_rst);
241 if (err < 0) {
242 dev_err(dev, "assert pcie_phy_rst err %d\n", err);
243 return err;
244 }
245
246 err = reset_control_deassert(amlogic->pcie_a_rst);
247 if (err) {
248 dev_err(dev, "deassert pcie_a_rst err %d\n", err);
249 return err;
250 }
251
252 err = reset_control_assert(amlogic->pcie_a_rst);
253 if (err < 0) {
254 dev_err(dev, "assert pcie_a_rst err %d\n", err);
255 return err;
256 }
257
258 err = reset_control_deassert(amlogic->pcie_rst0);
259 if (err) {
260 dev_err(dev, "deassert pcie_rst0 err %d\n", err);
261 return err;
262 }
263
264 err = reset_control_assert(amlogic->pcie_rst0);
265 if (err < 0) {
266 dev_err(dev, "assert pcie_rst0 err %d\n", err);
267 return err;
268 }
269
270 err = reset_control_deassert(amlogic->pcie_rst1);
271 if (err) {
272 dev_err(dev, "deassert pcie_rst1 err %d\n", err);
273 return err;
274 }
275
276 err = reset_control_assert(amlogic->pcie_rst1);
277 if (err < 0) {
278 dev_err(dev, "assert pcie_rst1 err %d\n", err);
279 return err;
280 }
281
282 err = reset_control_deassert(amlogic->pcie_rst2);
283 if (err) {
284 dev_err(dev, "deassert pcie_rst2 err %d\n", err);
285 return err;
286 }
287
288 err = reset_control_assert(amlogic->pcie_rst2);
289 if (err < 0) {
290 dev_err(dev, "assert pcie_rst2 err %d\n", err);
291 return err;
292 }
293
294 err = reset_control_deassert(amlogic->pcie_rst3);
295 if (err) {
296 dev_err(dev, "deassert pcie_rst3 err %d\n", err);
297 return err;
298 }
299
300 err = reset_control_assert(amlogic->pcie_rst3);
301 if (err < 0) {
302 dev_err(dev, "assert pcie_rst3 err %d\n", err);
303 return err;
304 }
305
306 err = reset_control_deassert(amlogic->pcie_rst4);
307 if (err) {
308 dev_err(dev, "deassert pcie_rst4 err %d\n", err);
309 return err;
310 }
311
312 err = reset_control_assert(amlogic->pcie_rst4);
313 if (err < 0) {
314 dev_err(dev, "assert pcie_rst4 err %d\n", err);
315 return err;
316 }
317
318 err = reset_control_deassert(amlogic->pcie_rst5);
319 if (err) {
320 dev_err(dev, "deassert pcie_rst5 err %d\n", err);
321 return err;
322 }
323
324 err = reset_control_assert(amlogic->pcie_rst5);
325 if (err < 0) {
326 dev_err(dev, "assert pcie_rst5 err %d\n", err);
327 return err;
328 }
329
330 err = reset_control_deassert(amlogic->pcie_rst6);
331 if (err) {
332 dev_err(dev, "deassert pcie_rst6 err %d\n", err);
333 return err;
334 }
335
336 err = reset_control_assert(amlogic->pcie_rst6);
337 if (err < 0) {
338 dev_err(dev, "assert pcie_rst6 err %d\n", err);
339 return err;
340 }
341
342 err = reset_control_deassert(amlogic->pcie_rst7);
343 if (err) {
344 dev_err(dev, "deassert pcie_rst7 err %d\n", err);
345 return err;
346 }
347
348 err = reset_control_assert(amlogic->pcie_rst7);
349 if (err < 0) {
350 dev_err(dev, "assert pcie_rst7 err %d\n", err);
351 return err;
352 }
353
354set_rst_reg:
355 if (!set) {
356 val = readl(amlogic->rst_base + RESETCTRL3_OFFSET);
357 val &= ~(amlogic->pcie_rst_mask << amlogic->pcie_rst_bit);
358 writel(val, amlogic->rst_base + RESETCTRL3_OFFSET);
359 val = amlogic_pciectrl_read(amlogic, PCIE_A_CTRL0);
360
361 amlogic_pciectrl_write(amlogic, val, PCIE_A_CTRL0);
362
363 val = readl(amlogic->rst_base + RESETCTRL1_OFFSET);
364 val &= ~((1 << amlogic->pcie_a_rst_bit) |
365 (1 << amlogic->phy_rst_bit) |
366 (1 << amlogic->apb_rst_bit));
367 writel(val, amlogic->rst_base + RESETCTRL1_OFFSET);
368 } else {
369 val = readl(amlogic->rst_base + RESETCTRL3_OFFSET);
370 val |= (amlogic->pcie_rst_mask << amlogic->pcie_rst_bit);
371 writel(val, amlogic->rst_base + RESETCTRL3_OFFSET);
372
373 /*PHY_Register_XCFGA_COM value from vendor recommend*/
374 regs = readl(amlogic->phy_base + 0x828);
375 regs &= ~GENMASK(31, 28);
376 regs |= (2 << 28);
377 writel(regs, amlogic->phy_base + 0x828);
378
379 /*PHY_Register_XCFGD value from vendor recommend*/
380 regs = readl(amlogic->phy_base + 0x460);
381 regs &= ~GENMASK(23, 20);
382 regs |= (4 << 20);
383 writel(regs, amlogic->phy_base + 0x460);
384
385 val = readl(amlogic->rst_base + RESETCTRL1_OFFSET);
386 val |= ((1 << amlogic->pcie_a_rst_bit) |
387 (1 << amlogic->phy_rst_bit) |
388 (1 << amlogic->apb_rst_bit));
389 writel(val, amlogic->rst_base + RESETCTRL1_OFFSET);
390
391 val = amlogic_pciectrl_read(amlogic, PCIE_A_CTRL0);
392
393 if (amlogic->is_rc)
394 val |= PORT_TYPE;
395 else
396 val &= ~PORT_TYPE;
397 amlogic_pciectrl_write(amlogic, val, PCIE_A_CTRL0);
398 }
399
400 return 0;
401}
402EXPORT_SYMBOL_GPL(amlogic_pcie_set_reset);
403
404void amlogic_pcie_set_reset_gpio(struct amlogic_pcie *amlogic)
405{
406 struct device *dev = amlogic->dev;
407 int ret = 0;
408
409 /*reset-gpio-type 0:Shared pad(no reset)1:OD pad2:Normal pad*/
410 if (amlogic->gpio_type == 0) {
411 dev_info(dev, "gpio multiplex, don't reset!\n");
412 } else if (amlogic->gpio_type == 1) {
413 dev_info(dev, "pad gpio\n");
414 if (amlogic->reset_gpio >= 0)
415 ret = devm_gpio_request(dev,
416 amlogic->reset_gpio,
417 "RESET");
418
419 if (!ret && gpio_is_valid(amlogic->reset_gpio)) {
420 dev_info(dev, "GPIO pad: assert reset\n");
421 gpio_direction_output(amlogic->reset_gpio, 0);
422 usleep_range(5000, 6000);
423 gpio_direction_input(amlogic->reset_gpio);
424 }
425 } else {
426 dev_info(dev, "normal gpio\n");
427 if (amlogic->reset_gpio >= 0) {
428 ret = devm_gpio_request(dev,
429 amlogic->reset_gpio,
430 "RESET");
431 if (!ret)
432 gpio_direction_output(amlogic->reset_gpio, 0);
433 }
434 if (gpio_is_valid(amlogic->reset_gpio)) {
435 dev_info(dev, "GPIO normal: assert reset\n");
436 gpio_set_value_cansleep(amlogic->reset_gpio, 0);
437 usleep_range(5000, 6000);
438 gpio_set_value_cansleep(amlogic->reset_gpio, 1);
439 }
440 }
441}
442
443int amlogic_pcie_parse_dt(struct amlogic_pcie *amlogic)
444{
445 struct device *dev = amlogic->dev;
446 struct platform_device *pdev = to_platform_device(dev);
447 struct device_node *node = dev->of_node;
448 struct resource *res;
449 int err;
450
451 if (of_get_property(dev->of_node, "pinctrl-names", NULL))
452 amlogic->p = devm_pinctrl_get_select_default(dev);
453
454 err = of_property_read_u32(node, "gpio-type",
455 &amlogic->gpio_type);
456
457 amlogic->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
458
459 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
460 "apb-base");
461 amlogic->apb_base = devm_ioremap_resource(dev, res);
462 if (IS_ERR(amlogic->apb_base)) {
463 dev_err(dev, "failed to request apb_base\n");
464 return PTR_ERR(amlogic->apb_base);
465 }
466
467 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
468 "pcictrl-base");
469 amlogic->pcictrl_base = devm_ioremap_resource(dev, res);
470 if (IS_ERR(amlogic->pcictrl_base)) {
471 dev_err(dev, "failed to request pcictrl_base\n");
472 return PTR_ERR(amlogic->pcictrl_base);
473 }
474
475 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
476 "ecam-base");
477 if (res) {
478 amlogic->ecam_size = resource_size(res);
479 amlogic->ecam_bus_base = res->start;
480 } else {
481 dev_err(dev, "Missing *ecam-base* reg space\n");
482 return -ENODEV;
483 }
484 amlogic->ecam_base = devm_pci_remap_cfg_resource(dev, res);
485 if (IS_ERR(amlogic->ecam_base)) {
486 dev_err(dev, "failed to request ecam_base\n");
487 return PTR_ERR(amlogic->ecam_base);
488 }
489
490 err = amlogic_pcie_get_phys(amlogic);
491 if (err)
492 return err;
493
494 amlogic->lanes = 1;
495 err = of_property_read_u32(node, "num-lanes", &amlogic->lanes);
496 if (!err && amlogic->lanes == 0) {
497 dev_err(dev, "invalid num-lanes, default to use one lane\n");
498 amlogic->lanes = 1;
499 }
500
501 err = amlogic_pcie_get_reset(amlogic);
502 if (err)
503 return err;
504
505 amlogic->pcie_400m_clk = devm_clk_get(dev, "pcie_400m_clk");
506 if (IS_ERR(amlogic->pcie_400m_clk)) {
507 dev_err(dev, "pcie_400m_clk not found\n");
508 return PTR_ERR(amlogic->pcie_400m_clk);
509 }
510
511 amlogic->pcie_tl_clk = devm_clk_get(dev, "pcie_tl_clk");
512 if (IS_ERR(amlogic->pcie_tl_clk)) {
513 dev_err(dev, "pcie_tl_clk not found\n");
514 return PTR_ERR(amlogic->pcie_tl_clk);
515 }
516
517 amlogic->cts_pcie_clk = devm_clk_get(dev, "cts_pcie_clk");
518 if (IS_ERR(amlogic->cts_pcie_clk)) {
519 dev_err(dev, "cts_pcie_clk not found\n");
520 return PTR_ERR(amlogic->cts_pcie_clk);
521 }
522
523 amlogic->pcie_clk = devm_clk_get(dev, "pcie");
524 if (IS_ERR(amlogic->pcie_clk)) {
525 dev_err(dev, "pcie_clk not found\n");
526 return PTR_ERR(amlogic->pcie_clk);
527 }
528
529 amlogic->phy_clk = devm_clk_get(dev, "pcie_phy");
530 if (IS_ERR(amlogic->phy_clk)) {
531 dev_err(dev, "phy_clk not found\n");
532 return PTR_ERR(amlogic->phy_clk);
533 }
534
535 amlogic->refpll_clk = devm_clk_get(dev, "pcie_refpll");
536 if (IS_ERR(amlogic->refpll_clk)) {
537 dev_err(dev, "refpll_clk not found\n");
538 return PTR_ERR(amlogic->refpll_clk);
539 }
540
541 amlogic->dev_clk = devm_clk_get(dev, "pcie_hcsl");
542 if (IS_ERR(amlogic->dev_clk)) {
543 dev_err(dev, "dev_clk not found\n");
544 return PTR_ERR(amlogic->dev_clk);
545 }
546
547 return 0;
548}
549EXPORT_SYMBOL_GPL(amlogic_pcie_parse_dt);
550
551bool amlogic_pcie_link_up(struct amlogic_pcie *amlogic)
552{
553 struct device *dev = amlogic->dev;
554 u32 ltssm_up = 0;
555 u32 neg_link_speed = 0;
556 int cnt = 0;
557 u32 val = 0;
558
559 do {
560 ltssm_up = amlogic_pciectrl_read(amlogic, PCIE_A_CTRL5);
561 ltssm_up = ((ltssm_up >> 18) & 0x1f) == 0x10 ? 1 : 0;
562 dev_dbg(dev, "%s:%d, ltssm_up=0x%x\n", __func__, __LINE__,
563 ((amlogic_pciectrl_read(amlogic,
564 PCIE_A_CTRL5) >> 18) & 0x1f));
565
566 val = amlogic_pcieinter_read(amlogic, PCIE_BASIC_STATUS);
567 neg_link_speed = (val >> 8) & 0xf;
568
569 if (ltssm_up)
570 dev_dbg(dev, "ltssm_up is on\n");
571 if (neg_link_speed)
572 dev_dbg(dev, "speed_okay\n");
573
574 if (cnt >= WAIT_LINKUP_TIMEOUT) {
575 dev_err(dev, "Error: Wait linkup timeout.\n");
576 return false;
577 }
578 cnt++;
579 udelay(20);
580 } while (ltssm_up == 0);
581
582 return true;
583}
584
585void amlogic_set_max_rd_req_size(struct amlogic_pcie *amlogic, int size)
586{
587 int max_rd_req_size = 1;
588 u32 val = 0;
589
590 switch (size) {
591 case 128:
592 max_rd_req_size = 0;
593 break;
594 case 256:
595 max_rd_req_size = 1;
596 break;
597 case 512:
598 max_rd_req_size = 2;
599 break;
600 case 1024:
601 max_rd_req_size = 3;
602 break;
603 case 2048:
604 max_rd_req_size = 4;
605 break;
606 case 4096:
607 max_rd_req_size = 5;
608 break;
609 default:
610 max_rd_req_size = 1;
611 break;
612 }
613
614 val = amlogic_pcieinter_read(amlogic,
615 PCIE_CAP_OFFSET + PCI_EXP_DEVCTL);
616 val &= (~PCI_EXP_DEVCTL_READRQ);
617 val |= (max_rd_req_size << 12);
618 amlogic_pcieinter_write(amlogic, val,
619 PCIE_CAP_OFFSET + PCI_EXP_DEVCTL);
620}
621
622void amlogic_set_max_payload(struct amlogic_pcie *amlogic, int size)
623{
624 int max_payload_size = 1;
625 u32 val = 0;
626
627 switch (size) {
628 case 128:
629 max_payload_size = 0;
630 break;
631 case 256:
632 max_payload_size = 1;
633 break;
634 case 512:
635 max_payload_size = 2;
636 break;
637 case 1024:
638 max_payload_size = 3;
639 break;
640 case 2048:
641 max_payload_size = 4;
642 break;
643 case 4096:
644 max_payload_size = 5;
645 break;
646 default:
647 max_payload_size = 1;
648 break;
649 }
650
651 val = amlogic_pcieinter_read(amlogic,
652 PCIE_CAP_OFFSET + PCI_EXP_DEVCTL);
653 val &= (~PCI_EXP_DEVCTL_PAYLOAD);
654 val |= (max_payload_size << 5);
655 amlogic_pcieinter_write(amlogic, val,
656 PCIE_CAP_OFFSET + PCI_EXP_DEVCTL);
657}
658
/**
 * amlogic_pcie_init_port - reset the port, tune the PHY and wait for link.
 * @amlogic: host controller state
 *
 * Pulses the M31 PHY wrapper reset, runs the full controller reset
 * sequence around the vendor PHY tuning writes, toggles the endpoint
 * reset GPIO and finally waits for link training to complete.
 *
 * Returns 0 on success, -ETIMEDOUT when the link never comes up, or the
 * first reset-sequence errno.
 */
int amlogic_pcie_init_port(struct amlogic_pcie *amlogic)
{
	struct device *dev = amlogic->dev;
	int err;
	u32 regs;
	u32 val;

	/*
	 * Pulse the M31 PHY wrapper reset: clear the bit, then set it.
	 * NOTE(review): this dereferences rst_base and uses
	 * m31phy_rst_bit, both of which are only populated when
	 * amlogic_pcie_get_reset() took the raw-register fallback —
	 * confirm this is never reached on the reset-controller path
	 * (rst_base would be NULL there).
	 */
	val = readl(amlogic->rst_base + RESETCTRL1_OFFSET);
	val &= ~(1 << amlogic->m31phy_rst_bit);
	writel(val, amlogic->rst_base + RESETCTRL1_OFFSET);
	val = readl(amlogic->rst_base + RESETCTRL1_OFFSET);
	val |= (1 << amlogic->m31phy_rst_bit);
	writel(val, amlogic->rst_base + RESETCTRL1_OFFSET);

	/* Release the controller resets before touching the PHY. */
	err = amlogic_pcie_set_reset(amlogic, false);
	if (err)
		return err;

	/*PHY_Register_XCFGD value from vendor recommend*/
	regs = readl(amlogic->phy_base + 0x470);
	regs |= (1 << 6);
	writel(regs, amlogic->phy_base + 0x470);

	/*set phy for gen3 device*/
	regs = readl(amlogic->phy_base);
	regs |= BIT(19);
	writel(regs, amlogic->phy_base);
	usleep_range(20, 30);

	/* Toggle the endpoint PERST# line. */
	amlogic_pcie_set_reset_gpio(amlogic);

	/* Re-assert the resets / apply RC-vs-EP mode and PHY tuning. */
	err = amlogic_pcie_set_reset(amlogic, true);
	if (err)
		return err;

	if (!amlogic_pcie_link_up(amlogic))
		return -ETIMEDOUT;
	regs = amlogic_pcieinter_read(amlogic, PCIE_BASIC_STATUS);

	/* Bits 13:8 = negotiated speed, bits 7:0 = negotiated width. */
	dev_info(dev, "current linK speed is GEN%d,link width is x%d\n",
		 ((regs >> 8) & 0x3f), (regs & 0xff));

	return 0;
}
EXPORT_SYMBOL_GPL(amlogic_pcie_init_port);
704
705int amlogic_pcie_get_phys(struct amlogic_pcie *amlogic)
706{
707 struct device *dev = amlogic->dev;
708 struct platform_device *pdev = to_platform_device(dev);
709 struct resource *res;
710 struct phy *phy;
711
712 phy = devm_of_phy_get(dev, dev->of_node, "pcie-phy");
713 if (IS_ERR(phy)) {
714 if (PTR_ERR(phy) != -EPROBE_DEFER)
715 goto get_phy_reg;
716 }
717
718get_phy_reg:
719 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
720 "phy-base");
721 amlogic->phy_base = devm_ioremap_resource(dev, res);
722 if (IS_ERR(amlogic->phy_base)) {
723 dev_err(dev, "failed to request phy_base\n");
724 return PTR_ERR(amlogic->phy_base);
725 }
726 return 0;
727}
728EXPORT_SYMBOL_GPL(amlogic_pcie_get_phys);
729
/*
 * amlogic_pcie_deinit_phys - PHY teardown hook.
 * Intentionally empty: the PHY is driven through raw phy_base register
 * writes and needs no teardown; kept as the symmetric counterpart to
 * amlogic_pcie_init_phys() for callers.
 */
void amlogic_pcie_deinit_phys(struct amlogic_pcie *amlogic)
{
}
EXPORT_SYMBOL_GPL(amlogic_pcie_deinit_phys);
734
/*
 * amlogic_pcie_init_phys - PHY bring-up hook.
 * Intentionally empty: all PHY programming happens via direct phy_base
 * register writes in amlogic_pcie_init_port()/amlogic_pcie_set_reset().
 */
void amlogic_pcie_init_phys(struct amlogic_pcie *amlogic)
{
}
EXPORT_SYMBOL_GPL(amlogic_pcie_init_phys);
739
740int amlogic_pcie_enable_clocks(struct amlogic_pcie *amlogic)
741{
742 struct device *dev = amlogic->dev;
743 int err = 0, ret = 0;
744
745 ret = clk_set_rate(amlogic->pcie_400m_clk, 400000000);
746 err = clk_prepare_enable(amlogic->pcie_400m_clk);
747 if (err || ret) {
748 dev_err(dev, "unable to enable pcie_400m_clk clock\n");
749 return err;
750 }
751
752 ret = clk_set_rate(amlogic->pcie_tl_clk, 125000000);
753 err = clk_prepare_enable(amlogic->pcie_tl_clk);
754 if (err || ret) {
755 dev_err(dev, "unable to enable pcie_tl_clk clock\n");
756 goto err_400m_clk;
757 }
758
759 ret = clk_set_rate(amlogic->cts_pcie_clk, 200000000);
760 err = clk_prepare_enable(amlogic->cts_pcie_clk);
761 if (err || ret) {
762 dev_err(dev, "unable to enable cts_pcie_clk clock\n");
763 goto err_tl_clk;
764 }
765
766 err = clk_prepare_enable(amlogic->dev_clk);
767 if (err) {
768 dev_err(dev, "unable to enable dev_clk clock\n");
769 goto err_cts_pcie_clk;
770 }
771
772 err = clk_prepare_enable(amlogic->phy_clk);
773 if (err) {
774 dev_err(dev, "unable to enable phy_clk clock\n");
775 goto err_dev_clk;
776 }
777
778 err = clk_prepare_enable(amlogic->refpll_clk);
779 if (err) {
780 dev_err(dev, "unable to enable refpll_clk clock\n");
781 goto err_phy_clk;
782 }
783
784 err = clk_prepare_enable(amlogic->pcie_clk);
785 if (err) {
786 dev_err(dev, "unable to enable pcie_clk clock\n");
787 goto err_refpll_clk;
788 }
789
790 return 0;
791
792err_refpll_clk:
793 clk_disable_unprepare(amlogic->refpll_clk);
794err_phy_clk:
795 clk_disable_unprepare(amlogic->phy_clk);
796err_dev_clk:
797 clk_disable_unprepare(amlogic->dev_clk);
798err_cts_pcie_clk:
799 clk_disable_unprepare(amlogic->cts_pcie_clk);
800err_tl_clk:
801 clk_disable_unprepare(amlogic->pcie_tl_clk);
802err_400m_clk:
803 clk_disable_unprepare(amlogic->pcie_400m_clk);
804 return err;
805}
806EXPORT_SYMBOL_GPL(amlogic_pcie_enable_clocks);
807
808void amlogic_pcie_disable_clocks(struct amlogic_pcie *amlogic)
809{
810 clk_disable_unprepare(amlogic->pcie_clk);
811 clk_disable_unprepare(amlogic->refpll_clk);
812 clk_disable_unprepare(amlogic->phy_clk);
813 clk_disable_unprepare(amlogic->dev_clk);
814 clk_disable_unprepare(amlogic->cts_pcie_clk);
815 clk_disable_unprepare(amlogic->pcie_tl_clk);
816 clk_disable_unprepare(amlogic->pcie_400m_clk);
817}
818EXPORT_SYMBOL_GPL(amlogic_pcie_disable_clocks);
819
/**
 * amlogic_pcie_cfg_addr_map - program one address-translation (ATR) entry.
 * @amlogic:    host controller state
 * @atr_base:   register offset of the ATR table entry to program
 * @src_addr:   CPU-side window start (4 KiB aligned; low 12 bits masked)
 * @trsl_addr:  translated (far-side) window start (4 KiB aligned)
 * @size:       encoded window size; bytes covered = 2^(size + 1)
 * @trsl_param: translation parameters, written verbatim to the entry
 */
void amlogic_pcie_cfg_addr_map(struct amlogic_pcie *amlogic,
			       unsigned int atr_base,
			       u64 src_addr,
			       u64 trsl_addr,
			       int size,
			       int trsl_param)
{
	struct device *dev = amlogic->dev;
	u32 val;

	/* ATR_SRC_ADDR_LOW:
	 * - bit 0: enable entry,
	 * - bits 1-6: ATR window size: total size in bytes: 2^(ATR_WSIZE + 1)
	 * - bits 7-11: reserved
	 * - bits 12-31: start of source address
	 */
	/*
	 * NOTE(review): the enable bit is set here, before the translated
	 * address registers below are programmed — confirm the hardware
	 * latches the entry only on a later write, or that stale
	 * translations during this window are harmless.
	 */
	val = (src_addr & 0xfffff000) | ((size & 0x3f) << 1) | (1 << 0);
	amlogic_pcieinter_write(amlogic, val, atr_base + ATR_SRC_ADDR_LOW);

	/* Upper 32 bits of both window addresses, then the parameters. */
	amlogic_pcieinter_write(amlogic,
				(src_addr >> 32),
				atr_base + ATR_SRC_ADDR_HIGH);
	amlogic_pcieinter_write(amlogic,
				(trsl_addr & 0xfffff000),
				atr_base + ATR_TRSL_ADDR_LOW);
	amlogic_pcieinter_write(amlogic,
				(trsl_addr >> 32),
				atr_base + ATR_TRSL_ADDR_HIGH);
	amlogic_pcieinter_write(amlogic, trsl_param,
				atr_base + ATR_TRSL_PARAM);

	/* Bit 22 of trsl_param selects the window direction for the log. */
	dev_dbg(dev,
		"ATR Map:0x%010llx %s 0x%010llx [0x%010llx] (param: 0x%06x)\n",
		src_addr, (trsl_param & 0x400000) ? "<-" : "->", trsl_addr,
		((u64)1) << (size + 1), trsl_param);
}
EXPORT_SYMBOL_GPL(amlogic_pcie_cfg_addr_map);