// SPDX-License-Identifier: GPL-2.0+
/*
* PCI <-> OF mapping helpers
*
* Copyright 2011 IBM Corp.
*/
#define pr_fmt(fmt) "PCI: OF: " fmt
#include <linux/cleanup.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include "pci.h"
#ifdef CONFIG_PCI
/**
* pci_set_of_node - Find and set device's DT device_node
* @dev: the PCI device structure to fill
*
* Returns 0 on success with of_node set or when no device is described in the
* DT. Returns -ENODEV if the device is present, but disabled in the DT.
*/
int pci_set_of_node(struct pci_dev *dev)
{
if (!dev->bus->dev.of_node)
return 0;
struct device_node *node __free(device_node) =
of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn);
if (!node)
return 0;
struct device *pdev __free(put_device) =
bus_find_device_by_of_node(&platform_bus_type, node);
if (pdev)
dev->bus->dev.of_node_reused = true;
device_set_node(&dev->dev, of_fwnode_handle(no_free_ptr(node)));
return 0;
}
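
/*
 * Drop the DT node reference taken by pci_set_of_node() and clear the
 * device's fwnode pointer.
 */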
void pci_release_of_node(struct pci_dev *dev)
{
of_node_put(dev->dev.of_node);
device_set_node(&dev->dev, NULL);
}
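
/*
 * Attach the matching DT node to a pci_bus: the host bridge's node for a
 * root bus, otherwise the bridge device's own node. Also mark the bridge
 * as external-facing when the DT "external-facing" property says so.
 */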
void pci_set_bus_of_node(struct pci_bus *bus)
{
struct device_node *node;
if (bus->self == NULL) {
node = pcibios_get_phb_of_node(bus);
} else {
node = of_node_get(bus->self->dev.of_node);
if (node && of_property_read_bool(node, "external-facing"))
bus->self->external_facing = true;
}
device_set_node(&bus->dev, of_fwnode_handle(node));
}
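
/*
 * Drop the bus's DT node reference taken by pci_set_bus_of_node() and
 * clear the bus device's fwnode pointer.
 */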
void pci_release_bus_of_node(struct pci_bus *bus)
{
of_node_put(bus->dev.of_node);
device_set_node(&bus->dev, NULL);
}
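
/*
 * Default (weak) way to find the DT node of a PCI host bridge; architectures
 * that keep the PHB node elsewhere can override this.
 */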
struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
{
/* This should only be called for PHBs */
if (WARN_ON(bus->self || bus->parent))
return NULL;
/*
* Look for a node pointer in either the intermediary device we
* create above the root bus or its own parent. Normally only
* the latter is populated.
*/
if (bus->bridge->of_node)
return of_node_get(bus->bridge->of_node);
if (bus->bridge->parent && bus->bridge->parent->of_node)
return of_node_get(bus->bridge->parent->of_node);
return NULL;
}
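
/*
 * Find the MSI irq_domain for a host bridge from its DT node: first follow
 * the node's MSI parent phandle, then look for a PCI-MSI domain attached to
 * the node itself, and finally fall back to any irq domain matching the node.
 */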
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
{
#ifdef CONFIG_IRQ_DOMAIN
struct irq_domain *d;
if (!bus->dev.of_node)
return NULL;
/* Start looking for a phandle to an MSI controller. */
d = of_msi_get_domain(&bus->dev, bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
if (d)
return d;
/*
* If we don't have an msi-parent property, look for a domain
* directly attached to the host bridge.
*/
d = irq_find_matching_host(bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
if (d)
return d;
return irq_find_host(bus->dev.of_node);
#else
return NULL;
#endif
}
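
/*
 * Return true if the host bridge's DT node carries an "msi-map" property.
 */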
bool pci_host_of_has_msi_map(struct device *dev)
{
if (dev && dev->of_node)
return of_get_property(dev->of_node, "msi-map", NULL);
return false;
}
static inline int __of_pci_pci_compare(struct device_node *node,
unsigned int data)
{
int devfn;
devfn = of_pci_get_devfn(node);
if (devfn < 0)
return 0;
return devfn == data;
}
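
/**
 * of_pci_find_child_device - Find the DT node of a child device by devfn
 * @parent: DT node of the PCI bus the device sits on
 * @devfn: encoded device/function number to look for
 *
 * Walks the children of @parent, descending into any "multifunc-device"
 * container nodes, and returns the node whose "reg" property encodes @devfn.
 *
 * Return: the matching node with a reference held, or NULL if none is found.
 */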
struct device_node *of_pci_find_child_device(struct device_node *parent,
unsigned int devfn)
{
struct device_node *node, *node2;
for_each_child_of_node(parent, node) {
if (__of_pci_pci_compare(node, devfn))
return node;
/*
* Some OFs create a parent node "multifunc-device" as
* a fake root for all functions of a multi-function
* device; we walk down into it as well.
*/
if (of_node_name_eq(node, "multifunc-device")) {
for_each_child_of_node(node, node2) {
if (__of_pci_pci_compare(node2, devfn)) {
of_node_put(node);
return node2;
}
}
}
}
return NULL;
}
EXPORT_SYMBOL_GPL(of_pci_find_child_device);
/**
* of_pci_get_devfn() - Get device and function numbers for a device node
* @np: device node
*
* Parses a standard 5-cell PCI resource and returns an 8-bit value that can
* be passed to the PCI_SLOT() and PCI_FUNC() macros to extract the device
* and function numbers respectively. On error a negative error code is
* returned.
*/
int of_pci_get_devfn(struct device_node *np)
{
u32 reg[5];
int error;
error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
if (error)
return error;
return (reg[0] >> 8) & 0xff;
}
EXPORT_SYMBOL_GPL(of_pci_get_devfn);
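
/*
 * Illustrative use of of_pci_get_devfn() (not taken from this file): a host
 * controller driver walking its DT children might do
 *
 *	for_each_child_of_node(np, child) {
 *		int devfn = of_pci_get_devfn(child);
 *
 *		if (devfn < 0)
 *			continue;
 *		dev_dbg(dev, "%pOF is device %02x function %d\n", child,
 *			PCI_SLOT(devfn), PCI_FUNC(devfn));
 *	}
 */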
/**
* of_pci_parse_bus_range() - parse the bus-range property of a PCI device
* @node: device node
* @res: address to a struct resource to return the bus-range
*
* Returns 0 on success or a negative error-code on failure.
*/
int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
{
u32 bus_range[2];
int error;
error = of_property_read_u32_array(node, "bus-range", bus_range,
ARRAY_SIZE(bus_range));
if (error)
return error;
res->name = node->name;
res->start = bus_range[0];
res->end = bus_range[1];
res->flags = IORESOURCE_BUS;
return 0;
}
EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
/**
* of_get_pci_domain_nr - Find the host bridge domain number
* of the given device node.
* @node: Device tree node with the domain information.
*
* This function will try to obtain the host bridge domain number by finding
* a property called "linux,pci-domain" of the given device node.
*
* Return:
* * >= 0 - On success, the associated domain number.
* * -EINVAL - The property "linux,pci-domain" does not exist.
* * -ENODATA - The "linux,pci-domain" property does not have a value.
* * -EOVERFLOW - Invalid "linux,pci-domain" property value.
*
* Returns the associated domain number from DT in the range [0-0xffff], or
* a negative value if the required property is not found.
*/
int of_get_pci_domain_nr(struct device_node *node)
{
u32 domain;
int error;
error = of_property_read_u32(node, "linux,pci-domain", &domain);
if (error)
return error;
return (u16)domain;
}
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
/**
* of_pci_preserve_config - Return true if the boot configuration needs to
* be preserved
* @node: Device tree node.
*
* Look for "linux,pci-probe-only" property for a given PCI controller's
* node and return true if found. Also look in the chosen node if the
* property is not found in the given controller's node. Having this
* property ensures that the kernel doesn't reconfigure the BARs and bridge
* windows that were already set up by the platform firmware.
*
* Return: true if the property exists; false otherwise.
*/
bool of_pci_preserve_config(struct device_node *node)
{
u32 val = 0;
int ret;
if (!node) {
pr_warn("device node is NULL, trying with of_chosen\n");
node = of_chosen;
}
retry:
ret = of_property_read_u32(node, "linux,pci-probe-only", &val);
if (ret) {
if (ret == -ENODATA || ret == -EOVERFLOW) {
pr_warn("Incorrect value for linux,pci-probe-only in %pOF, ignoring\n",
node);
return false;
}
if (ret == -EINVAL) {
if (node == of_chosen)
return false;
node = of_chosen;
goto retry;
}
}
if (val)
return true;
else
return false;
}
/**
* of_pci_check_probe_only - Set up probe-only mode if linux,pci-probe-only
* is present and valid
*/
void of_pci_check_probe_only(void)
{
if (of_pci_preserve_config(of_chosen))
pci_add_flags(PCI_PROBE_ONLY);
else
pci_clear_flags(PCI_PROBE_ONLY);
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
/**
* devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
* host bridge resources from DT
* @dev: host bridge device
* @busno: bus number associated with the bridge root bus
* @bus_max: maximum number of buses for this bridge
* @resources: list where the range of resources will be added after DT parsing
* @ib_resources: list where the range of inbound resources (with addresses
* from 'dma-ranges') will be added after DT parsing
* @io_base: pointer to a variable that on return will contain the physical
* address of the start of the I/O range. Can be NULL if the caller doesn't
* expect I/O ranges to be present in the device tree.
*
* This function will parse the "ranges" property of a PCI host bridge device
* node and setup the resource mapping based on its content. It is expected
* that the property conforms with the Power ePAPR document.
*
* It returns zero if the range parsing has been successful or a standard error
* value if it failed.
*/
static int devm_of_pci_get_host_bridge_resources(struct device *dev,
unsigned char busno, unsigned char bus_max,
struct list_head *resources,
struct list_head *ib_resources,
resource_size_t *io_base)
{
struct device_node *dev_node = dev->of_node;
struct resource *res, tmp_res;
struct resource *bus_range;
struct of_pci_range range;
struct of_pci_range_parser parser;
const char *range_type;
int err;
if (io_base)
*io_base = (resource_size_t)OF_BAD_ADDR;
bus_range = devm_kzalloc(dev, sizeof(*bus_range), GFP_KERNEL);
if (!bus_range)
return -ENOMEM;
dev_info(dev, "host bridge %pOF ranges:\n", dev_node);
err = of_pci_parse_bus_range(dev_node, bus_range);
if (err) {
bus_range->start = busno;
bus_range->end = bus_max;
bus_range->flags = IORESOURCE_BUS;
dev_info(dev, " No bus range found for %pOF, using %pR\n",
dev_node, bus_range);
} else {
if (bus_range->end > bus_range->start + bus_max)
bus_range->end = bus_range->start + bus_max;
}
pci_add_resource(resources, bus_range);
/* Check for ranges property */
err = of_pci_range_parser_init(&parser, dev_node);
if (err)
return 0;
dev_dbg(dev, "Parsing ranges property...\n");
for_each_of_pci_range(&parser, &range) {
/* Read next ranges element */
if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
range_type = "IO";
else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
range_type = "MEM";
else
range_type = "err";
dev_info(dev, " %6s %#012llx..%#012llx -> %#012llx\n",
range_type, range.cpu_addr,
range.cpu_addr + range.size - 1, range.pci_addr);
/*
* If we failed translation or got a zero-sized region
* then skip this range
*/
if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
continue;
err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
if (err)
continue;
res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
if (!res) {
err = -ENOMEM;
goto failed;
}
if (resource_type(res) == IORESOURCE_IO) {
if (!io_base) {
dev_err(dev, "I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
dev_node);
err = -EINVAL;
goto failed;
}
if (*io_base != (resource_size_t)OF_BAD_ADDR)
dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
dev_node);
*io_base = range.cpu_addr;
} else if (resource_type(res) == IORESOURCE_MEM) {
res->flags &= ~IORESOURCE_MEM_64;
}
pci_add_resource_offset(resources, res, res->start - range.pci_addr);
}
/* Check for dma-ranges property */
if (!ib_resources)
return 0;
err = of_pci_dma_range_parser_init(&parser, dev_node);
if (err)
return 0;
dev_dbg(dev, "Parsing dma-ranges property...\n");
for_each_of_pci_range(&parser, &range) {
/*
* If we failed translation or got a zero-sized region
* then skip this range
*/
if (((range.flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM) ||
range.cpu_addr == OF_BAD_ADDR || range.size == 0)
continue;
dev_info(dev, " %6s %#012llx..%#012llx -> %#012llx\n",
"IB MEM", range.cpu_addr,
range.cpu_addr + range.size - 1, range.pci_addr);
err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
if (err)
continue;
res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
if (!res) {
err = -ENOMEM;
goto failed;
}
pci_add_resource_offset(ib_resources, res,
res->start - range.pci_addr);
}
return 0;
failed:
pci_free_resource_list(resources);
return err;
}
#if IS_ENABLED(CONFIG_OF_IRQ)
/**
* of_irq_parse_pci - Resolve the interrupt for a PCI device
* @pdev: the device whose interrupt is to be resolved
* @out_irq: structure of_phandle_args filled by this function
*
* This function resolves the PCI interrupt for a given PCI device. If a
* device-node exists for a given pci_dev, it will use normal OF tree
* walking. If not, it will implement standard swizzling and walk up the
* PCI tree until a device node is found, at which point it will finish
* resolving using OF tree walking.
*/
static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
struct device_node *dn, *ppnode = NULL;
struct pci_dev *ppdev;
__be32 laddr[3];
u8 pin;
int rc;
/*
* Check if we have a device node, if yes, fallback to standard
* device tree parsing
*/
dn = pci_device_to_OF_node(pdev);
if (dn) {
rc = of_irq_parse_one(dn, 0, out_irq);
if (!rc)
return rc;
}
/*
* Ok, we don't, time to have fun. Let's start by building up an
* interrupt spec. We assume #interrupt-cells is 1, which is standard
* for PCI. If yours differs, don't use this routine.
*/
rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
if (rc != 0)
goto err;
/* No pin, exit with no error message. */
if (pin == 0)
return -ENODEV;
/* Local interrupt-map in the device node? Use it! */
if (of_property_present(dn, "interrupt-map")) {
pin = pci_swizzle_interrupt_pin(pdev, pin);
ppnode = dn;
}
/* Now we walk up the PCI tree */
while (!ppnode) {
/* Get the pci_dev of our parent */
ppdev = pdev->bus->self;
/* Ouch, it's a host bridge... */
if (ppdev == NULL) {
ppnode = pci_bus_to_OF_node(pdev->bus);
/* No node for host bridge? Give up */
if (ppnode == NULL) {
rc = -EINVAL;
goto err;
}
} else {
/* We found a P2P bridge, check if it has a node */
ppnode = pci_device_to_OF_node(ppdev);
}
/*
* Ok, we have found a parent with a device-node, hand over to
* the OF parsing code.
* We build a unit address from the Linux device to be used for
* resolution. Note that we use the Linux bus number, which may
* not match your firmware bus numbering.
* Fortunately, in most cases, interrupt-map-mask doesn't
* include the bus number as part of the matching.
* You should still be careful about that, though, if you intend
* to rely on this function (e.g. if your firmware doesn't
* create device nodes for all PCI devices).
*/
if (ppnode)
break;
/*
* We can only get here if we hit a P2P bridge with no node;
* let's do standard swizzling and try again
*/
pin = pci_swizzle_interrupt_pin(pdev, pin);
pdev = ppdev;
}
out_irq->np = ppnode;
out_irq->args_count = 1;
out_irq->args[0] = pin;
laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
laddr[1] = laddr[2] = cpu_to_be32(0);
rc = of_irq_parse_raw(laddr, out_irq);
if (rc)
goto err;
return 0;
err:
if (rc == -ENOENT) {
dev_warn(&pdev->dev,
"%s: no interrupt-map found, INTx interrupts not available\n",
__func__);
pr_warn_once("%s: possibly some PCI slots don't have level triggered interrupts capability\n",
__func__);
} else {
dev_err(&pdev->dev, "%s: failed with rc=%d\n", __func__, rc);
}
return rc;
}
/**
* of_irq_parse_and_map_pci() - Decode a PCI IRQ from the device tree and map to a VIRQ
* @dev: The PCI device needing an IRQ
* @slot: PCI slot number; passed when used as map_irq callback. Unused
* @pin: PCI IRQ pin number; passed when used as map_irq callback. Unused
*
* @slot and @pin are unused, but included so that this function can be
* used directly as the map_irq callback passed to pci_assign_irq() and as
* the struct pci_host_bridge.map_irq pointer.
*
* Return: the mapped virq number on success, 0 if no IRQ could be resolved.
*/
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct of_phandle_args oirq;
int ret;
ret = of_irq_parse_pci(dev, &oirq);
if (ret)
return 0; /* Proper return code 0 == NO_IRQ */
return irq_create_of_mapping(&oirq);
}
EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
#endif /* CONFIG_OF_IRQ */
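
/*
 * Parse the host bridge "ranges" and "dma-ranges" from DT into
 * bridge->windows and bridge->dma_ranges, request the bus resources and
 * remap any I/O window found.
 */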
static int pci_parse_request_of_pci_ranges(struct device *dev,
struct pci_host_bridge *bridge)
{
int err, res_valid = 0;
resource_size_t iobase;
struct resource_entry *win, *tmp;
INIT_LIST_HEAD(&bridge->windows);
INIT_LIST_HEAD(&bridge->dma_ranges);
err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &bridge->windows,
&bridge->dma_ranges, &iobase);
if (err)
return err;
err = devm_request_pci_bus_resources(dev, &bridge->windows);
if (err)
return err;
resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
struct resource *res = win->res;
switch (resource_type(res)) {
case IORESOURCE_IO:
err = devm_pci_remap_iospace(dev, res, iobase);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
resource_list_destroy_entry(win);
}
break;
case IORESOURCE_MEM:
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
if (!(res->flags & IORESOURCE_PREFETCH))
if (upper_32_bits(resource_size(res)))
dev_warn(dev, "Memory resource size exceeds max for 32 bits\n");
break;
}
}
if (!res_valid)
dev_warn(dev, "non-prefetchable memory resource required\n");
return 0;
}
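
/*
 * Fill in DT-derived host bridge defaults (INTx swizzling and mapping
 * callbacks) and parse the bridge's DT ranges into resources.
 */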
int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge)
{
if (!dev->of_node)
return 0;
bridge->swizzle_irq = pci_common_swizzle;
bridge->map_irq = of_irq_parse_and_map_pci;
return pci_parse_request_of_pci_ranges(dev, bridge);
}
#ifdef CONFIG_PCI_DYNAMIC_OF_NODES
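
/*
 * Remove a dynamically created DT node for a PCI device: revert and destroy
 * the OF changeset that created it and drop the node reference.
 */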
void of_pci_remove_node(struct pci_dev *pdev)
{
struct device_node *np;
np = pci_device_to_OF_node(pdev);
if (!np || !of_node_check_flag(np, OF_DYNAMIC))
return;
pdev->dev.of_node = NULL;
of_changeset_revert(np->data);
of_changeset_destroy(np->data);
of_node_put(np);
}
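
/*
 * Create a DT node, via an OF changeset, for a PCI device that firmware did
 * not describe. The node is named "pci@<slot>,<fn>" for bridges and
 * "dev@<slot>,<fn>" for other devices, and is populated with properties
 * derived from the device by of_pci_add_properties().
 */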
void of_pci_make_dev_node(struct pci_dev *pdev)
{
struct device_node *ppnode, *np = NULL;
const char *pci_type;
struct of_changeset *cset;
const char *name;
int ret;
/*
* If there is already a device tree node linked to this device,
* return immediately.
*/
if (pci_device_to_OF_node(pdev))
return;
/* Check if there is a device tree node for the parent device */
if (!pdev->bus->self)
ppnode = pdev->bus->dev.of_node;
else
ppnode = pdev->bus->self->dev.of_node;
if (!ppnode)
return;
if (pci_is_bridge(pdev))
pci_type = "pci";
else
pci_type = "dev";
name = kasprintf(GFP_KERNEL, "%s@%x,%x", pci_type,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
if (!name)
return;
cset = kmalloc(sizeof(*cset), GFP_KERNEL);
if (!cset)
goto out_free_name;
of_changeset_init(cset);
np = of_changeset_create_node(cset, ppnode, name);
if (!np)
goto out_destroy_cset;
ret = of_pci_add_properties(pdev, cset, np);
if (ret)
goto out_free_node;
ret = of_changeset_apply(cset);
if (ret)
goto out_free_node;
np->data = cset;
pdev->dev.of_node = np;
kfree(name);
return;
out_free_node:
of_node_put(np);
out_destroy_cset:
of_changeset_destroy(cset);
kfree(cset);
out_free_name:
kfree(name);
}
#endif
#endif /* CONFIG_PCI */
/**
* of_pci_get_max_link_speed - Find the maximum link speed of the given device node.
* @node: Device tree node with the maximum link speed information.
*
* This function will try to find the limitation of link speed by finding
* a property called "max-link-speed" of the given device node.
*
* Return:
* * > 0 - On success, a maximum link speed.
* * -EINVAL - Invalid "max-link-speed" property value, or failure to access
* the property of the device tree node.
*
* Returns the associated max link speed from DT, or a negative value if the
* required property is not found or is invalid.
*/
int of_pci_get_max_link_speed(struct device_node *node)
{
u32 max_link_speed;
if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
max_link_speed == 0 || max_link_speed > 4)
return -EINVAL;
return max_link_speed;
}
EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
/**
* of_pci_get_slot_power_limit - Parses the "slot-power-limit-milliwatt"
* property.
*
* @node: device tree node with the slot power limit information
* @slot_power_limit_value: pointer where the value should be stored in PCIe
* Slot Capabilities Register format
* @slot_power_limit_scale: pointer where the scale should be stored in PCIe
* Slot Capabilities Register format
*
* Returns the slot power limit in milliwatts and if @slot_power_limit_value
* and @slot_power_limit_scale pointers are non-NULL, fills in the value and
* scale in format used by PCIe Slot Capabilities Register.
*
* If the property is not found or is invalid, returns 0.
*/
u32 of_pci_get_slot_power_limit(struct device_node *node,
u8 *slot_power_limit_value,
u8 *slot_power_limit_scale)
{
u32 slot_power_limit_mw;
u8 value, scale;
if (of_property_read_u32(node, "slot-power-limit-milliwatt",
&slot_power_limit_mw))
slot_power_limit_mw = 0;
/* Calculate Slot Power Limit Value and Slot Power Limit Scale */
if (slot_power_limit_mw == 0) {
value = 0x00;
scale = 0;
} else if (slot_power_limit_mw <= 255) {
value = slot_power_limit_mw;
scale = 3;
} else if (slot_power_limit_mw <= 255*10) {
value = slot_power_limit_mw / 10;
scale = 2;
slot_power_limit_mw = slot_power_limit_mw / 10 * 10;
} else if (slot_power_limit_mw <= 255*100) {
value = slot_power_limit_mw / 100;
scale = 1;
slot_power_limit_mw = slot_power_limit_mw / 100 * 100;
} else if (slot_power_limit_mw <= 239*1000) {
value = slot_power_limit_mw / 1000;
scale = 0;
slot_power_limit_mw = slot_power_limit_mw / 1000 * 1000;
} else if (slot_power_limit_mw < 250*1000) {
value = 0xEF;
scale = 0;
slot_power_limit_mw = 239*1000;
} else if (slot_power_limit_mw <= 600*1000) {
value = 0xF0 + (slot_power_limit_mw / 1000 - 250) / 25;
scale = 0;
slot_power_limit_mw = slot_power_limit_mw / (1000*25) * (1000*25);
} else {
value = 0xFE;
scale = 0;
slot_power_limit_mw = 600*1000;
}
if (slot_power_limit_value)
*slot_power_limit_value = value;
if (slot_power_limit_scale)
*slot_power_limit_scale = scale;
return slot_power_limit_mw;
}
EXPORT_SYMBOL_GPL(of_pci_get_slot_power_limit);
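
/*
 * Illustrative use of of_pci_get_slot_power_limit() (not taken from this
 * file): a controller driver can program the Slot Capabilities register from
 * the returned value/scale pair, e.g.
 *
 *	u8 value, scale;
 *
 *	of_pci_get_slot_power_limit(np, &value, &scale);
 *	slot_cap |= FIELD_PREP(PCI_EXP_SLTCAP_SPLV, value) |
 *		    FIELD_PREP(PCI_EXP_SLTCAP_SPLS, scale);
 */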