/*
 * Copyright (c) 2024-2025, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>

#include <debug.h>
#include <mmio.h>
#include <pcie.h>
#include <pcie_doe.h>
#include <pcie_spec.h>
#include <platform.h>
#include <plat_pcie_enum.h>
#include <tftf_lib.h>

#define PCIE_DEBUG	VERBOSE

const struct pcie_info_table *g_pcie_info_table;
static pcie_device_bdf_table_t *g_pcie_bdf_table;

static pcie_device_bdf_table_t pcie_bdf_table[PCIE_DEVICE_BDF_TABLE_SZ];

static uint32_t g_pcie_index;
static uint32_t g_enumerate;

/* 64-bit address initialisation */
static uint64_t g_bar64_p_start;
static uint64_t g_rp_bar64_value;
static uint64_t g_bar64_p_max;
static uint32_t g_64_bus, g_bar64_size;

/* 32-bit address initialisation */
static uint32_t g_bar32_np_start;
static uint32_t g_bar32_p_start;
static uint32_t g_rp_bar32_value;
static uint32_t g_bar32_np_max;
static uint32_t g_bar32_p_max;
static uint32_t g_np_bar_size, g_p_bar_size;
static uint32_t g_np_bus, g_p_bus;

static uintptr_t pcie_cfg_addr(uint32_t bdf)
{
	uint32_t bus = PCIE_EXTRACT_BDF_BUS(bdf);
	uint32_t dev = PCIE_EXTRACT_BDF_DEV(bdf);
	uint32_t func = PCIE_EXTRACT_BDF_FUNC(bdf);
	uint32_t segment = PCIE_EXTRACT_BDF_SEG(bdf);
	uint32_t cfg_addr;
	uintptr_t ecam_base = 0;
	unsigned int i = 0;

	assert((bus < PCIE_MAX_BUS) && (dev < PCIE_MAX_DEV) && (func < PCIE_MAX_FUNC));
	assert(g_pcie_info_table != NULL);

	while (i < g_pcie_info_table->num_entries) {
		/* Derive ECAM specific information */
		const pcie_info_block_t *block = &g_pcie_info_table->block[i];

		if ((bus >= block->start_bus_num) &&
		    (bus <= block->end_bus_num) &&
		    (segment == block->segment_num)) {
			ecam_base = block->ecam_base;
			break;
		}
		i++;
	}

	assert(ecam_base != 0);

	/*
	 * There are 8 functions per device, 32 devices per bus,
	 * and each function has a 4KB config space.
	 */
	cfg_addr = (bus * PCIE_MAX_DEV * PCIE_MAX_FUNC * PCIE_CFG_SIZE) +
		(dev * PCIE_MAX_FUNC * PCIE_CFG_SIZE) + (func * PCIE_CFG_SIZE);

	return ecam_base + cfg_addr;
}
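
/*
 * Worked example of the ECAM offset arithmetic above (illustrative only,
 * assuming the usual PCIE_MAX_DEV = 32, PCIE_MAX_FUNC = 8 and
 * PCIE_CFG_SIZE = 4KB values): for Bus 2, Dev 3, Func 1 the offset is
 *
 *   (2 * 32 * 8 * 0x1000) + (3 * 8 * 0x1000) + (1 * 0x1000)
 *   = 0x200000 + 0x18000 + 0x1000 = 0x219000
 *
 * which is added to the ECAM base of the block whose segment and bus range
 * contain the BDF.
 */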

/*
 * @brief This API reads 32-bit data from the PCIe config space addressed by
 *        Bus, Device, Function and register offset.
 * 1. Caller - Test Suite
 * 2. Prerequisite - pcie_create_info_table
 * @param bdf - concatenated Bus(8-bits), device(8-bits) & function(8-bits)
 * @param offset - Register offset within a device PCIe config space
 *
 * @return 32-bit data read from the config space
 */
uint32_t pcie_read_cfg(uint32_t bdf, uint32_t offset)
{
	uintptr_t addr = pcie_cfg_addr(bdf);

	return mmio_read_32(addr + offset);
}
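
/*
 * Illustrative usage sketch (not part of the driver): reading the Vendor and
 * Device ID of the function at Segment 0, Bus 1, Device 0, Function 0. The
 * BDF below is an example only.
 *
 *   uint32_t reg = pcie_read_cfg(PCIE_CREATE_BDF(0, 1, 0, 0), TYPE01_VIDR);
 *   uint32_t vendor_id = (reg >> TYPE01_VIDR_SHIFT) & TYPE01_VIDR_MASK;
 *   uint32_t device_id = (reg >> TYPE01_DIDR_SHIFT) & TYPE01_DIDR_MASK;
 */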

/*
 * @brief This API writes 32-bit data to the PCIe config space addressed by
 *        Bus, Device, Function and register offset.
 * 1. Caller - Test Suite
 * 2. Prerequisite - pcie_create_info_table
 * @param bdf - concatenated Bus(8-bits), device(8-bits) & function(8-bits)
 * @param offset - Register offset within a device PCIe config space
 * @param data - data to be written to the config space
 *
 * @return None
 */
void pcie_write_cfg(uint32_t bdf, uint32_t offset, uint32_t data)
{
	uintptr_t addr = pcie_cfg_addr(bdf);

	mmio_write_32(addr + offset, data);
}
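
/*
 * Illustrative read-modify-write sketch (not part of the driver): setting the
 * memory/IO/bus-master enable bits (REG_ACC_DATA) in a function's Command
 * register, as the enumeration code further below does through the pal_*
 * wrappers. The BDF is an example only.
 *
 *   uint32_t bdf = PCIE_CREATE_BDF(0, 1, 0, 0);
 *   uint32_t cmd = pcie_read_cfg(bdf, COMMAND_REG_OFFSET);
 *
 *   pcie_write_cfg(bdf, COMMAND_REG_OFFSET, cmd | REG_ACC_DATA);
 */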

/*
 * @brief Check if BDF is a PCIe Host Bridge.
 *
 * @param bdf - Function's Segment/Bus/Dev/Func in PCIE_CREATE_BDF format
 * @return false if not a Host Bridge, true if it is a Host Bridge.
 */
static bool pcie_is_host_bridge(uint32_t bdf)
{
	uint32_t reg_value = pcie_read_cfg(bdf, TYPE01_RIDR);

	if ((HB_BASE_CLASS == ((reg_value >> CC_BASE_SHIFT) & CC_BASE_MASK)) &&
	    (HB_SUB_CLASS == ((reg_value >> CC_SUB_SHIFT) & CC_SUB_MASK))) {
		return true;
	}

	return false;
}

/*
 * @brief Find a Function's config capability offset matching its input
 *        parameter cid. cid_offset is set to the matching capability offset
 *        w.r.t. zero.
 *
 * @param bdf - Segment/Bus/Dev/Func in the format of PCIE_CREATE_BDF
 * @param cid - Capability ID
 * @param cid_offset - On return, points to cid offset in Function config space
 * @return PCIE_CAP_NOT_FOUND, if there was a failure in finding the required
 *         capability.
 *         PCIE_SUCCESS, if the search was successful.
 */
uint32_t pcie_find_capability(uint32_t bdf, uint32_t cid_type, uint32_t cid,
			      uint32_t *cid_offset)
{
	uint32_t reg_value, next_cap_offset;

	if (cid_type == PCIE_CAP) {
		/* Search in PCIe configuration space */
		reg_value = pcie_read_cfg(bdf, TYPE01_CPR);

		next_cap_offset = (reg_value & TYPE01_CPR_MASK);
		while (next_cap_offset != 0) {
			reg_value = pcie_read_cfg(bdf, next_cap_offset);
			if ((reg_value & PCIE_CIDR_MASK) == cid) {
				*cid_offset = next_cap_offset;
				return PCIE_SUCCESS;
			}
			next_cap_offset = ((reg_value >> PCIE_NCPR_SHIFT) &
					   PCIE_NCPR_MASK);
		}
	} else if (cid_type == PCIE_ECAP) {
		/* Search in PCIe extended configuration space */
		next_cap_offset = PCIE_ECAP_START;
		while (next_cap_offset != 0) {
			reg_value = pcie_read_cfg(bdf, next_cap_offset);
			if ((reg_value & PCIE_ECAP_CIDR_MASK) == cid) {
				*cid_offset = next_cap_offset;
				return PCIE_SUCCESS;
			}
			next_cap_offset = ((reg_value >> PCIE_ECAP_NCPR_SHIFT) &
					   PCIE_ECAP_NCPR_MASK);
		}
	}

	/* The capability was not found */
	return PCIE_CAP_NOT_FOUND;
}
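
/*
 * Illustrative lookup sketch (not part of the driver): locating the PCI
 * Express Capability structure of a function before reading its registers,
 * mirroring what pcie_device_port_type() does below. The BDF is an example
 * only.
 *
 *   uint32_t pciecs_base;
 *   uint32_t bdf = PCIE_CREATE_BDF(0, 1, 0, 0);
 *
 *   if (pcie_find_capability(bdf, PCIE_CAP, CID_PCIECS,
 *                            &pciecs_base) == PCIE_SUCCESS) {
 *           uint32_t reg = pcie_read_cfg(bdf, pciecs_base);
 *   }
 */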

/*
 * @brief This API is used as a placeholder to check whether the bdf
 *        obtained is valid or not
 *
 * @param bdf
 * @return true if bdf is valid else false
 */
static bool pcie_check_device_valid(uint32_t bdf)
{
	(void) bdf;
	/*
	 * Add BDFs to this function if PCIe tests
	 * need to be ignored for a BDF for any reason
	 */
	return true;
}

/*
 * @brief Returns whether a PCIe Function is an on-chip peripheral or not
 *
 * @param bdf - Segment/Bus/Dev/Func in the format of PCIE_CREATE_BDF
 * @return Returns TRUE if the Function is an on-chip peripheral, FALSE if it
 *         is not an on-chip peripheral
 */
static bool pcie_is_onchip_peripheral(uint32_t bdf)
{
	(void)bdf;
	return false;
}

/*
 * @brief Returns the type of PCIe device or port for the given bdf
 *
 * @param bdf - Segment/Bus/Dev/Func in the format of PCIE_CREATE_BDF
 * @return Returns (1 << 0b1001) for RCiEP, (1 << 0b1010) for RCEC,
 *         (1 << 0b0000) for EP, (1 << 0b0100) for RP,
 *         (1 << 0b1100) for iEP_EP, (1 << 0b1011) for iEP_RP,
 *         (1 << PCIECR[7:4]) for any other device type.
 */
static uint32_t pcie_device_port_type(uint32_t bdf)
{
	uint32_t pciecs_base, reg_value, dp_type;

	/*
	 * Get the PCI Express Capability structure offset and
	 * use that offset to read the PCI Express Capabilities register
	 */
	pcie_find_capability(bdf, PCIE_CAP, CID_PCIECS, &pciecs_base);
	reg_value = pcie_read_cfg(bdf, pciecs_base + CIDR_OFFSET);

	/* Read Device/Port bits [7:4] in Function's PCIe Capabilities register */
	dp_type = (reg_value >> ((PCIECR_OFFSET - CIDR_OFFSET)*8 +
				 PCIECR_DPT_SHIFT)) & PCIECR_DPT_MASK;
	dp_type = (1 << dp_type);

	/* Check if the device/port is an on-chip peripheral */
	if (pcie_is_onchip_peripheral(bdf)) {
		if (dp_type == EP) {
			dp_type = iEP_EP;
		} else if (dp_type == RP) {
			dp_type = iEP_RP;
		}
	}

	/* Return device/port type */
	return dp_type;
}
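
/*
 * Because the value returned above is one-hot encoded (1 << PCIECR[7:4]),
 * several port types can be matched with a single mask. Illustrative sketch
 * (not part of the driver), assuming a test only cares about Root Ports:
 *
 *   uint32_t dp_type = pcie_device_port_type(bdf);
 *
 *   if ((dp_type & (RP | iEP_RP)) != 0) {
 *           ... BDF is a conventional or integrated Root Port ...
 *   }
 */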

/*
 * @brief Returns the BDF of the upstream Root Port of a PCIe device function.
 *
 * @param bdf - Function's Segment/Bus/Dev/Func in PCIE_CREATE_BDF format
 * @param rp_bdf - Upstream Root Port bdf in PCIE_CREATE_BDF format
 * @return 0 for success, 1 for failure.
 */
static uint32_t pcie_get_rootport(uint32_t bdf, uint32_t *rp_bdf)
{
	uint32_t seg_num, sec_bus, sub_bus;
	uint32_t reg_value, dp_type, index = 0;

	dp_type = pcie_device_port_type(bdf);

	PCIE_DEBUG("DP type 0x%x\n", dp_type);

	/* If the device is an RP or iEP_RP, set its rootport value to itself */
	if ((dp_type == RP) || (dp_type == iEP_RP)) {
		*rp_bdf = bdf;
		return 0;
	}

	/* If the device is an RCiEP or RCEC, set the RP BDF to 0xffffffff */
	if ((dp_type == RCiEP) || (dp_type == RCEC)) {
		*rp_bdf = 0xffffffff;
		return 1;
	}

	assert(g_pcie_bdf_table != NULL);

	while (index < g_pcie_bdf_table->num_entries) {
		*rp_bdf = g_pcie_bdf_table->device[index++].bdf;

		/*
		 * Extract Secondary and Subordinate Bus numbers of the
		 * upstream Root port and check if the input function's
		 * bus number falls within that range.
		 */
		reg_value = pcie_read_cfg(*rp_bdf, TYPE1_PBN);
		seg_num = PCIE_EXTRACT_BDF_SEG(*rp_bdf);
		sec_bus = ((reg_value >> SECBN_SHIFT) & SECBN_MASK);
		sub_bus = ((reg_value >> SUBBN_SHIFT) & SUBBN_MASK);
		dp_type = pcie_device_port_type(*rp_bdf);

		if (((dp_type == RP) || (dp_type == iEP_RP)) &&
		    (sec_bus <= PCIE_EXTRACT_BDF_BUS(bdf)) &&
		    (sub_bus >= PCIE_EXTRACT_BDF_BUS(bdf)) &&
		    (seg_num == PCIE_EXTRACT_BDF_SEG(bdf)))
			return 0;
	}

	/* Return failure */
	ERROR("PCIe Hierarchy fail: RP of bdf 0x%x not found\n", bdf);
	*rp_bdf = 0;
	return 1;
}

/*
 * @brief Sanity checks that all Endpoints have an upstream Root Port
 *
 * @param None
 * @return 0 if sanity check passes, 1 if sanity check fails
 */
static uint32_t pcie_populate_device_rootport(void)
{
	uint32_t bdf, rp_bdf;
	pcie_device_bdf_table_t *bdf_tbl_ptr = g_pcie_bdf_table;

	assert(bdf_tbl_ptr != NULL);

	for (unsigned int tbl_index = 0; tbl_index < bdf_tbl_ptr->num_entries;
	     tbl_index++) {
		bdf = bdf_tbl_ptr->device[tbl_index].bdf;

		/* Check if the BDF has a Root Port */
		pcie_get_rootport(bdf, &rp_bdf);

		bdf_tbl_ptr->device[tbl_index].rp_bdf = rp_bdf;
		PCIE_DEBUG("Dev bdf: 0x%x RP bdf: 0x%x\n", bdf, rp_bdf);
	}

	return 0;
}

/*
 * @brief Returns the BDF Table pointer
 *
 * @param None
 *
 * @return BDF Table pointer
 */
pcie_device_bdf_table_t *pcie_get_bdf_table(void)
{
	assert(g_pcie_bdf_table != NULL);

	return g_pcie_bdf_table;
}
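
/*
 * Illustrative sketch (not part of the driver) of how a test can walk the
 * table returned above once pcie_init() has run:
 *
 *   pcie_device_bdf_table_t *tbl = pcie_get_bdf_table();
 *
 *   for (unsigned int i = 0; i < tbl->num_entries; i++) {
 *           uint32_t bdf = tbl->device[i].bdf;
 *           uint32_t rp_bdf = tbl->device[i].rp_bdf;
 *           ...
 *   }
 */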

/*
 * @brief This API creates the device bdf table from enumeration
 *
 * @param None
 *
 * @return None
 */
static void pcie_create_device_bdf_table(void)
{
	uint32_t seg_num, start_bus, end_bus;
	uint32_t bus_index, dev_index, func_index, ecam_index;
	uint32_t bdf, reg_value, cid_offset, status;

	assert(g_pcie_bdf_table != NULL);

	g_pcie_bdf_table->num_entries = 0;

	assert(g_pcie_info_table != NULL);
	assert(g_pcie_info_table->num_entries != 0);

	for (ecam_index = 0; ecam_index < g_pcie_info_table->num_entries; ecam_index++) {
		/* Derive ECAM specific information */
		const pcie_info_block_t *block = &g_pcie_info_table->block[ecam_index];

		seg_num = block->segment_num;
		start_bus = block->start_bus_num;
		end_bus = block->end_bus_num;

		/* Iterate over all buses, devices and functions in this ecam */
		for (bus_index = start_bus; bus_index <= end_bus; bus_index++) {
			for (dev_index = 0; dev_index < PCIE_MAX_DEV; dev_index++) {
				for (func_index = 0; func_index < PCIE_MAX_FUNC; func_index++) {
					/* Form BDF using seg, bus, device, function numbers */
					bdf = PCIE_CREATE_BDF(seg_num, bus_index, dev_index,
							      func_index);

					/* Probe PCIe device Function with this BDF */
					reg_value = pcie_read_cfg(bdf, TYPE01_VIDR);

					/* Store the Function's BDF if there was a valid response */
					if (reg_value != PCIE_UNKNOWN_RESPONSE) {
						/* Skip if the device is a host bridge */
						if (pcie_is_host_bridge(bdf)) {
							continue;
						}

						/* Skip if the device is a PCI legacy device */
						if (pcie_find_capability(bdf, PCIE_CAP,
							CID_PCIECS, &cid_offset) != PCIE_SUCCESS) {
							continue;
						}

						status = pcie_check_device_valid(bdf);
						if (!status) {
							continue;
						}

						g_pcie_bdf_table->device[
							g_pcie_bdf_table->num_entries++].bdf = bdf;
					}
				}
			}
		}
	}

	/* Sanity Check : Confirm all EP (normal, integrated) have a rootport */
	pcie_populate_device_rootport();
	INFO("Number of BDFs found : %u\n", g_pcie_bdf_table->num_entries);
}

/*
 * @brief Returns the header type of the input PCIe device function
 *
 * @param bdf - Segment/Bus/Dev/Func in the format of PCIE_CREATE_BDF
 * @return TYPE0_HEADER for functions with a Type 0 config space header,
 *         TYPE1_HEADER for functions with a Type 1 config space header.
 */
static uint32_t pcie_function_header_type(uint32_t bdf)
{
	/* Read four bytes of config space starting from cache line size register */
	uint32_t reg_value = pcie_read_cfg(bdf, TYPE01_CLSR);

	/* Extract header type register value */
	reg_value = ((reg_value >> TYPE01_HTR_SHIFT) & TYPE01_HTR_MASK);

	/* Header layout bits within header type register indicate the header type */
	return ((reg_value >> HTR_HL_SHIFT) & HTR_HL_MASK);
}

/*
 * @brief Returns the ECAM address of the input PCIe function
 *
 * @param bdf - Segment/Bus/Dev/Func in PCIE_CREATE_BDF format
 * @return ECAM address if success, else NULL address
 */
static uintptr_t pcie_get_ecam_base(uint32_t bdf)
{
	uint8_t ecam_index = 0, sec_bus = 0, sub_bus;
	uint16_t seg_num = (uint16_t)PCIE_EXTRACT_BDF_SEG(bdf);
	uint32_t reg_value;
	uintptr_t ecam_base = 0;

	assert(g_pcie_info_table != NULL);

	while (ecam_index < g_pcie_info_table->num_entries) {
		/* Derive ECAM specific information */
		const pcie_info_block_t *block = &g_pcie_info_table->block[ecam_index];

		if (seg_num == block->segment_num) {
			if (pcie_function_header_type(bdf) == TYPE0_HEADER) {
				/* Return ecam_base if Type0 Header */
				ecam_base = block->ecam_base;
				break;
			}

			/* Check for Secondary/Subordinate bus if Type1 Header */
			reg_value = pcie_read_cfg(bdf, TYPE1_PBN);
			sec_bus = ((reg_value >> SECBN_SHIFT) & SECBN_MASK);
			sub_bus = ((reg_value >> SUBBN_SHIFT) & SUBBN_MASK);

			if ((sec_bus >= block->start_bus_num) &&
			    (sub_bus <= block->end_bus_num)) {
				ecam_base = block->ecam_base;
				break;
			}
		}
		ecam_index++;
	}

	return ecam_base;
}

/*
 * @brief This API prints all the PCIe Devices info
 * 1. Caller - Validation layer.
 * 2. Prerequisite - pcie_create_info_table()
 * @param None
 * @return None
 */
static void pcie_print_device_info(void)
{
	uint32_t bdf, dp_type;
	uint32_t tbl_index = 0;
	uint32_t ecam_index = 0;
	uint32_t ecam_base, ecam_start_bus, ecam_end_bus;
	pcie_device_bdf_table_t *bdf_tbl_ptr = g_pcie_bdf_table;
	uint32_t num_rciep __unused = 0, num_rcec __unused = 0;
	uint32_t num_iep __unused = 0, num_irp __unused = 0;
	uint32_t num_ep __unused = 0, num_rp __unused = 0;
	uint32_t num_dp __unused = 0, num_up __unused = 0;
	uint32_t num_pcie_pci __unused = 0, num_pci_pcie __unused = 0;
	uint32_t bdf_counter;

	assert(bdf_tbl_ptr != NULL);

	if (bdf_tbl_ptr->num_entries == 0) {
		INFO("BDF Table: No RCiEP or iEP found\n");
		return;
	}

	for (tbl_index = 0; tbl_index < bdf_tbl_ptr->num_entries; tbl_index++) {
		bdf = bdf_tbl_ptr->device[tbl_index].bdf;
		dp_type = pcie_device_port_type(bdf);

		switch (dp_type) {
		case RCiEP:
			num_rciep++;
			break;
		case RCEC:
			num_rcec++;
			break;
		case EP:
			num_ep++;
			break;
		case RP:
			num_rp++;
			break;
		case iEP_EP:
			num_iep++;
			break;
		case iEP_RP:
			num_irp++;
			break;
		case UP:
			num_up++;
			break;
		case DP:
			num_dp++;
			break;
		case PCI_PCIE:
			num_pci_pcie++;
			break;
		case PCIE_PCI:
			num_pcie_pci++;
			break;
		default:
			ERROR("Unknown dp_type 0x%x\n", dp_type);
		}
	}

	INFO("Number of RCiEP : %u\n", num_rciep);
	INFO("Number of RCEC : %u\n", num_rcec);
	INFO("Number of EP : %u\n", num_ep);
	INFO("Number of RP : %u\n", num_rp);
	INFO("Number of iEP_EP : %u\n", num_iep);
	INFO("Number of iEP_RP : %u\n", num_irp);
	INFO("Number of UP of switch : %u\n", num_up);
	INFO("Number of DP of switch : %u\n", num_dp);
	INFO("Number of PCI/PCIe Bridge: %u\n", num_pci_pcie);
	INFO("Number of PCIe/PCI Bridge: %u\n", num_pcie_pci);

	assert(g_pcie_info_table != NULL);

	while (ecam_index < g_pcie_info_table->num_entries) {

		/* Derive ECAM specific information */
		const pcie_info_block_t *block = &g_pcie_info_table->block[ecam_index];

		ecam_base = block->ecam_base;
		ecam_start_bus = block->start_bus_num;
		ecam_end_bus = block->end_bus_num;
		tbl_index = 0;
		bdf_counter = 0;

		INFO("ECAM %u: base 0x%x\n", ecam_index, ecam_base);

		while (tbl_index < bdf_tbl_ptr->num_entries) {
			uint32_t seg_num, bus_num, dev_num, func_num;
			uint32_t device_id __unused, vendor_id __unused, reg_value;
			uint32_t bdf, dev_ecam_base;

			bdf = bdf_tbl_ptr->device[tbl_index++].bdf;
			seg_num = PCIE_EXTRACT_BDF_SEG(bdf);
			bus_num = PCIE_EXTRACT_BDF_BUS(bdf);
			dev_num = PCIE_EXTRACT_BDF_DEV(bdf);
			func_num = PCIE_EXTRACT_BDF_FUNC(bdf);

			reg_value = pcie_read_cfg(bdf, TYPE01_VIDR);
			device_id = (reg_value >> TYPE01_DIDR_SHIFT) & TYPE01_DIDR_MASK;
			vendor_id = (reg_value >> TYPE01_VIDR_SHIFT) & TYPE01_VIDR_MASK;

			dev_ecam_base = pcie_get_ecam_base(bdf);

			if ((ecam_base == dev_ecam_base) &&
			    (bus_num >= ecam_start_bus) &&
			    (bus_num <= ecam_end_bus)) {
				bdf_counter = 1;
				bdf = PCIE_CREATE_BDF(seg_num, bus_num, dev_num, func_num);
				INFO(" BDF: 0x%x\n", bdf);
				INFO(" Seg: 0x%x Bus: 0x%x Dev: 0x%x "
				     "Func: 0x%x Dev ID: 0x%x Vendor ID: 0x%x\n",
				     seg_num, bus_num, dev_num, func_num,
				     device_id, vendor_id);
			}
		}

		if (bdf_counter == 0) {
			INFO(" No BDF devices in ECAM region index %d\n", ecam_index);
		}

		ecam_index++;
	}
}

/*
 * @brief Create the PCIe info table and the device BDF table
 * @param void
 * @return void
 */
static void pcie_create_info_table(void)
{
	unsigned int num_ecam;

	INFO("Creating PCIe info table\n");
	g_pcie_bdf_table = pcie_bdf_table;

	num_ecam = g_pcie_info_table->num_entries;
	INFO("Number of ECAM regions : %u\n", num_ecam);
	if ((num_ecam == 0) || (num_ecam > MAX_PCIE_INFO_ENTRIES)) {
		ERROR("PCIe info entries invalid\n");
		panic();
	}
	pcie_create_device_bdf_table();
	pcie_print_device_info();
}

static void pal_pci_cfg_write(uint32_t bus, uint32_t dev, uint32_t func,
			      uint32_t offset, uint32_t data)
{
	pcie_write_cfg(PCIE_CREATE_BDF(0, bus, dev, func), offset, data);
}

static void pal_pci_cfg_read(uint32_t bus, uint32_t dev, uint32_t func,
			     uint32_t offset, uint32_t *value)
{
	*value = pcie_read_cfg(PCIE_CREATE_BDF(0, bus, dev, func), offset);
}

/*
 * This API programs the Memory Base and Memory Limit registers of the Bus,
 * Device and Function of a Type 1 Header
 */
static void get_resource_base_32(uint32_t bus, uint32_t dev, uint32_t func,
				 uint32_t bar32_p_base, uint32_t bar32_np_base,
				 uint32_t bar32_p_limit, uint32_t bar32_np_limit)
{
	uint32_t mem_bar_np;
	uint32_t mem_bar_p;

	/* Update the 32 bit NP-BAR start address for the next iteration */
	if (bar32_np_base != g_bar32_np_start) {
		if ((g_bar32_np_start << 12) != 0) {
			g_bar32_np_start = (g_bar32_np_start &
					    MEM_BASE32_LIM_MASK) + BAR_INCREMENT;
		}

		if (bar32_np_limit == g_bar32_np_start) {
			bar32_np_limit = bar32_np_limit - BAR_INCREMENT;
		}

		pal_pci_cfg_read(bus, dev, func, NON_PRE_FET_OFFSET,
				 &mem_bar_np);
		mem_bar_np = ((bar32_np_limit & MEM_BASE32_LIM_MASK) |
			      mem_bar_np);
		pal_pci_cfg_write(bus, dev, func, NON_PRE_FET_OFFSET,
				  mem_bar_np);
	}

	/* Update the 32 bit P-BAR start address for the next iteration */
	if (bar32_p_base != g_bar32_p_start) {
		if ((g_bar32_p_start << 12) != 0) {
			g_bar32_p_start = (g_bar32_p_start &
					   MEM_BASE32_LIM_MASK) + BAR_INCREMENT;
		}

		if (bar32_p_limit == g_bar32_p_start) {
			bar32_p_limit = bar32_p_limit - BAR_INCREMENT;
		}

		pal_pci_cfg_read(bus, dev, func, PRE_FET_OFFSET, &mem_bar_p);
		mem_bar_p = ((bar32_p_limit & MEM_BASE32_LIM_MASK) | mem_bar_p);
		pal_pci_cfg_write(bus, dev, func, PRE_FET_OFFSET, mem_bar_p);
	}
}

/*
 * This API programs the Memory Base and Memory Limit registers of the Bus,
 * Device and Function of a Type 1 Header
 */
static void get_resource_base_64(uint32_t bus, uint32_t dev, uint32_t func,
				 uint64_t bar64_p_base, uint64_t bar64_p_max)
{
	uint32_t bar64_p_lower32_base = (uint32_t)bar64_p_base;
	uint32_t bar64_p_upper32_base = (uint32_t)(bar64_p_base >> 32);
	uint32_t bar64_p_lower32_limit = (uint32_t)bar64_p_max;
	uint32_t bar64_p_upper32_limit = (uint32_t)(bar64_p_max >> 32);

	/* Obtain the memory base and memory limit */
	bar64_p_lower32_base = REG_MASK_SHIFT(bar64_p_lower32_base);
	bar64_p_lower32_limit = REG_MASK_SHIFT(bar64_p_lower32_limit);
	uint32_t mem_bar_p = ((bar64_p_lower32_limit << 16) |
			      bar64_p_lower32_base);

	/* Configure Memory base and Memory limit register */
	if ((bar64_p_base != bar64_p_max) &&
	    (g_bar64_p_start <= bar64_p_max)) {
		if ((g_bar64_p_start << 12) != 0) {
			g_bar64_p_start = (g_bar64_p_start &
					   MEM_BASE64_LIM_MASK) + BAR_INCREMENT;
		}

		if (bar64_p_lower32_limit == g_bar64_p_start) {
			bar64_p_lower32_limit = bar64_p_lower32_limit -
				BAR_INCREMENT;
		}

		g_bar64_p_start = (g_bar64_p_start & MEM_BASE64_LIM_MASK) +
			BAR_INCREMENT;

		pal_pci_cfg_write(bus, dev, func, PRE_FET_OFFSET, mem_bar_p);
		pal_pci_cfg_write(bus, dev, func, PRE_FET_OFFSET + 4,
				  bar64_p_upper32_base);
		pal_pci_cfg_write(bus, dev, func, PRE_FET_OFFSET + 8,
				  bar64_p_upper32_limit);
	}
}

static void pcie_rp_program_bar(uint32_t bus, uint32_t dev, uint32_t func)
{
	uint64_t bar_size, bar_upper_bits;
	uint32_t offset = BAR0_OFFSET;
	uint32_t bar_reg_value, bar_lower_bits;

	while (offset <= TYPE1_BAR_MAX_OFF) {
		pal_pci_cfg_read(bus, dev, func, offset, &bar_reg_value);

		if (BAR_REG(bar_reg_value) == BAR_64_BIT) {
			/*
			 * BAR supports 64-bit addresses, therefore write all
			 * 1's to BARn and BARn+1 and identify the size
			 * requested
			 */
			pal_pci_cfg_write(bus, dev, func, offset, 0xFFFFFFF0);
			pal_pci_cfg_write(bus, dev, func, offset + 4,
					  0xFFFFFFFF);
			pal_pci_cfg_read(bus, dev, func, offset,
					 &bar_lower_bits);
			bar_size = bar_lower_bits & BAR_MASK;

			pal_pci_cfg_read(bus, dev, func, offset + 4,
					 &bar_reg_value);
			bar_upper_bits = bar_reg_value;
			bar_size = bar_size | (bar_upper_bits << 32);

			bar_size = ~bar_size + 1;

			/*
			 * If BAR size is 0, then the BAR is not implemented,
			 * move to the next BAR
			 */
			if (bar_size == 0) {
				offset = offset + 8;
				continue;
			}

			pal_pci_cfg_write(bus, dev, func, offset,
					  (uint32_t)g_rp_bar64_value);
			pal_pci_cfg_write(bus, dev, func, offset + 4,
					  (uint32_t)(g_rp_bar64_value >> 32));
			offset = offset + 8;
		} else {
			/*
			 * BAR supports 32-bit addresses. Write all 1's to BARn
			 * and identify the size requested
			 */
			pal_pci_cfg_write(bus, dev, func, offset, 0xFFFFFFF0);
			pal_pci_cfg_read(bus, dev, func, offset,
					 &bar_lower_bits);
			bar_reg_value = bar_lower_bits & BAR_MASK;
			bar_size = ~bar_reg_value + 1;

			/*
			 * If BAR size is 0, then the BAR is not implemented,
			 * move to the next BAR
			 */
			if (bar_size == 0) {
				offset = offset + 4;
				continue;
			}

			pal_pci_cfg_write(bus, dev, func, offset,
					  g_rp_bar32_value);
			g_rp_bar32_value = g_rp_bar32_value + (uint32_t)bar_size;
			offset = offset + 4;
		}
	}
}
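
/*
 * Worked example of the BAR sizing scheme used above and in
 * pcie_program_bar_reg() below (illustrative only): after writing all 1's to
 * a 32-bit BAR, a read-back of 0xFFF00000 (once the low attribute bits are
 * cleared with BAR_MASK) means the device decodes address bits [31:20], so
 *
 *   bar_size = ~0xFFF00000 + 1 = 0x00100000 (1MB)
 *
 * and the BAR is then programmed with the next free, suitably aligned
 * address taken from the running base value.
 */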

/*
 * This API programs all the BAR registers in PCIe config space pointed to by
 * Bus, Device and Function for an End Point PCIe device
 */
static void pcie_program_bar_reg(uint32_t bus, uint32_t dev, uint32_t func)
{
	uint64_t bar_size, bar_upper_bits;
	uint32_t bar_reg_value, bar_lower_bits;
	uint32_t offset = BAR0_OFFSET;
	uint32_t np_bar_size = 0;
	uint32_t p_bar_size = 0, p_bar64_size = 0;

	while (offset <= TYPE0_BAR_MAX_OFF) {
		pal_pci_cfg_read(bus, dev, func, offset, &bar_reg_value);

		if (BAR_MEM(bar_reg_value) == BAR_PRE_MEM) {
			if (BAR_REG(bar_reg_value) == BAR_64_BIT) {
				/*
				 * BAR supports 64-bit addresses, therefore
				 * write all 1's to BARn and BARn+1 and
				 * identify the size requested
				 */
				pal_pci_cfg_write(bus, dev, func, offset,
						  0xFFFFFFF0);
				pal_pci_cfg_write(bus, dev, func, offset + 4,
						  0xFFFFFFFF);
				pal_pci_cfg_read(bus, dev, func, offset,
						 &bar_lower_bits);
				bar_size = bar_lower_bits & BAR_MASK;

				pal_pci_cfg_read(bus, dev, func, offset + 4,
						 &bar_reg_value);
				bar_upper_bits = bar_reg_value;
				bar_size = bar_size | (bar_upper_bits << 32);

				bar_size = ~bar_size + 1;

				/*
				 * If BAR size is 0, then the BAR is not
				 * implemented, move to the next BAR
				 */
				if (bar_size == 0) {
					offset = offset + 8;
					continue;
				}

				/*
				 * If p_bar64_size = 0 and the bus number is
				 * the same as the previous bus number, then
				 * check if the current PCIe Device BAR size
				 * is greater than the previous BAR size; if
				 * yes then add the current BAR size to the
				 * updated start address, else add the
				 * previous BAR size to the updated start
				 * address
				 */
				if ((p_bar64_size == 0) && ((g_64_bus == bus))) {
					if (g_bar64_size < bar_size) {
						g_bar64_p_start =
							g_bar64_p_start +
							bar_size;
					} else {
						g_bar64_p_start =
							g_bar64_p_start +
							g_bar64_size;
					}
				} else if ((g_bar64_size < bar_size) &&
					   (p_bar64_size != 0)) {
					g_bar64_p_start = g_bar64_p_start +
						bar_size;
				} else {
					g_bar64_p_start = g_bar64_p_start +
						p_bar64_size;
				}

				pal_pci_cfg_write(bus, dev, func, offset,
						  (uint32_t)g_bar64_p_start);
				pal_pci_cfg_write(bus, dev, func, offset + 4,
						  (uint32_t)(g_bar64_p_start >>
							     32));

				p_bar64_size = (uint32_t)bar_size;
				g_bar64_size = (uint32_t)bar_size;
				g_64_bus = bus;
				offset = offset + 8;
			} else {
				/*
				 * BAR supports 32-bit addresses. Write all 1's
				 * to BARn and identify the size requested
				 */
				pal_pci_cfg_write(bus, dev, func, offset,
						  0xFFFFFFF0);
				pal_pci_cfg_read(bus, dev, func, offset,
						 &bar_lower_bits);
				bar_reg_value = bar_lower_bits & BAR_MASK;
				bar_size = ~bar_reg_value + 1;

				/*
				 * If BAR size is 0, then the BAR is not
				 * implemented, move to the next BAR
				 */
				if (bar_size == 0) {
					offset = offset + 4;
					continue;
				}

				/*
				 * If p_bar_size = 0 and the bus number is the
				 * same as the previous bus number, then check
				 * if the current PCIe Device BAR size is
				 * greater than the previous BAR size; if yes
				 * then add the current BAR size to the
				 * updated start address, else add the
				 * previous BAR size to the updated start
				 * address
				 */
				if ((p_bar_size == 0) && ((g_p_bus == bus))) {
					if (g_p_bar_size < bar_size) {
						g_bar32_p_start =
							g_bar32_p_start +
							(uint32_t)bar_size;
					} else {
						g_bar32_p_start =
							g_bar32_p_start +
							g_p_bar_size;
					}
				} else if ((g_p_bar_size < bar_size) &&
					   (p_bar_size != 0)) {
					g_bar32_p_start = g_bar32_p_start +
						(uint32_t)bar_size;
				} else {
					g_bar32_p_start = g_bar32_p_start +
						p_bar_size;
				}

				pal_pci_cfg_write(bus, dev, func, offset,
						  g_bar32_p_start);
				p_bar_size = (uint32_t)bar_size;
				g_p_bar_size = (uint32_t)bar_size;
				g_p_bus = bus;

				offset = offset + 4;
			}
		} else {
			/*
			 * BAR supports 32-bit addresses. Write all 1's to BARn
			 * and identify the size requested
			 */
			pal_pci_cfg_write(bus, dev, func, offset, 0xFFFFFFF0);
			pal_pci_cfg_read(bus, dev, func, offset,
					 &bar_lower_bits);
			bar_reg_value = bar_lower_bits & BAR_MASK;
			bar_size = ~bar_reg_value + 1;

			/*
			 * If BAR size is 0, then the BAR is not implemented,
			 * move to the next BAR
			 */
			if (bar_size == 0) {
				if (BAR_REG(bar_lower_bits) == BAR_64_BIT) {
					offset = offset + 8;
				}

				if (BAR_REG(bar_lower_bits) == BAR_32_BIT) {
					offset = offset + 4;
				}

				continue;
			}

			/*
			 * If np_bar_size = 0 and the bus number is the same
			 * as the previous bus number, then check if the
			 * current PCIe Device BAR size is greater than the
			 * previous BAR size; if yes then add the current BAR
			 * size to the updated start address, else add the
			 * previous BAR size to the updated start address
			 */
			if ((np_bar_size == 0) && ((g_np_bus == bus))) {
				if (g_np_bar_size < bar_size) {
					g_bar32_np_start = g_bar32_np_start +
						(uint32_t)bar_size;
				} else {
					g_bar32_np_start = g_bar32_np_start +
						g_np_bar_size;
				}
			} else if ((g_np_bar_size < bar_size) &&
				   (np_bar_size != 0)) {
				g_bar32_np_start = g_bar32_np_start +
					(uint32_t)bar_size;
			} else {
				g_bar32_np_start = g_bar32_np_start +
					np_bar_size;
			}

			pal_pci_cfg_write(bus, dev, func, offset,
					  g_bar32_np_start);
			np_bar_size = (uint32_t)bar_size;
			g_np_bar_size = (uint32_t)bar_size;
			g_np_bus = bus;

			pal_pci_cfg_read(bus, dev, func, offset, &bar_reg_value);
			if (BAR_REG(bar_reg_value) == BAR_64_BIT) {
				pal_pci_cfg_write(bus, dev, func,
						  offset + 4, 0);
				offset = offset + 8;
			}

			if (BAR_REG(bar_reg_value) == BAR_32_BIT) {
				offset = offset + 4;
			}
		}

		g_bar32_p_max = g_bar32_p_start;
		g_bar32_np_max = g_bar32_np_start;
		g_bar64_p_max = g_bar64_p_start;
	}
}

/*
 * This API performs the PCIe bus enumeration
 *
 * bus, sec_bus - Bus (8-bits), secondary bus (8-bits)
 * sub_bus - Subordinate bus
 */
static uint32_t pcie_enumerate_device(uint32_t bus, uint32_t sec_bus)
{
	uint32_t vendor_id = 0;
	uint32_t header_value;
	uint32_t sub_bus = bus;
	uint32_t dev;
	uint32_t func;
	uint32_t class_code;
	uint32_t com_reg_value;
	uint32_t bar32_p_limit;
	uint32_t bar32_np_limit;
	uint32_t bar32_p_base = g_bar32_p_start;
	uint32_t bar32_np_base = g_bar32_np_start;
	uint64_t bar64_p_base = g_bar64_p_start;

	if (bus == ((g_pcie_info_table->block[g_pcie_index].end_bus_num) + 1)) {
		return sub_bus;
	}

	for (dev = 0; dev < PCIE_MAX_DEV; dev++) {
		for (func = 0; func < PCIE_MAX_FUNC; func++) {
			pal_pci_cfg_read(bus, dev, func, 0, &vendor_id);

			if ((vendor_id == 0x0) || (vendor_id == 0xFFFFFFFF)) {
				continue;
			}

			/* Skip Hostbridge configuration */
			pal_pci_cfg_read(bus, dev, func, TYPE01_RIDR,
					 &class_code);

			if ((((class_code >> CC_BASE_SHIFT) & CC_BASE_MASK) ==
			     HB_BASE_CLASS) &&
			    (((class_code >> CC_SUB_SHIFT) & CC_SUB_MASK)) ==
			     HB_SUB_CLASS) {
				continue;
			}

			pal_pci_cfg_read(bus, dev, func, HEADER_OFFSET,
					 &header_value);
			if (PCIE_HEADER_TYPE(header_value) == TYPE1_HEADER) {
				/*
				 * Enable memory access, Bus master enable and
				 * I/O access
				 */
				pal_pci_cfg_read(bus, dev, func,
						 COMMAND_REG_OFFSET,
						 &com_reg_value);

				pal_pci_cfg_write(bus, dev, func,
						  COMMAND_REG_OFFSET,
						  (com_reg_value |
						   REG_ACC_DATA));

				pal_pci_cfg_write(bus, dev, func,
						  BUS_NUM_REG_OFFSET,
						  BUS_NUM_REG_CFG(0xFF, sec_bus,
								  bus));

				pal_pci_cfg_write(bus, dev, func,
						  NON_PRE_FET_OFFSET,
						  ((g_bar32_np_start >> 16) &
						   0xFFF0));

				pal_pci_cfg_write(bus, dev, func,
						  PRE_FET_OFFSET,
						  ((g_bar32_p_start >> 16) &
						   0xFFF0));

				sub_bus = pcie_enumerate_device(sec_bus,
								(sec_bus + 1));
				pal_pci_cfg_write(bus, dev, func,
						  BUS_NUM_REG_OFFSET,
						  BUS_NUM_REG_CFG(sub_bus,
								  sec_bus, bus));
				sec_bus = sub_bus + 1;

				/*
				 * Obtain the start memory base address & the
				 * final memory base address of 32 bit BAR
				 */
				bar32_p_limit = g_bar32_p_max;
				bar32_np_limit = g_bar32_np_max;

				get_resource_base_32(bus, dev, func,
						     bar32_p_base,
						     bar32_np_base,
						     bar32_p_limit,
						     bar32_np_limit);

				/*
				 * Obtain the start memory base address & the
				 * final memory base address of 64 bit BAR
				 */
				get_resource_base_64(bus, dev, func,
						     bar64_p_base,
						     g_bar64_p_max);

				/* Update the BAR values of Type 1 Devices */
				pcie_rp_program_bar(bus, dev, func);

				/* Update the base and limit values */
				bar32_p_base = g_bar32_p_start;
				bar32_np_base = g_bar32_np_start;
				bar64_p_base = g_bar64_p_start;
			}

			if (PCIE_HEADER_TYPE(header_value) == TYPE0_HEADER) {
				pcie_program_bar_reg(bus, dev, func);
				sub_bus = sec_bus - 1;
			}
		}
	}

	return sub_bus;
}
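
/*
 * Illustrative walk-through of the recursive bus assignment above (example
 * topology only, not taken from any specific platform):
 *
 *   Bus 0: RP (dev 0)   -> programmed with sec_bus = 1, sub_bus = 0xFF
 *     Bus 1: EP (dev 0) -> BARs programmed, recursion returns sub_bus = 1
 *   RP sub_bus rewritten to 1; a following sibling RP would get sec_bus = 2.
 */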

/*
 * This API clears the primary bus number configured in the Type1 Header.
 * Note: This is done to make sure the hardware is compatible
 * with Linux enumeration.
 */
static void pcie_clear_pri_bus(void)
{
	uint32_t bus;
	uint32_t dev;
	uint32_t func;
	uint32_t bus_value;
	uint32_t header_value;
	uint32_t vendor_id;

	for (bus = 0; bus <= g_pcie_info_table->block[g_pcie_index].end_bus_num;
	     bus++) {
		for (dev = 0; dev < PCIE_MAX_DEV; dev++) {
			for (func = 0; func < PCIE_MAX_FUNC; func++) {
				pal_pci_cfg_read(bus, dev, func, 0, &vendor_id);

				if ((vendor_id == 0x0) ||
				    (vendor_id == 0xFFFFFFFF)) {
					continue;
				}

				pal_pci_cfg_read(bus, dev, func, HEADER_OFFSET,
						 &header_value);
				if (PCIE_HEADER_TYPE(header_value) ==
				    TYPE1_HEADER) {
					pal_pci_cfg_read(bus, dev, func,
							 BUS_NUM_REG_OFFSET,
							 &bus_value);

					bus_value = bus_value &
						PRI_BUS_CLEAR_MASK;

					pal_pci_cfg_write(bus, dev, func,
							  BUS_NUM_REG_OFFSET,
							  bus_value);
				}
			}
		}
	}
}

static void pcie_enumerate_devices(void)
{
	uint32_t pri_bus, sec_bus;
	int rc;

	g_pcie_info_table = plat_pcie_get_info_table();
	if (g_pcie_info_table == NULL) {
		ERROR("PCIe info not returned by platform\n");
		panic();
	}

	if (g_pcie_info_table->num_entries == 0) {
		INFO("Skipping Enumeration\n");
		return;
	}

	/* Get platform specific BAR config parameters */
	rc = plat_pcie_get_bar_config(&g_bar64_p_start, &g_rp_bar64_value,
				      &g_bar32_np_start, &g_bar32_p_start,
				      &g_rp_bar32_value);
	if (rc != 0) {
		ERROR("PCIe bar config parameters not returned by platform\n");
		panic();
	}

	INFO("Starting Enumeration\n");
	while (g_pcie_index < g_pcie_info_table->num_entries) {
		pri_bus = g_pcie_info_table->block[g_pcie_index].start_bus_num;

		sec_bus = pri_bus + 1;

		pcie_enumerate_device(pri_bus, sec_bus);
		pcie_clear_pri_bus();

		g_pcie_index++;
	}
	g_enumerate = 0;
	g_pcie_index = 0;
}

void pcie_init(void)
{
	static bool is_init;

	/* Create PCIe table and enumeration */
	if (!is_init) {
		pcie_enumerate_devices();

		pcie_create_info_table();
		is_init = true;
	}
}
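
/*
 * Illustrative usage sketch (not part of the driver), assuming a TFTF test
 * function that needs PCIe devices: initialise once, then query the table
 * and skip if the platform exposes no suitable functions.
 *
 *   pcie_init();
 *
 *   pcie_device_bdf_table_t *tbl = pcie_get_bdf_table();
 *
 *   if (tbl->num_entries == 0) {
 *           return TEST_RESULT_SKIPPED;
 *   }
 */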