/*
 * Copyright (c) 2024-2025, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>

#include <debug.h>
#include <mmio.h>
#include <pcie.h>
#include <pcie_doe.h>
#include <pcie_spec.h>
#include <pcie_dvsec_rmeda.h>
#include <platform.h>
#include <plat_pcie_enum.h>
#include <tftf_lib.h>

#define PCIE_DEBUG VERBOSE

const struct pcie_info_table *g_pcie_info_table;
static pcie_device_bdf_table_t *g_pcie_bdf_table;
static pcie_device_bdf_table_t pcie_bdf_table;

static uint32_t g_pcie_index;
static uint32_t g_enumerate;

/* 64-bit address initialisation */
static uint64_t g_bar64_p_start;
static uint64_t g_rp_bar64_value;
static uint64_t g_bar64_p_max;
static uint32_t g_64_bus, g_bar64_size;

/* 32-bit address initialisation */
static uint32_t g_bar32_np_start;
static uint32_t g_bar32_p_start;
static uint32_t g_rp_bar32_value;
static uint32_t g_bar32_np_max;
static uint32_t g_bar32_p_max;
static uint32_t g_np_bar_size, g_p_bar_size;
static uint32_t g_np_bus, g_p_bus;

static uintptr_t pcie_cfg_addr(uint32_t bdf)
{
	uint32_t bus = PCIE_EXTRACT_BDF_BUS(bdf);
	uint32_t dev = PCIE_EXTRACT_BDF_DEV(bdf);
	uint32_t func = PCIE_EXTRACT_BDF_FUNC(bdf);
	uint32_t segment = PCIE_EXTRACT_BDF_SEG(bdf);
	uint32_t cfg_addr;
	uintptr_t ecam_base = 0;
	unsigned int i = 0;

	assert((bus < PCIE_MAX_BUS) && (dev < PCIE_MAX_DEV) && (func < PCIE_MAX_FUNC));
	assert(g_pcie_info_table != NULL);

	while (i < g_pcie_info_table->num_entries) {
		/* Derive ECAM specific information */
		const pcie_info_block_t *block = &g_pcie_info_table->block[i];

		if ((bus >= block->start_bus_num) &&
		    (bus <= block->end_bus_num) &&
		    (segment == block->segment_num)) {
			ecam_base = block->ecam_base;
			break;
		}
		i++;
	}

	assert(ecam_base != 0);

	/*
	 * There are 8 functions per device and 32 devices per bus,
	 * and each function has a 4KB config space.
	 */
	cfg_addr = (bus * PCIE_MAX_DEV * PCIE_MAX_FUNC * PCIE_CFG_SIZE) +
		(dev * PCIE_MAX_FUNC * PCIE_CFG_SIZE) + (func * PCIE_CFG_SIZE);

	return ecam_base + cfg_addr;
}
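
/*
 * Worked example (illustrative only, assuming a 4KB config space per
 * function, i.e. PCIE_CFG_SIZE == 0x1000): bus 1, device 2, function 3
 * maps to the following offset from the ECAM base of the matching
 * segment/bus range:
 *
 *	cfg_addr = (1 * 32 * 8 * 0x1000) + (2 * 8 * 0x1000) + (3 * 0x1000)
 *	         = 0x100000 + 0x10000 + 0x3000
 *	         = 0x113000
 */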

/*
 * @brief This API reads 32-bit data from the PCIe config space pointed to by
 *        Bus, Device, Function and register offset.
 * 1. Caller - Test Suite
 * 2. Prerequisite - pcie_create_info_table
 * @param bdf - Function's Segment/Bus/Dev/Func in PCIE_CREATE_BDF format
 * @param offset - Register offset within a Function's PCIe config space
 *
 * @return 32-bit data read from the config space
 */
uint32_t pcie_read_cfg(uint32_t bdf, uint32_t offset)
{
	uintptr_t addr = pcie_cfg_addr(bdf);

	return mmio_read_32(addr + offset);
}

/*
 * @brief This API writes 32-bit data to the PCIe config space pointed to by
 *        Bus, Device, Function and register offset.
 * 1. Caller - Test Suite
 * 2. Prerequisite - pcie_create_info_table
 * @param bdf - Function's Segment/Bus/Dev/Func in PCIE_CREATE_BDF format
 * @param offset - Register offset within a Function's PCIe config space
 * @param data - data to be written to the config space
 *
 * @return None
 */
void pcie_write_cfg(uint32_t bdf, uint32_t offset, uint32_t data)
{
	uintptr_t addr = pcie_cfg_addr(bdf);

	mmio_write_32(addr + offset, data);
}
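
/*
 * A minimal usage sketch (illustrative only, not called by the library):
 * probe a Function by reading its Vendor/Device ID register. The
 * segment/bus/device/function numbers below are arbitrary examples;
 * PCIE_CREATE_BDF, TYPE01_VIDR and PCIE_UNKNOWN_RESPONSE come from the
 * PCIe headers included above.
 *
 *	uint32_t bdf = PCIE_CREATE_BDF(0, 1, 0, 0);
 *	uint32_t vidr = pcie_read_cfg(bdf, TYPE01_VIDR);
 *
 *	if (vidr != PCIE_UNKNOWN_RESPONSE) {
 *		... Function is present; vidr[15:0] is the Vendor ID ...
 *	}
 */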

/*
 * @brief Check if BDF is PCIe Host Bridge.
 *
 * @param bdf - Function's Segment/Bus/Dev/Func in PCIE_CREATE_BDF format
 * @return false If not a Host Bridge, true If it's a Host Bridge.
 */
static bool pcie_is_host_bridge(uint32_t bdf)
{
	uint32_t reg_value = pcie_read_cfg(bdf, TYPE01_RIDR);

	if ((HB_BASE_CLASS == ((reg_value >> CC_BASE_SHIFT) & CC_BASE_MASK)) &&
	    (HB_SUB_CLASS == ((reg_value >> CC_SUB_SHIFT) & CC_SUB_MASK))) {
		return true;
	}

	return false;
}

/*
 * @brief Find a Function's config capability offset matching its input
 *        parameter cid. cid_offset is set to the matching capability offset
 *        w.r.t. zero.
 *
 * @param bdf - Segment/Bus/Dev/Func in the format of PCIE_CREATE_BDF
 * @param cid_type - PCIE_CAP for a PCI capability, PCIE_ECAP for a PCIe
 *                   extended capability
 * @param cid - Capability ID
 * @param cid_offset - On return, points to cid offset in Function config space
 * @return PCIE_CAP_NOT_FOUND, if there was a failure in finding the required
 *         capability.
 *         PCIE_SUCCESS, if the search was successful.
 */
uint32_t pcie_find_capability(uint32_t bdf, uint32_t cid_type, uint32_t cid,
			      uint32_t *cid_offset)
{
	uint32_t reg_value, next_cap_offset;

	if (cid_type == PCIE_CAP) {
		/* Search in PCIe configuration space */
		reg_value = pcie_read_cfg(bdf, TYPE01_CPR);

		next_cap_offset = (reg_value & TYPE01_CPR_MASK);
		while (next_cap_offset != 0) {
			reg_value = pcie_read_cfg(bdf, next_cap_offset);
			if ((reg_value & PCIE_CIDR_MASK) == cid) {
				*cid_offset = next_cap_offset;
				return PCIE_SUCCESS;
			}
			next_cap_offset = ((reg_value >> PCIE_NCPR_SHIFT) &
					   PCIE_NCPR_MASK);
		}
	} else if (cid_type == PCIE_ECAP) {
		/* Search in PCIe extended configuration space */
		next_cap_offset = PCIE_ECAP_START;
		while (next_cap_offset != 0) {
			reg_value = pcie_read_cfg(bdf, next_cap_offset);
			if ((reg_value & PCIE_ECAP_CIDR_MASK) == cid) {
				*cid_offset = next_cap_offset;
				return PCIE_SUCCESS;
			}
			next_cap_offset = ((reg_value >> PCIE_ECAP_NCPR_SHIFT) &
					   PCIE_ECAP_NCPR_MASK);
		}
	}

	/* The capability was not found */
	return PCIE_CAP_NOT_FOUND;
}
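
/*
 * A minimal usage sketch (illustrative only): locate the PCI Express
 * Capability structure and the DOE extended capability of a Function.
 * CID_PCIECS and ECID_DOE are the capability IDs already used elsewhere
 * in this file; error handling is reduced to a status check.
 *
 *	uint32_t pciecs_base, doe_base;
 *
 *	if (pcie_find_capability(bdf, PCIE_CAP, CID_PCIECS, &pciecs_base) ==
 *	    PCIE_SUCCESS) {
 *		... Function is PCI Express; pciecs_base holds the offset ...
 *	}
 *
 *	if (pcie_find_capability(bdf, PCIE_ECAP, ECID_DOE, &doe_base) ==
 *	    PCIE_SUCCESS) {
 *		... Function implements a DOE mailbox at doe_base ...
 *	}
 */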

/*
 * @brief This API is used as a placeholder to check if the bdf
 *        obtained is valid or not
 *
 * @param bdf
 * @return true if bdf is valid else false
 */
static bool pcie_check_device_valid(uint32_t bdf)
{
	(void) bdf;
	/*
	 * Add BDFs to this function if PCIe tests
	 * need to be ignored for a BDF for any reason
	 */
	return true;
}

/*
 * @brief Returns whether a PCIe Function is an on-chip peripheral or not
 *
 * @param bdf - Segment/Bus/Dev/Func in the format of PCIE_CREATE_BDF
 * @return Returns TRUE if the Function is an on-chip peripheral, FALSE if it
 *         is not an on-chip peripheral
 */
static bool pcie_is_onchip_peripheral(uint32_t bdf)
{
	(void)bdf;
	return false;
}

/*
 * @brief Returns the type of pcie device or port for the given bdf
 *
 * @param bdf - Segment/Bus/Dev/Func in the format of PCIE_CREATE_BDF
 * @return Returns (1 << 0b1001) for RCiEP, (1 << 0b1010) for RCEC,
 *         (1 << 0b0000) for EP, (1 << 0b0100) for RP,
 *         (1 << 0b1100) for iEP_EP, (1 << 0b1011) for iEP_RP,
 *         (1 << PCIECR[7:4]) for any other device type.
 */
static uint32_t pcie_device_port_type(uint32_t bdf)
{
	uint32_t pciecs_base, reg_value, dp_type;

	/*
	 * Get the PCI Express Capability structure offset and
	 * use that offset to read pci express capabilities register
	 */
	pcie_find_capability(bdf, PCIE_CAP, CID_PCIECS, &pciecs_base);
	reg_value = pcie_read_cfg(bdf, pciecs_base + CIDR_OFFSET);

	/* Read Device/Port bits [7:4] in Function's PCIe Capabilities register */
	dp_type = (reg_value >> ((PCIECR_OFFSET - CIDR_OFFSET)*8 +
		   PCIECR_DPT_SHIFT)) & PCIECR_DPT_MASK;
	dp_type = (1 << dp_type);

	/* Check if the device/port is an on-chip peripheral */
	if (pcie_is_onchip_peripheral(bdf)) {
		if (dp_type == EP) {
			dp_type = iEP_EP;
		} else if (dp_type == RP) {
			dp_type = iEP_RP;
		}
	}

	/* Return device/port type */
	return dp_type;
}
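
/*
 * A minimal usage sketch (illustrative only): dp_type is a one-hot encoding
 * of the Device/Port Type field, so it can be compared directly against the
 * RP/EP/RCiEP/... constants used throughout this file.
 *
 *	if (pcie_device_port_type(bdf) == RP) {
 *		... bdf is a Root Port ...
 *	}
 */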

/*
 * @brief Returns the pcie_dev entry of the upstream Root Port of a PCIe
 *        device Function.
 *
 * @param bdf - Function's Segment/Bus/Dev/Func in PCIE_CREATE_BDF format
 * @return Pointer to the Root Port's pcie_dev_t on success, NULL on failure.
 */
static pcie_dev_t *pcie_get_rootport(uint32_t bdf)
{
	uint32_t seg_num, sec_bus, sub_bus;
	uint32_t reg_value, dp_type, index = 0;
	uint32_t rp_bdf;

	dp_type = pcie_device_port_type(bdf);

	PCIE_DEBUG("DP type 0x%x\n", dp_type);

	/* A RP or iEP_RP has no upstream Root Port of its own */
	if ((dp_type == RP) || (dp_type == iEP_RP)) {
		return NULL;
	}

	/* RCiEP and RCEC Functions have no upstream Root Port */
	if ((dp_type == RCiEP) || (dp_type == RCEC)) {
		return NULL;
	}

	assert(g_pcie_bdf_table != NULL);

	while (index < g_pcie_bdf_table->num_entries) {
		rp_bdf = g_pcie_bdf_table->device[index].bdf;

		/*
		 * Extract Secondary and Subordinate Bus numbers of the
		 * upstream Root port and check if the input function's
		 * bus number falls within that range.
		 */
		reg_value = pcie_read_cfg(rp_bdf, TYPE1_PBN);
		seg_num = PCIE_EXTRACT_BDF_SEG(rp_bdf);
		sec_bus = ((reg_value >> SECBN_SHIFT) & SECBN_MASK);
		sub_bus = ((reg_value >> SUBBN_SHIFT) & SUBBN_MASK);
		dp_type = pcie_device_port_type(rp_bdf);

		if (((dp_type == RP) || (dp_type == iEP_RP)) &&
		    (sec_bus <= PCIE_EXTRACT_BDF_BUS(bdf)) &&
		    (sub_bus >= PCIE_EXTRACT_BDF_BUS(bdf)) &&
		    (seg_num == PCIE_EXTRACT_BDF_SEG(bdf))) {
			return &g_pcie_bdf_table->device[index];
		}

		index++;
	}

	/* Return failure */
	ERROR("PCIe Hierarchy fail: RP of bdf 0x%x not found\n", bdf);
	return NULL;
}
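
/*
 * A minimal usage sketch (illustrative only): walk from an Endpoint to its
 * upstream Root Port. A NULL return means the Function either is itself a
 * Root Port, is a RCiEP/RCEC, or has no Root Port in the BDF table.
 *
 *	pcie_dev_t *rp = pcie_get_rootport(bdf);
 *
 *	if (rp != NULL) {
 *		... rp->bdf identifies the upstream Root Port ...
 *	}
 */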

/*
 * @brief Finds the upstream Root Port of every Function in the BDF table and
 *        records it in the table entry (sanity check that all Endpoints have
 *        a Root Port)
 *
 * @param None
 * @return 0 on completion
 */
static uint32_t pcie_populate_device_rootport(void)
{
	uint32_t bdf;
	pcie_device_bdf_table_t *bdf_tbl_ptr = g_pcie_bdf_table;
	pcie_dev_t *rp_dev;

	assert(bdf_tbl_ptr != NULL);

	for (unsigned int tbl_index = 0; tbl_index < bdf_tbl_ptr->num_entries;
	     tbl_index++) {
		bdf = bdf_tbl_ptr->device[tbl_index].bdf;

		/* Checks if the BDF has RootPort */
		rp_dev = pcie_get_rootport(bdf);

		bdf_tbl_ptr->device[tbl_index].rp_dev = rp_dev;

		if (rp_dev != NULL) {
			INFO("Dev bdf: 0x%x RP bdf: 0x%x\n", bdf,
			     rp_dev->bdf);
		} else {
			INFO("Dev bdf: 0x%x RP bdf: none\n", bdf);
		}
	}

	return 0;
}

/*
 * @brief Returns the header type of the input pcie device function
 *
 * @param bdf - Segment/Bus/Dev/Func in the format of PCIE_CREATE_BDF
 * @return TYPE0_HEADER for functions with Type 0 config space header,
 *         TYPE1_HEADER for functions with Type 1 config space header.
 */
static uint32_t pcie_function_header_type(uint32_t bdf)
{
	/* Read four bytes of config space starting from cache line size register */
	uint32_t reg_value = pcie_read_cfg(bdf, TYPE01_CLSR);

	/* Extract header type register value */
	reg_value = ((reg_value >> TYPE01_HTR_SHIFT) & TYPE01_HTR_MASK);

	/* Header layout bits within header type register indicate the header type */
	return ((reg_value >> HTR_HL_SHIFT) & HTR_HL_MASK);
}

/*
 * @brief Returns the ECAM address of the input PCIe function
 *
 * @param bdf - Segment/Bus/Dev/Func in PCIE_CREATE_BDF format
 * @return ECAM address if success, else NULL address
 */
static uintptr_t pcie_get_ecam_base(uint32_t bdf)
{
	uint8_t ecam_index = 0, sec_bus = 0, sub_bus;
	uint16_t seg_num = (uint16_t)PCIE_EXTRACT_BDF_SEG(bdf);
	uint32_t reg_value;
	uintptr_t ecam_base = 0;

	assert(g_pcie_info_table != NULL);

	while (ecam_index < g_pcie_info_table->num_entries) {
		/* Derive ECAM specific information */
		const pcie_info_block_t *block = &g_pcie_info_table->block[ecam_index];

		if (seg_num == block->segment_num) {
			if (pcie_function_header_type(bdf) == TYPE0_HEADER) {
				/* Return ecam_base if Type0 Header */
				ecam_base = block->ecam_base;
				break;
			}

			/* Check for Secondary/Subordinate bus if Type1 Header */
			reg_value = pcie_read_cfg(bdf, TYPE1_PBN);
			sec_bus = ((reg_value >> SECBN_SHIFT) & SECBN_MASK);
			sub_bus = ((reg_value >> SUBBN_SHIFT) & SUBBN_MASK);

			if ((sec_bus >= block->start_bus_num) &&
			    (sub_bus <= block->end_bus_num)) {
				ecam_base = block->ecam_base;
				break;
			}
		}
		ecam_index++;
	}

	return ecam_base;
}

static void pcie_devices_init_fields(void)
{
	pcie_device_bdf_table_t *bdf_tbl_ptr = g_pcie_bdf_table;
	pcie_dev_t *pcie_dev;
	uint32_t status;
	uint32_t base;
	uint32_t bdf;

	assert(bdf_tbl_ptr != NULL);

	for (uint32_t i = 0; i < bdf_tbl_ptr->num_entries; i++) {
		pcie_dev = &bdf_tbl_ptr->device[i];
		bdf = pcie_dev->bdf;

		pcie_dev->dp_type = pcie_device_port_type(bdf);
		pcie_dev->ecam_base = pcie_get_ecam_base(bdf);

		/* Has DOE? */
		status = pcie_find_capability(bdf, PCIE_ECAP, ECID_DOE, &base);
		if (status == PCIE_SUCCESS) {
			pcie_dev->cflags |= PCIE_DEV_CFLAG_DOE;
			pcie_dev->doe_cap_base = base;
		}

		/* Has IDE? */
		status = pcie_find_capability(bdf, PCIE_ECAP, ECID_IDE, &base);
		if (status == PCIE_SUCCESS) {
			pcie_dev->cflags |= PCIE_DEV_CFLAG_IDE;
			pcie_dev->ide_cap_base = base;
		}

		if (pcie_dev->dp_type == RP) {
			status = pcie_find_rmeda_capability(bdf, &base);
			if (status == PCIE_SUCCESS) {
				pcie_dev->cflags |= PCIE_DEV_CFLAG_DVSEC_RMEDA;
				pcie_dev->dvsec_rmeda_cap_base = base;
			}
		}
	}
}

/*
 * @brief Returns the BDF Table pointer
 *
 * @param None
 *
 * @return BDF Table pointer
 */
pcie_device_bdf_table_t *pcie_get_bdf_table(void)
{
	assert(g_pcie_bdf_table != NULL);

	return g_pcie_bdf_table;
}
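
/*
 * A minimal usage sketch (illustrative only): after pcie_init() has run,
 * callers can walk the BDF table and use the cflags populated by
 * pcie_devices_init_fields() to pick out devices of interest, for example
 * those exposing a DOE mailbox.
 *
 *	pcie_device_bdf_table_t *tbl = pcie_get_bdf_table();
 *
 *	for (uint32_t i = 0U; i < tbl->num_entries; i++) {
 *		pcie_dev_t *dev = &tbl->device[i];
 *
 *		if ((dev->cflags & PCIE_DEV_CFLAG_DOE) != 0U) {
 *			... dev->doe_cap_base holds the DOE capability offset ...
 *		}
 *	}
 */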

/*
 * @brief This API creates the device bdf table from enumeration
 *
 * @param None
 *
 * @return None
 */
static void pcie_create_device_bdf_table(void)
{
	uint32_t seg_num, start_bus, end_bus;
	uint32_t bus_index, dev_index, func_index, ecam_index;
	uint32_t bdf, reg_value, cid_offset, status;

	assert(g_pcie_bdf_table != NULL);

	g_pcie_bdf_table->num_entries = 0;

	assert(g_pcie_info_table != NULL);
	assert(g_pcie_info_table->num_entries != 0);

	for (ecam_index = 0; ecam_index < g_pcie_info_table->num_entries; ecam_index++) {
		/* Derive ECAM specific information */
		const pcie_info_block_t *block = &g_pcie_info_table->block[ecam_index];

		seg_num = block->segment_num;
		start_bus = block->start_bus_num;
		end_bus = block->end_bus_num;

		/* Iterate over all buses, devices and functions in this ecam */
		for (bus_index = start_bus; bus_index <= end_bus; bus_index++) {
			for (dev_index = 0; dev_index < PCIE_MAX_DEV; dev_index++) {
				for (func_index = 0; func_index < PCIE_MAX_FUNC; func_index++) {
					/* Form BDF using seg, bus, device, function numbers */
					bdf = PCIE_CREATE_BDF(seg_num, bus_index, dev_index,
							      func_index);

					/* Probe PCIe device Function with this BDF */
					reg_value = pcie_read_cfg(bdf, TYPE01_VIDR);

					/* Store the Function's BDF if there was a valid response */
					if (reg_value != PCIE_UNKNOWN_RESPONSE) {
						/* Skip if the device is a host bridge */
						if (pcie_is_host_bridge(bdf)) {
							continue;
						}

						/* Skip if the device is a PCI legacy device */
						if (pcie_find_capability(bdf, PCIE_CAP,
							CID_PCIECS, &cid_offset) != PCIE_SUCCESS) {
							continue;
						}

						status = pcie_check_device_valid(bdf);
						if (!status) {
							continue;
						}

						g_pcie_bdf_table->device[
							g_pcie_bdf_table->num_entries++].bdf = bdf;

						assert(g_pcie_bdf_table->num_entries < PCIE_DEVICES_MAX);
					}
				}
			}
		}
	}

	/* Sanity Check : Confirm all EP (normal, integrated) have a rootport */
	pcie_populate_device_rootport();

	/*
	 * Once devices are enumerated and rootports are assigned, initialize
	 * the rest of the pcie_dev fields
	 */
	pcie_devices_init_fields();

	INFO("Number of BDFs found : %u\n", g_pcie_bdf_table->num_entries);
}

/*
 * @brief This API prints all the PCIe Devices info
 * 1. Caller - Validation layer.
 * 2. Prerequisite - pcie_create_info_table()
 * @param None
 * @return None
 */
static void pcie_print_device_info(void)
{
	uint32_t bdf, dp_type;
	uint32_t tbl_index = 0;
	uint32_t ecam_index = 0;
	uint32_t ecam_base, ecam_start_bus, ecam_end_bus;
	pcie_device_bdf_table_t *bdf_tbl_ptr = g_pcie_bdf_table;
	uint32_t num_rciep __unused = 0, num_rcec __unused = 0;
	uint32_t num_iep __unused = 0, num_irp __unused = 0;
	uint32_t num_ep __unused = 0, num_rp __unused = 0;
	uint32_t num_dp __unused = 0, num_up __unused = 0;
	uint32_t num_pcie_pci __unused = 0, num_pci_pcie __unused = 0;
	uint32_t bdf_counter;

	assert(bdf_tbl_ptr != NULL);

	if (bdf_tbl_ptr->num_entries == 0) {
		INFO("BDF Table: No RCiEP or iEP found\n");
		return;
	}

	for (tbl_index = 0; tbl_index < bdf_tbl_ptr->num_entries; tbl_index++) {
		bdf = bdf_tbl_ptr->device[tbl_index].bdf;
		dp_type = pcie_device_port_type(bdf);

		switch (dp_type) {
		case RCiEP:
			num_rciep++;
			break;
		case RCEC:
			num_rcec++;
			break;
		case EP:
			num_ep++;
			break;
		case RP:
			num_rp++;
			break;
		case iEP_EP:
			num_iep++;
			break;
		case iEP_RP:
			num_irp++;
			break;
		case UP:
			num_up++;
			break;
		case DP:
			num_dp++;
			break;
		case PCI_PCIE:
			num_pci_pcie++;
			break;
		case PCIE_PCI:
			num_pcie_pci++;
			break;
		default:
			ERROR("Unknown dp_type 0x%x\n", dp_type);
		}
	}

	INFO("Number of RCiEP          : %u\n", num_rciep);
	INFO("Number of RCEC           : %u\n", num_rcec);
	INFO("Number of EP             : %u\n", num_ep);
	INFO("Number of RP             : %u\n", num_rp);
	INFO("Number of iEP_EP         : %u\n", num_iep);
	INFO("Number of iEP_RP         : %u\n", num_irp);
	INFO("Number of UP of switch   : %u\n", num_up);
	INFO("Number of DP of switch   : %u\n", num_dp);
	INFO("Number of PCI/PCIe Bridge: %u\n", num_pci_pcie);
	INFO("Number of PCIe/PCI Bridge: %u\n", num_pcie_pci);

	assert(g_pcie_info_table != NULL);

	while (ecam_index < g_pcie_info_table->num_entries) {

		/* Derive ECAM specific information */
		const pcie_info_block_t *block = &g_pcie_info_table->block[ecam_index];

		ecam_base = block->ecam_base;
		ecam_start_bus = block->start_bus_num;
		ecam_end_bus = block->end_bus_num;
		tbl_index = 0;
		bdf_counter = 0;

		INFO("ECAM %u: base 0x%x\n", ecam_index, ecam_base);

		while (tbl_index < bdf_tbl_ptr->num_entries) {
			uint32_t seg_num, bus_num, dev_num, func_num;
			uint32_t device_id __unused, vendor_id __unused, reg_value;
			uint32_t bdf, dev_ecam_base;

			bdf = bdf_tbl_ptr->device[tbl_index++].bdf;
			seg_num = PCIE_EXTRACT_BDF_SEG(bdf);
			bus_num = PCIE_EXTRACT_BDF_BUS(bdf);
			dev_num = PCIE_EXTRACT_BDF_DEV(bdf);
			func_num = PCIE_EXTRACT_BDF_FUNC(bdf);

			reg_value = pcie_read_cfg(bdf, TYPE01_VIDR);
			device_id = (reg_value >> TYPE01_DIDR_SHIFT) & TYPE01_DIDR_MASK;
			vendor_id = (reg_value >> TYPE01_VIDR_SHIFT) & TYPE01_VIDR_MASK;

			dev_ecam_base = pcie_get_ecam_base(bdf);

			if ((ecam_base == dev_ecam_base) &&
			    (bus_num >= ecam_start_bus) &&
			    (bus_num <= ecam_end_bus)) {
				bdf_counter = 1;
				bdf = PCIE_CREATE_BDF(seg_num, bus_num, dev_num, func_num);
				INFO(" BDF: 0x%x\n", bdf);
				INFO(" Seg: 0x%x Bus: 0x%x Dev: 0x%x "
				     "Func: 0x%x Dev ID: 0x%x Vendor ID: 0x%x\n",
				     seg_num, bus_num, dev_num, func_num,
				     device_id, vendor_id);
			}
		}

		if (bdf_counter == 0) {
			INFO(" No BDF devices in ECAM region index %d\n", ecam_index);
		}

		ecam_index++;
	}
}

/*
 * @brief Create PCIe table and PCI enumeration
 * @param void
 * @return void
 */
static void pcie_create_info_table(void)
{
	unsigned int num_ecam;

	INFO("Creating PCIe info table\n");
	g_pcie_bdf_table = &pcie_bdf_table;

	num_ecam = g_pcie_info_table->num_entries;
	INFO("Number of ECAM regions : %u\n", num_ecam);
	if ((num_ecam == 0) || (num_ecam > MAX_PCIE_INFO_ENTRIES)) {
		ERROR("PCIe info entries invalid\n");
		panic();
	}
	pcie_create_device_bdf_table();
	pcie_print_device_info();
}

static void pal_pci_cfg_write(uint32_t bus, uint32_t dev, uint32_t func,
			      uint32_t offset, uint32_t data)
{
	pcie_write_cfg(PCIE_CREATE_BDF(0, bus, dev, func), offset, data);
}

static void pal_pci_cfg_read(uint32_t bus, uint32_t dev, uint32_t func,
			     uint32_t offset, uint32_t *value)
{
	*value = pcie_read_cfg(PCIE_CREATE_BDF(0, bus, dev, func), offset);
}

/*
 * This API programs the Memory Base and Memory Limit registers of the Bus,
 * Device and Function of Type1 Header
 */
static void get_resource_base_32(uint32_t bus, uint32_t dev, uint32_t func,
				 uint32_t bar32_p_base, uint32_t bar32_np_base,
				 uint32_t bar32_p_limit, uint32_t bar32_np_limit)
{
	uint32_t mem_bar_np;
	uint32_t mem_bar_p;

	/* Update the 32 bit NP-BAR start address for the next iteration */
	if (bar32_np_base != g_bar32_np_start) {
		if ((g_bar32_np_start << 12) != 0) {
			g_bar32_np_start = (g_bar32_np_start &
					    MEM_BASE32_LIM_MASK) + BAR_INCREMENT;
		}

		if (bar32_np_limit == g_bar32_np_start) {
			bar32_np_limit = bar32_np_limit - BAR_INCREMENT;
		}

		pal_pci_cfg_read(bus, dev, func, NON_PRE_FET_OFFSET,
				 &mem_bar_np);
		mem_bar_np = ((bar32_np_limit & MEM_BASE32_LIM_MASK) |
			      mem_bar_np);
		pal_pci_cfg_write(bus, dev, func, NON_PRE_FET_OFFSET,
				  mem_bar_np);
	}

	/* Update the 32 bit P-BAR start address for the next iteration */
	if (bar32_p_base != g_bar32_p_start) {
		if ((g_bar32_p_start << 12) != 0) {
			g_bar32_p_start = (g_bar32_p_start &
					   MEM_BASE32_LIM_MASK) + BAR_INCREMENT;
		}

		if (bar32_p_limit == g_bar32_p_start) {
			bar32_p_limit = bar32_p_limit - BAR_INCREMENT;
		}

		pal_pci_cfg_read(bus, dev, func, PRE_FET_OFFSET, &mem_bar_p);
		mem_bar_p = ((bar32_p_limit & MEM_BASE32_LIM_MASK) | mem_bar_p);
		pal_pci_cfg_write(bus, dev, func, PRE_FET_OFFSET, mem_bar_p);
	}
}

/*
 * This API programs the Memory Base and Memory Limit registers of the Bus,
 * Device and Function of Type1 Header
 */
static void get_resource_base_64(uint32_t bus, uint32_t dev, uint32_t func,
				 uint64_t bar64_p_base, uint64_t g_bar64_p_max)
{
	uint32_t bar64_p_lower32_base = (uint32_t)bar64_p_base;
	uint32_t bar64_p_upper32_base = (uint32_t)(bar64_p_base >> 32);
	uint32_t bar64_p_lower32_limit = (uint32_t)g_bar64_p_max;
	uint32_t bar64_p_upper32_limit = (uint32_t)(g_bar64_p_max >> 32);

	/* Obtain the memory base and memory limit */
	bar64_p_lower32_base = REG_MASK_SHIFT(bar64_p_lower32_base);
	bar64_p_lower32_limit = REG_MASK_SHIFT(bar64_p_lower32_limit);
	uint32_t mem_bar_p = ((bar64_p_lower32_limit << 16) |
			      bar64_p_lower32_base);

	/* Configure Memory base and Memory limit register */
	if ((bar64_p_base != g_bar64_p_max) && (g_bar64_p_start <=
	    g_bar64_p_max)) {
		if ((g_bar64_p_start << 12) != 0) {
			g_bar64_p_start = (g_bar64_p_start &
					   MEM_BASE64_LIM_MASK) + BAR_INCREMENT;
		}

		if (bar64_p_lower32_limit == g_bar64_p_start) {
			bar64_p_lower32_limit = bar64_p_lower32_limit -
						BAR_INCREMENT;
		}

		g_bar64_p_start = (g_bar64_p_start & MEM_BASE64_LIM_MASK) +
				  BAR_INCREMENT;

		pal_pci_cfg_write(bus, dev, func, PRE_FET_OFFSET, mem_bar_p);
		pal_pci_cfg_write(bus, dev, func, PRE_FET_OFFSET + 4,
				  bar64_p_upper32_base);
		pal_pci_cfg_write(bus, dev, func, PRE_FET_OFFSET + 8,
				  bar64_p_upper32_limit);
	}
}

static void pcie_rp_program_bar(uint32_t bus, uint32_t dev, uint32_t func)
{
	uint64_t bar_size, bar_upper_bits;
	uint32_t offset = BAR0_OFFSET;
	uint32_t bar_reg_value, bar_lower_bits;

	while (offset <= TYPE1_BAR_MAX_OFF) {
		pal_pci_cfg_read(bus, dev, func, offset, &bar_reg_value);

		if (BAR_REG(bar_reg_value) == BAR_64_BIT) {
			/*
			 * BAR supports 64-bit address therefore, write all 1's
			 * to BARn and BARn+1 and identify the size requested
			 */
			pal_pci_cfg_write(bus, dev, func, offset, 0xFFFFFFF0);
			pal_pci_cfg_write(bus, dev, func, offset + 4,
					  0xFFFFFFFF);
			pal_pci_cfg_read(bus, dev, func, offset,
					 &bar_lower_bits);
			bar_size = bar_lower_bits & BAR_MASK;

			pal_pci_cfg_read(bus, dev, func, offset + 4,
					 &bar_reg_value);
			bar_upper_bits = bar_reg_value;
			bar_size = bar_size | (bar_upper_bits << 32);

			bar_size = ~bar_size + 1;

			/*
			 * If BAR size is 0, then BAR not implemented, move to
			 * next BAR
			 */
			if (bar_size == 0) {
				offset = offset + 8;
				continue;
			}

			pal_pci_cfg_write(bus, dev, func, offset,
					  (uint32_t)g_rp_bar64_value);
			pal_pci_cfg_write(bus, dev, func, offset + 4,
					  (uint32_t)(g_rp_bar64_value >> 32));
			offset = offset + 8;
		} else {
			/*
			 * BAR supports 32-bit address. Write all 1's to BARn
			 * and identify the size requested
			 */
			pal_pci_cfg_write(bus, dev, func, offset, 0xFFFFFFF0);
			pal_pci_cfg_read(bus, dev, func, offset,
					 &bar_lower_bits);
			bar_reg_value = bar_lower_bits & BAR_MASK;
			bar_size = ~bar_reg_value + 1;

			/*
			 * If BAR size is 0, then BAR not implemented, move to
			 * next BAR
			 */
			if (bar_size == 0) {
				offset = offset + 4;
				continue;
			}

			pal_pci_cfg_write(bus, dev, func, offset,
					  g_rp_bar32_value);
			g_rp_bar32_value = g_rp_bar32_value + (uint32_t)bar_size;
			offset = offset + 4;
		}
	}
}
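
/*
 * Worked example (illustrative only) of the "write all 1's" BAR sizing used
 * above and in pcie_program_bar_reg() below: if writing 0xFFFFFFF0 to a
 * 32-bit BAR reads back as 0xFFF00000 once the attribute bits are masked
 * off, the requested size is
 *
 *	bar_size = ~0xFFF00000 + 1 = 0x00100000	(1MB)
 *
 * A masked read-back of all zeros (size 0) means the BAR is not implemented.
 */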

/*
 * This API programs all the BAR registers in PCIe config space pointed to by
 * Bus, Device and Function for an End Point PCIe device
 */
static void pcie_program_bar_reg(uint32_t bus, uint32_t dev, uint32_t func)
{
	uint64_t bar_size, bar_upper_bits;
	uint32_t bar_reg_value, bar_lower_bits;
	uint32_t offset = BAR0_OFFSET;
	uint32_t np_bar_size = 0;
	uint32_t p_bar_size = 0, p_bar64_size = 0;

	while (offset <= TYPE0_BAR_MAX_OFF) {
		pal_pci_cfg_read(bus, dev, func, offset, &bar_reg_value);

		if (BAR_MEM(bar_reg_value) == BAR_PRE_MEM) {
			if (BAR_REG(bar_reg_value) == BAR_64_BIT) {
				/*
				 * BAR supports 64-bit address therefore,
				 * write all 1's to BARn and BARn+1 and identify
				 * the size requested
				 */

				pal_pci_cfg_write(bus, dev, func, offset,
						  0xFFFFFFF0);
				pal_pci_cfg_write(bus, dev, func, offset + 4,
						  0xFFFFFFFF);
				pal_pci_cfg_read(bus, dev, func, offset,
						 &bar_lower_bits);
				bar_size = bar_lower_bits & BAR_MASK;

				pal_pci_cfg_read(bus, dev, func, offset + 4,
						 &bar_reg_value);
				bar_upper_bits = bar_reg_value;
				bar_size = bar_size | (bar_upper_bits << 32);

				bar_size = ~bar_size + 1;

				/*
				 * If BAR size is 0, then BAR not implemented,
				 * move to next BAR
				 */
				if (bar_size == 0) {
					offset = offset + 8;
					continue;
				}

				/*
				 * If p_bar64_size is 0 and the bus number is
				 * the same as the previous device's bus
				 * number, check if the current PCIe device's
				 * BAR size is greater than the previous BAR
				 * size; if yes, add the current BAR size to
				 * the updated start address, else add the
				 * previous BAR size to the updated start
				 * address
				 */
				if ((p_bar64_size == 0) && ((g_64_bus == bus))) {
					if (g_bar64_size < bar_size) {
						g_bar64_p_start =
							g_bar64_p_start +
							bar_size;
					} else {
						g_bar64_p_start =
							g_bar64_p_start +
							g_bar64_size;
					}
				} else if ((g_bar64_size < bar_size) &&
					   (p_bar64_size != 0)) {
					g_bar64_p_start = g_bar64_p_start +
							  bar_size;
				} else {
					g_bar64_p_start = g_bar64_p_start +
							  p_bar64_size;
				}

				pal_pci_cfg_write(bus, dev, func, offset,
						  (uint32_t)g_bar64_p_start);
				pal_pci_cfg_write(bus, dev, func, offset + 4,
						  (uint32_t)(g_bar64_p_start >>
							     32));

				p_bar64_size = (uint32_t)bar_size;
				g_bar64_size = (uint32_t)bar_size;
				g_64_bus = bus;
				offset = offset + 8;
			} else {
				/*
				 * BAR supports 32-bit address. Write all 1's
				 * to BARn and identify the size requested
				 */
				pal_pci_cfg_write(bus, dev, func, offset,
						  0xFFFFFFF0);
				pal_pci_cfg_read(bus, dev, func, offset,
						 &bar_lower_bits);
				bar_reg_value = bar_lower_bits & BAR_MASK;
				bar_size = ~bar_reg_value + 1;

				/*
				 * If BAR size is 0, then BAR not implemented,
				 * move to next BAR
				 */
				if (bar_size == 0) {
					offset = offset + 4;
					continue;
				}

				/*
				 * If p_bar_size is 0 and the bus number is
				 * the same as the previous device's bus
				 * number, check if the current PCIe device's
				 * BAR size is greater than the previous BAR
				 * size; if yes, add the current BAR size to
				 * the updated start address, else add the
				 * previous BAR size to the updated start
				 * address
				 */
				if ((p_bar_size == 0) && ((g_p_bus == bus))) {
					if (g_p_bar_size < bar_size) {
						g_bar32_p_start =
							g_bar32_p_start +
							(uint32_t)bar_size;
					} else {
						g_bar32_p_start =
							g_bar32_p_start +
							g_p_bar_size;
					}
				} else if ((g_p_bar_size < bar_size) &&
					   (p_bar_size != 0)) {
					g_bar32_p_start = g_bar32_p_start +
							  (uint32_t)bar_size;
				} else {
					g_bar32_p_start = g_bar32_p_start +
							  p_bar_size;
				}

				pal_pci_cfg_write(bus, dev, func, offset,
						  g_bar32_p_start);
				p_bar_size = (uint32_t)bar_size;
				g_p_bar_size = (uint32_t)bar_size;
				g_p_bus = bus;

				offset = offset + 4;
			}
		} else {
			/*
			 * BAR supports 32-bit address. Write all 1's to BARn
			 * and identify the size requested
			 */
			pal_pci_cfg_write(bus, dev, func, offset, 0xFFFFFFF0);
			pal_pci_cfg_read(bus, dev, func, offset,
					 &bar_lower_bits);
			bar_reg_value = bar_lower_bits & BAR_MASK;
			bar_size = ~bar_reg_value + 1;

			/*
			 * If BAR size is 0, then BAR not implemented, move to
			 * next BAR
			 */
			if (bar_size == 0) {
				if (BAR_REG(bar_lower_bits) == BAR_64_BIT) {
					offset = offset + 8;
				}

				if (BAR_REG(bar_lower_bits) == BAR_32_BIT) {
					offset = offset + 4;
				}

				continue;
			}

			/*
			 * If np_bar_size is 0 and the bus number is the same
			 * as the previous device's bus number, check if the
			 * current PCIe device's BAR size is greater than the
			 * previous BAR size; if yes, add the current BAR size
			 * to the updated start address, else add the previous
			 * BAR size to the updated start address
			 */
			if ((np_bar_size == 0) && ((g_np_bus == bus))) {
				if (g_np_bar_size < bar_size) {
					g_bar32_np_start = g_bar32_np_start +
							   (uint32_t)bar_size;
				} else {
					g_bar32_np_start = g_bar32_np_start +
							   g_np_bar_size;
				}
			} else if ((g_np_bar_size < bar_size) &&
				   (np_bar_size != 0)) {
				g_bar32_np_start = g_bar32_np_start +
						   (uint32_t)bar_size;
			} else {
				g_bar32_np_start = g_bar32_np_start +
						   np_bar_size;
			}

			pal_pci_cfg_write(bus, dev, func, offset,
					  g_bar32_np_start);
			np_bar_size = (uint32_t)bar_size;
			g_np_bar_size = (uint32_t)bar_size;
			g_np_bus = bus;

			pal_pci_cfg_read(bus, dev, func, offset, &bar_reg_value);
			if (BAR_REG(bar_reg_value) == BAR_64_BIT) {
				pal_pci_cfg_write(bus, dev, func,
						  offset + 4, 0);
				offset = offset + 8;
			}

			if (BAR_REG(bar_reg_value) == BAR_32_BIT) {
				offset = offset + 4;
			}
		}

		g_bar32_p_max = g_bar32_p_start;
		g_bar32_np_max = g_bar32_np_start;
		g_bar64_p_max = g_bar64_p_start;
	}
}

/*
 * This API performs the PCIe bus enumeration
 *
 * bus,sec_bus - Bus(8-bits), secondary bus (8-bits)
 * sub_bus - Subordinate bus
 */
static uint32_t pcie_enumerate_device(uint32_t bus, uint32_t sec_bus)
{
	uint32_t vendor_id = 0;
	uint32_t header_value;
	uint32_t sub_bus = bus;
	uint32_t dev;
	uint32_t func;
	uint32_t class_code;
	uint32_t com_reg_value;
	uint32_t bar32_p_limit;
	uint32_t bar32_np_limit;
	uint32_t bar32_p_base = g_bar32_p_start;
	uint32_t bar32_np_base = g_bar32_np_start;
	uint64_t bar64_p_base = g_bar64_p_start;

	if (bus == ((g_pcie_info_table->block[g_pcie_index].end_bus_num) + 1)) {
		return sub_bus;
	}

	for (dev = 0; dev < PCIE_MAX_DEV; dev++) {
		for (func = 0; func < PCIE_MAX_FUNC; func++) {
			pal_pci_cfg_read(bus, dev, func, 0, &vendor_id);

			if ((vendor_id == 0x0) || (vendor_id == 0xFFFFFFFF)) {
				continue;
			}

			/* Skip Hostbridge configuration */
			pal_pci_cfg_read(bus, dev, func, TYPE01_RIDR,
					 &class_code);

			if ((((class_code >> CC_BASE_SHIFT) & CC_BASE_MASK) ==
			     HB_BASE_CLASS) &&
			    (((class_code >> CC_SUB_SHIFT) & CC_SUB_MASK)) ==
			     HB_SUB_CLASS) {
				continue;
			}

			pal_pci_cfg_read(bus, dev, func, HEADER_OFFSET,
					 &header_value);
			if (PCIE_HEADER_TYPE(header_value) == TYPE1_HEADER) {
				/*
				 * Enable memory access, Bus master enable and
				 * I/O access
				 */
				pal_pci_cfg_read(bus, dev, func,
						 COMMAND_REG_OFFSET,
						 &com_reg_value);

				pal_pci_cfg_write(bus, dev, func,
						  COMMAND_REG_OFFSET,
						  (com_reg_value |
						   REG_ACC_DATA));

				pal_pci_cfg_write(bus, dev, func,
						  BUS_NUM_REG_OFFSET,
						  BUS_NUM_REG_CFG(0xFF, sec_bus,
								  bus));

				pal_pci_cfg_write(bus, dev, func,
						  NON_PRE_FET_OFFSET,
						  ((g_bar32_np_start >> 16) &
						   0xFFF0));

				pal_pci_cfg_write(bus, dev, func,
						  PRE_FET_OFFSET,
						  ((g_bar32_p_start >> 16) &
						   0xFFF0));

				sub_bus = pcie_enumerate_device(sec_bus,
								(sec_bus + 1));
				pal_pci_cfg_write(bus, dev, func,
						  BUS_NUM_REG_OFFSET,
						  BUS_NUM_REG_CFG(sub_bus,
								  sec_bus, bus));
				sec_bus = sub_bus + 1;

				/*
				 * Obtain the start memory base address & the
				 * final memory base address of 32 bit BAR
				 */
				bar32_p_limit = g_bar32_p_max;
				bar32_np_limit = g_bar32_np_max;

				get_resource_base_32(bus, dev, func,
						     bar32_p_base,
						     bar32_np_base,
						     bar32_p_limit,
						     bar32_np_limit);

				/*
				 * Obtain the start memory base address & the
				 * final memory base address of 64 bit BAR
				 */
				get_resource_base_64(bus, dev, func,
						     bar64_p_base,
						     g_bar64_p_max);

				/* Update the BAR values of Type 1 Devices */
				pcie_rp_program_bar(bus, dev, func);

				/* Update the base and limit values */
				bar32_p_base = g_bar32_p_start;
				bar32_np_base = g_bar32_np_start;
				bar64_p_base = g_bar64_p_start;
			}

			if (PCIE_HEADER_TYPE(header_value) == TYPE0_HEADER) {
				pcie_program_bar_reg(bus, dev, func);
				sub_bus = sec_bus - 1;
			}
		}
	}

	return sub_bus;
}

/*
 * This API clears the primary bus number configured in the Type1 Header.
 * Note: This is done to make sure the hardware is compatible
 *       with Linux enumeration.
 */
static void pcie_clear_pri_bus(void)
{
	uint32_t bus;
	uint32_t dev;
	uint32_t func;
	uint32_t bus_value;
	uint32_t header_value;
	uint32_t vendor_id;

	for (bus = 0; bus <= g_pcie_info_table->block[g_pcie_index].end_bus_num;
	     bus++) {
		for (dev = 0; dev < PCIE_MAX_DEV; dev++) {
			for (func = 0; func < PCIE_MAX_FUNC; func++) {
				pal_pci_cfg_read(bus, dev, func, 0, &vendor_id);

				if ((vendor_id == 0x0) ||
				    (vendor_id == 0xFFFFFFFF)) {
					continue;
				}

				pal_pci_cfg_read(bus, dev, func, HEADER_OFFSET,
						 &header_value);
				if (PCIE_HEADER_TYPE(header_value) ==
				    TYPE1_HEADER) {
					pal_pci_cfg_read(bus, dev, func,
							 BUS_NUM_REG_OFFSET,
							 &bus_value);

					bus_value = bus_value &
						    PRI_BUS_CLEAR_MASK;

					pal_pci_cfg_write(bus, dev, func,
							  BUS_NUM_REG_OFFSET,
							  bus_value);
				}
			}
		}
	}
}

static void pcie_enumerate_devices(void)
{
	uint32_t pri_bus, sec_bus;
	int rc;

	g_pcie_info_table = plat_pcie_get_info_table();
	if (g_pcie_info_table == NULL) {
		ERROR("PCIe info not returned by platform\n");
		panic();
	}

	if (g_pcie_info_table->num_entries == 0) {
		INFO("Skipping Enumeration\n");
		return;
	}

	/* Get platform specific bar config parameters */
	rc = plat_pcie_get_bar_config(&g_bar64_p_start, &g_rp_bar64_value,
				      &g_bar32_np_start, &g_bar32_p_start,
				      &g_rp_bar32_value);
	if (rc != 0) {
		ERROR("PCIe bar config parameters not returned by platform\n");
		panic();
	}

	INFO("Starting Enumeration\n");
	while (g_pcie_index < g_pcie_info_table->num_entries) {
		pri_bus = g_pcie_info_table->block[g_pcie_index].start_bus_num;

		sec_bus = pri_bus + 1;

		pcie_enumerate_device(pri_bus, sec_bus);
		pcie_clear_pri_bus();

		g_pcie_index++;
	}
	g_enumerate = 0;
	g_pcie_index = 0;
}

void pcie_init(void)
{
	static bool is_init;

	/* Create PCIe table and enumeration */
	if (!is_init) {
		pcie_enumerate_devices();

		pcie_create_info_table();
		is_init = true;
	}
}