// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Dynamic reconfiguration memory support
 *
 * Copyright 2017 IBM Corporation
 */

#define pr_fmt(fmt) "drmem: " fmt

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <asm/prom.h>
#include <asm/drmem.h>

static int n_root_addr_cells, n_root_size_cells;

static struct drmem_lmb_info __drmem_info;
struct drmem_lmb_info *drmem_info = &__drmem_info;

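/*
 * Return the end address of the last LMB in the array, i.e. its base
 * address plus one LMB size.
 */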
u64 drmem_lmb_memory_max(void)
{
	struct drmem_lmb *last_lmb;

	last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
	return last_lmb->base_addr + drmem_lmb_size();
}

static u32 drmem_lmb_flags(struct drmem_lmb *lmb)
{
	/*
	 * Return the value of the lmb flags field minus the reserved
	 * bit used internally for hotplug processing.
	 */
	return lmb->flags & ~DRMEM_LMB_RESERVED;
}

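/*
 * Allocate a copy of @prop with a zeroed value buffer of @prop_sz bytes.
 * The caller fills in the new value before handing the property to
 * of_update_property().  Returns NULL on allocation failure.
 */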
static struct property *clone_property(struct property *prop, u32 prop_sz)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_sz, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		kfree(new_prop->name);
		kfree(new_prop->value);
		kfree(new_prop);
		return NULL;
	}

	new_prop->length = prop_sz;
#if defined(CONFIG_OF_DYNAMIC)
	of_property_set_flag(new_prop, OF_DYNAMIC);
#endif
	return new_prop;
}

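/*
 * Rebuild the ibm,dynamic-memory (v1) property from the in-memory LMB
 * array: a leading cell holding the LMB count followed by one
 * of_drconf_cell_v1 entry per LMB.
 */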
static int drmem_update_dt_v1(struct device_node *memory,
			      struct property *prop)
{
	struct property *new_prop;
	struct of_drconf_cell_v1 *dr_cell;
	struct drmem_lmb *lmb;
	u32 *p;

	new_prop = clone_property(prop, prop->length);
	if (!new_prop)
		return -1;

	p = new_prop->value;
	*p++ = cpu_to_be32(drmem_info->n_lmbs);

	dr_cell = (struct of_drconf_cell_v1 *)p;

	for_each_drmem_lmb(lmb) {
		dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
		dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
		dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
		dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));

		dr_cell++;
	}

	of_update_property(memory, new_prop);
	return 0;
}

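/* Fill one v2 cell from an LMB, converting all fields to big-endian. */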
static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
				struct drmem_lmb *lmb)
{
	dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
	dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
	dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
	dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
}

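/*
 * Rebuild the ibm,dynamic-memory-v2 property.  Consecutive LMBs that
 * share an aa_index and flags value are folded into a single LMB set;
 * the first pass counts the sets so the property can be sized, the
 * second pass fills in one of_drconf_cell_v2 entry per set.
 */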
static int drmem_update_dt_v2(struct device_node *memory,
			      struct property *prop)
{
	struct property *new_prop;
	struct of_drconf_cell_v2 *dr_cell;
	struct drmem_lmb *lmb, *prev_lmb;
	u32 lmb_sets, prop_sz, seq_lmbs;
	u32 *p;

	/* First pass, determine how many LMB sets are needed. */
	lmb_sets = 0;
	prev_lmb = NULL;
	for_each_drmem_lmb(lmb) {
		if (!prev_lmb) {
			prev_lmb = lmb;
			lmb_sets++;
			continue;
		}

		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
			lmb_sets++;

		prev_lmb = lmb;
	}

	prop_sz = lmb_sets * sizeof(*dr_cell) + sizeof(__be32);
	new_prop = clone_property(prop, prop_sz);
	if (!new_prop)
		return -1;

	p = new_prop->value;
	*p++ = cpu_to_be32(lmb_sets);

	dr_cell = (struct of_drconf_cell_v2 *)p;

	/* Second pass, populate the LMB set data */
	prev_lmb = NULL;
	seq_lmbs = 0;
	for_each_drmem_lmb(lmb) {
		if (prev_lmb == NULL) {
			/* Start of first LMB set */
			prev_lmb = lmb;
			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs++;
			continue;
		}

		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
			/* end of one set, start of another */
			dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
			dr_cell++;

			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs = 1;
		} else {
			seq_lmbs++;
		}

		prev_lmb = lmb;
	}

	/* close out last LMB set */
	dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
	of_update_property(memory, new_prop);
	return 0;
}

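/*
 * Write the current LMB state back to the device tree, updating
 * whichever of the v1 or v2 dynamic-memory properties is present.
 */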
int drmem_update_dt(void)
{
	struct device_node *memory;
	struct property *prop;
	int rc = -1;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	prop = of_find_property(memory, "ibm,dynamic-memory", NULL);
	if (prop) {
		rc = drmem_update_dt_v1(memory, prop);
	} else {
		prop = of_find_property(memory, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			rc = drmem_update_dt_v2(memory, prop);
	}

	of_node_put(memory);
	return rc;
}

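/*
 * Parse one v1 cell, advancing *prop past it.  The base address spans
 * n_root_addr_cells cells; the one-cell reserved field is skipped.
 */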
static void read_drconf_v1_cell(struct drmem_lmb *lmb,
				const __be32 **prop)
{
	const __be32 *p = *prop;

	lmb->base_addr = of_read_number(p, n_root_addr_cells);
	p += n_root_addr_cells;
	lmb->drc_index = of_read_number(p++, 1);

	p++; /* skip reserved field */

	lmb->aa_index = of_read_number(p++, 1);
	lmb->flags = of_read_number(p++, 1);

	*prop = p;
}

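/*
 * Walk a v1 property: the leading cell holds the LMB count, followed by
 * one cell group per LMB.  @func is called for each LMB; a non-zero
 * return stops the walk and is propagated to the caller.
 */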
static int
__walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm, void *data,
		     int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	struct drmem_lmb lmb;
	u32 i, n_lmbs;
	int ret = 0;

	n_lmbs = of_read_number(prop++, 1);
	for (i = 0; i < n_lmbs; i++) {
		read_drconf_v1_cell(&lmb, &prop);
		ret = func(&lmb, &usm, data);
		if (ret)
			break;
	}

	return ret;
}

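/* Parse one v2 LMB-set descriptor, advancing *prop past it. */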
static void read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
				const __be32 **prop)
{
	const __be32 *p = *prop;

	dr_cell->seq_lmbs = of_read_number(p++, 1);
	dr_cell->base_addr = of_read_number(p, n_root_addr_cells);
	p += n_root_addr_cells;
	dr_cell->drc_index = of_read_number(p++, 1);
	dr_cell->aa_index = of_read_number(p++, 1);
	dr_cell->flags = of_read_number(p++, 1);

	*prop = p;
}

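/*
 * Walk a v2 property: each set descriptor covers seq_lmbs contiguous
 * LMBs, so the base address advances by one LMB size and the DRC index
 * increments for every LMB expanded from the set.  As in the v1 walk,
 * a non-zero return from @func stops the walk.
 */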
static int
__walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm, void *data,
		     int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	struct of_drconf_cell_v2 dr_cell;
	struct drmem_lmb lmb;
	u32 i, j, lmb_sets;
	int ret = 0;

	lmb_sets = of_read_number(prop++, 1);
	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &prop);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb.base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_lmb_size();

			lmb.drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb.aa_index = dr_cell.aa_index;
			lmb.flags = dr_cell.flags;

			ret = func(&lmb, &usm, data);
			if (ret)
				break;
		}
	}

	return ret;
}

#ifdef CONFIG_PPC_PSERIES
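/*
 * Early-boot variant that works on the flattened device tree.  It also
 * caches the root #address-cells/#size-cells values and the LMB size
 * for later use.
 */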
int __init walk_drmem_lmbs_early(unsigned long node, void *data,
				 int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	const __be32 *prop, *usm;
	int len, ret = -ENODEV;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
	if (!prop || len < dt_root_size_cells * sizeof(__be32))
		return ret;

	/* Get the address & size cells */
	n_root_addr_cells = dt_root_addr_cells;
	n_root_size_cells = dt_root_size_cells;

	drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);

	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);

	prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len);
	if (prop) {
		ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
	} else {
		prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2",
					   &len);
		if (prop)
			ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
	}

	memblock_dump_all();
	return ret;
}

#endif

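/*
 * Cache the LMB size from the ibm,lmb-size property unless it has
 * already been set (e.g. by the early flattened-tree walk).
 */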
static int init_drmem_lmb_size(struct device_node *dn)
{
	const __be32 *prop;
	int len;

	if (drmem_info->lmb_size)
		return 0;

	prop = of_get_property(dn, "ibm,lmb-size", &len);
	if (!prop || len < n_root_size_cells * sizeof(__be32)) {
		pr_info("Could not determine LMB size\n");
		return -1;
	}

	drmem_info->lmb_size = of_read_number(prop, n_root_size_cells);
	return 0;
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(struct device_node *dn)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(dn, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;

	return prop;
}

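/*
 * Walk the dynamic memory description under @dn using the unflattened
 * device tree.  The root cell counts and LMB size are looked up first;
 * usable-memory ranges, when present, are passed along to @func.
 */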
int walk_drmem_lmbs(struct device_node *dn, void *data,
		    int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	const __be32 *prop, *usm;
	int ret = -ENODEV;

	if (!of_root)
		return ret;

	/* Get the address & size cells */
	of_node_get(of_root);
	n_root_addr_cells = of_n_addr_cells(of_root);
	n_root_size_cells = of_n_size_cells(of_root);
	of_node_put(of_root);

	if (init_drmem_lmb_size(dn))
		return ret;

	usm = of_get_usable_memory(dn);

	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
	if (prop) {
		ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
	} else {
		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
	}

	return ret;
}

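/*
 * Build the LMB array from a v1 property: read the count, allocate the
 * array and parse one cell group per LMB.
 */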
static void __init init_drmem_v1_lmbs(const __be32 *prop)
{
	struct drmem_lmb *lmb;

	drmem_info->n_lmbs = of_read_number(prop++, 1);
	if (drmem_info->n_lmbs == 0)
		return;

	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				   GFP_KERNEL);
	if (!drmem_info->lmbs)
		return;

	for_each_drmem_lmb(lmb)
		read_drconf_v1_cell(lmb, &prop);
}

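/*
 * Build the LMB array from a v2 property.  The first pass sums
 * seq_lmbs across all sets to size the array; the second pass expands
 * each set into individual LMB entries.
 */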
static void __init init_drmem_v2_lmbs(const __be32 *prop)
{
	struct drmem_lmb *lmb;
	struct of_drconf_cell_v2 dr_cell;
	const __be32 *p;
	u32 i, j, lmb_sets;
	int lmb_index;

	lmb_sets = of_read_number(prop++, 1);
	if (lmb_sets == 0)
		return;

	/* first pass, calculate the number of LMBs */
	p = prop;
	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);
		drmem_info->n_lmbs += dr_cell.seq_lmbs;
	}

	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				   GFP_KERNEL);
	if (!drmem_info->lmbs)
		return;

	/* second pass, read in the LMB information */
	lmb_index = 0;
	p = prop;

	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb = &drmem_info->lmbs[lmb_index++];

			lmb->base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_info->lmb_size;

			lmb->drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb->aa_index = dr_cell.aa_index;
			lmb->flags = dr_cell.flags;
		}
	}
}

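/*
 * Late initcall that populates drmem_info from whichever dynamic-memory
 * property is present under /ibm,dynamic-reconfiguration-memory.
 */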
static int __init drmem_init(void)
{
	struct device_node *dn;
	const __be32 *prop;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn) {
		pr_info("No dynamic reconfiguration memory found\n");
		return 0;
	}

	if (init_drmem_lmb_size(dn)) {
		of_node_put(dn);
		return 0;
	}

	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
	if (prop) {
		init_drmem_v1_lmbs(prop);
	} else {
		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			init_drmem_v2_lmbs(prop);
	}

	of_node_put(dn);
	return 0;
}
late_initcall(drmem_init);