/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "rs400d.h"

/* This file gathers functions specific to rs400, rs480 */
static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

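/*
 * Validate the requested IGP GART size; any size other than the supported
 * 32M-2G power-of-two values is forced back to 32M.
 */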
void rs400_gart_adjust_size(struct radeon_device *rdev)
{
	/* Check gart size */
	switch (rdev->mc.gtt_size / (1024 * 1024)) {
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		DRM_ERROR("Unable to use IGP GART size %uM\n",
			  (unsigned)(rdev->mc.gtt_size >> 20));
		DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
		DRM_ERROR("Forcing to 32M GART size\n");
		rdev->mc.gtt_size = 32 * 1024 * 1024;
		return;
	}
}

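/*
 * Request a GART TLB cache invalidate and poll until the hardware clears
 * the invalidate bit or the timeout expires, then clear the request.
 */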
void rs400_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	unsigned int timeout = rdev->usec_timeout;

	WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
	do {
		tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
		if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
			break;
		udelay(1);
		timeout--;
	} while (timeout > 0);
	WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
}

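/*
 * One-time GART setup: validate the GART size, initialize the common GART
 * structure, register the debugfs info file and allocate the page table
 * in system RAM.
 */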
int rs400_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.ptr) {
		WARN(1, "RS400 GART already initialized\n");
		return 0;
	}
	/* Check gart size */
	switch (rdev->mc.gtt_size / (1024 * 1024)) {
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		return -EINVAL;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	if (rs400_debugfs_pcie_gart_info_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	return radeon_gart_table_ram_alloc(rdev);
}

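/*
 * Program the AGP aperture and GART base registers for the IGP, disable
 * snooping, then enable the GART and flush its TLB.
 */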
int rs400_gart_enable(struct radeon_device *rdev)
{
	uint32_t size_reg;
	uint32_t tmp;

	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
	/* Check gart size */
	switch (rdev->mc.gtt_size / (1024 * 1024)) {
	case 32:
		size_reg = RS480_VA_SIZE_32MB;
		break;
	case 64:
		size_reg = RS480_VA_SIZE_64MB;
		break;
	case 128:
		size_reg = RS480_VA_SIZE_128MB;
		break;
	case 256:
		size_reg = RS480_VA_SIZE_256MB;
		break;
	case 512:
		size_reg = RS480_VA_SIZE_512MB;
		break;
	case 1024:
		size_reg = RS480_VA_SIZE_1GB;
		break;
	case 2048:
		size_reg = RS480_VA_SIZE_2GB;
		break;
	default:
		return -EINVAL;
	}
	/* It should be fine to program it to max value */
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
		WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
	} else {
		WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
		WREG32(RS480_AGP_BASE_2, 0);
	}
	tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
	tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
		tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
		WREG32(RADEON_BUS_CNTL, tmp);
	} else {
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
		tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
		WREG32(RADEON_BUS_CNTL, tmp);
	}
	/* Table should be in the 32-bit address space, so ignore the bits above. */
	tmp = (u32)rdev->gart.table_addr & 0xfffff000;
	tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;

	WREG32_MC(RS480_GART_BASE, tmp);
	/* TODO: more tweaking here */
	WREG32_MC(RS480_GART_FEATURE_ID,
		  (RS480_TLB_ENABLE |
		   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
	/* Disable snooping */
	WREG32_MC(RS480_AGP_MODE_CNTL,
		  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
	/* Disable AGP mode */
	/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
	 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		tmp = RREG32_MC(RS480_MC_MISC_CNTL);
		tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
		WREG32_MC(RS480_MC_MISC_CNTL, tmp);
	} else {
		tmp = RREG32_MC(RS480_MC_MISC_CNTL);
		tmp |= RS480_GART_INDEX_REG_EN;
		WREG32_MC(RS480_MC_MISC_CNTL, tmp);
	}
	/* Enable gart */
	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
	rs400_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

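/* Disable the GART: block out-of-GART accesses and clear the address space size register. */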
void rs400_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
}

void rs400_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs400_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
}

#define RS400_PTE_UNSNOOPED (1 << 0)
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE  (1 << 3)

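/* Build a 32-bit GART page table entry from a DMA address and the RADEON_GART_PAGE_* flags. */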
uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
	uint32_t entry;

	entry = (lower_32_bits(addr) & PAGE_MASK) |
		((upper_32_bits(addr) & 0xff) << 4);
	if (flags & RADEON_GART_PAGE_READ)
		entry |= RS400_PTE_READABLE;
	if (flags & RADEON_GART_PAGE_WRITE)
		entry |= RS400_PTE_WRITEABLE;
	if (!(flags & RADEON_GART_PAGE_SNOOP))
		entry |= RS400_PTE_UNSNOOPED;
	return entry;
}

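/* Write one little-endian page table entry into the GART table held in system RAM. */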
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
			 uint64_t entry)
{
	u32 *gtt = rdev->gart.ptr;

	gtt[i] = cpu_to_le32(lower_32_bits(entry));
}

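/*
 * Poll MC_STATUS until the memory controller reports idle.
 * Returns 0 on success, -1 if the timeout expires.
 */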
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(RADEON_MC_STATUS);
		if (tmp & RADEON_MC_IDLE)
			return 0;
		udelay(1);
	}
	return -1;
}

static void rs400_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: is this correct ? */
	r420_pipes_init(rdev);
	if (rs400_mc_wait_for_idle(rdev)) {
		pr_warn("rs400: Failed to wait MC idle while programming pipes. Bad things might happen. %08x\n",
			RREG32(RADEON_MC_STATUS));
	}
}

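/*
 * Set up the memory controller layout: VRAM parameters, the VRAM base
 * read from NB_TOM, and the GTT placement.
 */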
static void rs400_mc_init(struct radeon_device *rdev)
{
	u64 base;

	rs400_gart_adjust_size(rdev);
	rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	r100_vram_init_sizes(rdev);
	base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

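/*
 * Indirect MC register access through the RS480_NB_MC_INDEX/RS480_NB_MC_DATA
 * pair, serialized by the mc_idx_lock spinlock.
 */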
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t r;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(RS480_NB_MC_INDEX, reg & 0xff);
	r = RREG32(RS480_NB_MC_DATA);
	WREG32(RS480_NB_MC_INDEX, 0xff);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
	return r;
}

void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
	WREG32(RS480_NB_MC_DATA, (v));
	WREG32(RS480_NB_MC_INDEX, 0xff);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}

#if defined(CONFIG_DEBUG_FS)
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(RADEON_HOST_PATH_CNTL);
	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_BUS_CNTL);
	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
		seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
		seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
		seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
		seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
		tmp = RREG32(RS690_HDP_FB_LOCATION);
		seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
	} else {
		tmp = RREG32(RADEON_AGP_BASE);
		seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
		tmp = RREG32(RS480_AGP_BASE_2);
		seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
		tmp = RREG32(RADEON_MC_AGP_LOCATION);
		seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
	}
	tmp = RREG32_MC(RS480_GART_BASE);
	seq_printf(m, "GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_GART_FEATURE_ID);
	seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
	seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_MC_MISC_CNTL);
	seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
	tmp = RREG32_MC(0x5F);
	seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
	seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
	seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
	tmp = RREG32_MC(0x3B);
	seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
	tmp = RREG32_MC(0x3C);
	seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
	tmp = RREG32_MC(0x30);
	seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
	tmp = RREG32_MC(0x31);
	seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
	tmp = RREG32_MC(0x32);
	seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
	tmp = RREG32_MC(0x33);
	seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
	tmp = RREG32_MC(0x34);
	seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
	tmp = RREG32_MC(0x35);
	seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
	tmp = RREG32_MC(0x36);
	seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
	tmp = RREG32_MC(0x37);
	seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
	return 0;
}

static struct drm_info_list rs400_gart_info_list[] = {
	{"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
};
#endif

static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
#else
	return 0;
#endif
}

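/* Reprogram the MC framebuffer location with all MC clients stopped. */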
static void rs400_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;

	/* Stops all mc clients */
	r100_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rs400_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
	WREG32(R_000148_MC_FB_LOCATION,
	       S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
	       S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));

	r100_mc_resume(rdev, &save);
}

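/*
 * Common bring-up path: program the MC, start clocks, initialize pipes,
 * then enable the GART, writeback, fences, IRQs, the CP ring and the IB pool.
 */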
static int rs400_startup(struct radeon_device *rdev)
{
	int r;

	r100_set_common_regs(rdev);

	rs400_mc_program(rdev);
	/* Resume clock */
	r300_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs400_gpu_init(rdev);
	r100_enable_bm(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs400_gart_enable(rdev);
	if (r)
		return r;

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r100_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}

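/*
 * Resume from suspend: disable the GART, reset and re-post the card via
 * the combios tables, then run the normal startup path again.
 */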
int rs400_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure the GART is not working */
	rs400_gart_disable(rdev);
	/* Resume clock before doing reset */
	r300_clock_startup(rdev);
	/* setup MC before calling post tables */
	rs400_mc_program(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r300_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = rs400_startup(rdev);
	if (r)
		rdev->accel_working = false;
	return r;
}

int rs400_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	rs400_gart_disable(rdev);
	return 0;
}

void rs400_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	rs400_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

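/*
 * First-time driver init for RS400/RS480: BIOS/combios handling, GPU reset
 * and post check, clocks, MC, fence driver, memory manager, GART setup and
 * startup.
 */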
int rs400_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			 RREG32(R_000E40_RBBM_STATUS),
			 RREG32(R_0007C0_CP_STAT));
	}
	/* check if card is posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs400_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs400_gart_init(rdev);
	if (r)
		return r;
	r300_set_reg_safe(rdev);

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->accel_working = true;
	r = rs400_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init, stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		rs400_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}