Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 230
1 files changed, 194 insertions, 36 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 45d1172b7bff..17428cb8a581 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -29,7 +29,6 @@
 #include "amdgpu.h"
 #include "amdgpu_gfx.h"
 #include "amdgpu_psp.h"
-#include "amdgpu_smu.h"
 #include "nv.h"
 #include "nvd.h"
 
@@ -174,6 +173,11 @@
 #define mmGC_THROTTLE_CTRL_Sienna_Cichlid	0x2030
 #define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX	0
 
+#define GFX_RLCG_GC_WRITE_OLD	(0x8 << 28)
+#define GFX_RLCG_GC_WRITE	(0x0 << 28)
+#define GFX_RLCG_GC_READ	(0x1 << 28)
+#define GFX_RLCG_MMHUB_WRITE	(0x2 << 28)
+
 MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
 MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
 MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -228,6 +232,13 @@ MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_mec.bin");
 MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_mec2.bin");
 MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_rlc.bin");
 
+MODULE_FIRMWARE("amdgpu/beige_goby_ce.bin");
+MODULE_FIRMWARE("amdgpu/beige_goby_pfp.bin");
+MODULE_FIRMWARE("amdgpu/beige_goby_me.bin");
+MODULE_FIRMWARE("amdgpu/beige_goby_mec.bin");
+MODULE_FIRMWARE("amdgpu/beige_goby_mec2.bin");
+MODULE_FIRMWARE("amdgpu/beige_goby_rlc.bin");
+
 static const struct soc15_reg_golden golden_settings_gc_10_1[] =
 {
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
@@ -1391,9 +1402,10 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000044),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
@@ -1411,46 +1423,136 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
 };
 
-static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+static bool gfx_v10_is_rlcg_rw(struct amdgpu_device *adev, u32 offset, uint32_t *flag, bool write)
+{
+	/* always programed by rlcg, only for gc */
+	if (offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI) ||
+	    offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO) ||
+	    offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH) ||
+	    offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL) ||
+	    offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX) ||
+	    offset == SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL)) {
+		if (!amdgpu_sriov_reg_indirect_gc(adev))
+			*flag = GFX_RLCG_GC_WRITE_OLD;
+		else
+			*flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
+
+		return true;
+	}
+
+	/* currently support gc read/write, mmhub write */
+	if (offset >= SOC15_REG_OFFSET(GC, 0, mmSDMA0_DEC_START) &&
+	    offset <= SOC15_REG_OFFSET(GC, 0, mmRLC_GTS_OFFSET_MSB)) {
+		if (amdgpu_sriov_reg_indirect_gc(adev))
+			*flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
+		else
+			return false;
+	} else {
+		if (amdgpu_sriov_reg_indirect_mmhub(adev))
+			*flag = GFX_RLCG_MMHUB_WRITE;
+		else
+			return false;
+	}
+
+	return true;
+}
+
+static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag)
 {
 	static void *scratch_reg0;
 	static void *scratch_reg1;
+	static void *scratch_reg2;
+	static void *scratch_reg3;
 	static void *spare_int;
+	static uint32_t grbm_cntl;
+	static uint32_t grbm_idx;
 	uint32_t i = 0;
 	uint32_t retries = 50000;
+	u32 ret = 0;
+
+	scratch_reg0 = adev->rmmio +
+		       (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0) * 4;
+	scratch_reg1 = adev->rmmio +
+		       (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1) * 4;
+	scratch_reg2 = adev->rmmio +
+		       (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG2) * 4;
+	scratch_reg3 = adev->rmmio +
+		       (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
+	spare_int = adev->rmmio +
+		    (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+
+	grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+	grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+	if (offset == grbm_cntl || offset == grbm_idx) {
+		if (offset == grbm_cntl)
+			writel(v, scratch_reg2);
+		else if (offset == grbm_idx)
+			writel(v, scratch_reg3);
+
+		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+	} else {
+		writel(v, scratch_reg0);
+		writel(offset | flag, scratch_reg1);
+		writel(1, spare_int);
+		for (i = 0; i < retries; i++) {
+			u32 tmp;
+
+			tmp = readl(scratch_reg1);
+			if (!(tmp & flag))
+				break;
 
-	scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
-	scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
-	spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+			udelay(10);
+		}
 
-	if (amdgpu_sriov_runtime(adev)) {
-		pr_err("shouldn't call rlcg write register during runtime\n");
-		return;
+		if (i >= retries)
+			pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
 	}
 
-	writel(v, scratch_reg0);
-	writel(offset | 0x80000000, scratch_reg1);
-	writel(1, spare_int);
-	for (i = 0; i < retries; i++) {
-		u32 tmp;
+	ret = readl(scratch_reg0);
 
-		tmp = readl(scratch_reg1);
-		if (!(tmp & 0x80000000))
-			break;
+	return ret;
+}
+
+static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 flag)
+{
+	uint32_t rlcg_flag;
 
-		udelay(10);
+	if (amdgpu_sriov_fullaccess(adev) &&
+	    gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 1)) {
+		gfx_v10_rlcg_rw(adev, offset, value, rlcg_flag);
+
+		return;
 	}
 
+	if (flag & AMDGPU_REGS_NO_KIQ)
+		WREG32_NO_KIQ(offset, value);
+	else
+		WREG32(offset, value);
+}
+
+static u32 gfx_v10_rlcg_rreg(struct amdgpu_device *adev, u32 offset, u32 flag)
+{
+	uint32_t rlcg_flag;
+
+	if (amdgpu_sriov_fullaccess(adev) &&
+	    gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 0))
+		return gfx_v10_rlcg_rw(adev, offset, 0, rlcg_flag);
 
-	if (i >= retries)
-		pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+	if (flag & AMDGPU_REGS_NO_KIQ)
+		return RREG32_NO_KIQ(offset);
+	else
+		return RREG32(offset);
+
+	return 0;
 }
 
 static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
@@ -3280,7 +3382,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000280, 0x00000280),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07800000, 0x00800000),
-	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x00001d00, 0x00000500),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x00001d00, 0x00000500),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003c0000, 0x00280400),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
@@ -3311,6 +3413,41 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020)
 };
 
+static const struct soc15_reg_golden golden_settings_gc_10_3_5[] = {
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xb0000ff0, 0x30000100),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff000000, 0x7e000100),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x00f80988),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x000001ff, 0x00000020),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xe07df47f, 0x00180070),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER0_SELECT, 0xf0f001ff, 0x00000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER1_SELECT, 0xf0f001ff, 0x00000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER10_SELECT, 0xf0f001ff, 0x00000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER11_SELECT, 0xf0f001ff, 0x00000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER12_SELECT, 0xf0f001ff, 0x00000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER13_SELECT, 0xf0f001ff, 0x00000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER14_SELECT, 0xf0f001ff, 0x00000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER15_SELECT, 0xf0f001ff, 0x00000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER2_SELECT, 0xf0f001ff, 0x00000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER3_SELECT, 0xf0f001ff, 0x00000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER4_SELECT, 0xf0f001ff, 0x00000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER5_SELECT, 0xf0f001ff, 0x00000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER6_SELECT, 0xf0f001ff, 0x00000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX,0xfff7ffff, 0x01030000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000)
+};
+
 #define DEFAULT_SH_MEM_CONFIG \
 	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
 	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -3531,6 +3668,11 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
 						golden_settings_gc_10_3_4,
 						(const u32)ARRAY_SIZE(golden_settings_gc_10_3_4));
 		break;
+	case CHIP_BEIGE_GOBY:
+		soc15_program_register_sequence(adev,
+						golden_settings_gc_10_3_5,
+						(const u32)ARRAY_SIZE(golden_settings_gc_10_3_5));
+		break;
 	default:
 		break;
 	}
@@ -3716,6 +3858,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 		adev->gfx.cp_fw_write_wait = true;
 		break;
 	default:
@@ -3831,6 +3974,9 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 	case CHIP_DIMGREY_CAVEFISH:
 		chip_name = "dimgrey_cavefish";
 		break;
+	case CHIP_BEIGE_GOBY:
+		chip_name = "beige_goby";
+		break;
 	default:
 		BUG();
 	}
@@ -4399,6 +4545,7 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 		adev->gfx.config.max_hw_contexts = 8;
 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -4459,9 +4606,8 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
 	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 
 	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
-	r = amdgpu_ring_init(adev, ring, 1024,
-			     &adev->gfx.eop_irq, irq_type,
-			     AMDGPU_RING_PRIO_DEFAULT);
+	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+			     AMDGPU_RING_PRIO_DEFAULT, NULL);
 	if (r)
 		return r;
 	return 0;
@@ -4495,8 +4641,8 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
 	/* type-2 packets are deprecated on MEC, use type-3 instead */
-	r = amdgpu_ring_init(adev, ring, 1024,
-			     &adev->gfx.eop_irq, irq_type, hw_prio);
+	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+			     hw_prio, NULL);
 	if (r)
 		return r;
 
@@ -4524,6 +4670,7 @@ static int gfx_v10_0_sw_init(void *handle)
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 		adev->gfx.me.num_me = 1;
 		adev->gfx.me.num_pipe_per_me = 1;
 		adev->gfx.me.num_queue_per_pipe = 1;
@@ -6031,6 +6178,7 @@ static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 		tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
 				    DOORBELL_RANGE_LOWER_Sienna_Cichlid, ring->doorbell_index);
 		WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
@@ -6166,6 +6314,7 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 		case CHIP_NAVY_FLOUNDER:
 		case CHIP_VANGOGH:
 		case CHIP_DIMGREY_CAVEFISH:
+		case CHIP_BEIGE_GOBY:
 			WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid, 0);
 			break;
 		default:
@@ -6178,6 +6327,7 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 		case CHIP_NAVY_FLOUNDER:
 		case CHIP_VANGOGH:
 		case CHIP_DIMGREY_CAVEFISH:
+		case CHIP_BEIGE_GOBY:
 			WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid,
 				     (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
 				      CP_MEC_CNTL__MEC_ME2_HALT_MASK));
@@ -6274,6 +6424,7 @@ static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 		tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
 		tmp &= 0xffffff00;
 		tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
@@ -6988,6 +7139,7 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
 	case CHIP_SIENNA_CICHLID:
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 		data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid);
 		WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, 0);
 		WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);
@@ -7022,6 +7174,9 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
 {
 	uint32_t data;
 
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	/* initialize cam_index to 0
 	 * index will auto-inc after each data writting */
 	WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0);
@@ -7031,6 +7186,7 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 		/* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
 		data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
 			GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
@@ -7172,16 +7328,10 @@ static int gfx_v10_0_hw_init(void *handle)
 		 * loaded firstly, so in direct type, it has to load smc ucode
 		 * here before rlc.
 		 */
-		if (adev->smu.ppt_funcs != NULL && !(adev->flags & AMD_IS_APU)) {
-			r = smu_load_microcode(&adev->smu);
+		if (!(adev->flags & AMD_IS_APU)) {
+			r = amdgpu_pm_load_smu_firmware(adev, NULL);
 			if (r)
 				return r;
-
-			r = smu_check_fw_status(&adev->smu);
-			if (r) {
-				pr_err("SMC firmware status is not correct\n");
-				return r;
-			}
 		}
 		gfx_v10_0_disable_gpa_mode(adev);
 	}
@@ -7246,7 +7396,7 @@ static int gfx_v10_0_hw_fini(void *handle)
 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
 
-	if (!adev->in_pci_err_recovery) {
+	if (!adev->no_hw_access) {
 #ifndef BRING_UP_DEBUG
 		if (amdgpu_async_gfx_ring) {
 			r = gfx_v10_0_kiq_disable_kgq(adev);
@@ -7346,6 +7496,7 @@ static int gfx_v10_0_soft_reset(void *handle)
 		case CHIP_NAVY_FLOUNDER:
 		case CHIP_VANGOGH:
 		case CHIP_DIMGREY_CAVEFISH:
+		case CHIP_BEIGE_GOBY:
 			if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY_Sienna_Cichlid))
 				grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
 								GRBM_SOFT_RESET,
@@ -7455,6 +7606,7 @@ static int gfx_v10_0_early_init(void *handle)
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 		adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_Sienna_Cichlid;
 		break;
 	default:
@@ -7511,6 +7663,7 @@ static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 		WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data);
 
 		/* wait for RLC_SAFE_MODE */
@@ -7545,6 +7698,7 @@ static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev)
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 		WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data);
 		break;
 	default:
@@ -7892,6 +8046,7 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
 	.start = gfx_v10_0_rlc_start,
 	.update_spm_vmid = gfx_v10_0_update_spm_vmid,
 	.rlcg_wreg = gfx_v10_rlcg_wreg,
+	.rlcg_rreg = gfx_v10_rlcg_rreg,
 	.is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
 };
 
@@ -7911,6 +8066,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
 	case CHIP_SIENNA_CICHLID:
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 		amdgpu_gfx_off_ctrl(adev, enable);
 		break;
 	case CHIP_VANGOGH:
@@ -7939,6 +8095,7 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 		gfx_v10_0_update_gfx_clock_gating(adev,
 						  state == AMD_CG_STATE_GATE);
 		break;
@@ -9049,6 +9206,7 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
+	case CHIP_BEIGE_GOBY:
 		adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
 		break;
 	case CHIP_NAVI12:
