@@ -23,7 +23,6 @@
#include "adreno.h"
#include "a6xx_reg.h"
#include "adreno_a6xx.h"
#include "adreno_snapshot.h"
#include "adreno_trace.h"

static const unsigned int a6xx_gmu_gx_registers[] = {
@@ -1538,134 +1537,6 @@ static unsigned int a6xx_gmu_ifpc_show(struct adreno_device *adreno_dev)
                gmu->idle_level >= GPU_HW_IFPC;
}

static size_t a6xx_snapshot_gmu_tcm(struct kgsl_device *device,
                u8 *buf, size_t remain, void *priv)
{
        struct kgsl_snapshot_gmu_mem *mem_hdr =
                (struct kgsl_snapshot_gmu_mem *)buf;
        unsigned int *data = (unsigned int *)(buf + sizeof(*mem_hdr));
        unsigned int i, bytes;
        unsigned int *type = priv;
        const unsigned int *regs;

        if (*type == GMU_ITCM)
                regs = a6xx_gmu_itcm_registers;
        else
                regs = a6xx_gmu_dtcm_registers;

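        /* regs[0]..regs[1] is an inclusive range of 32-bit words; << 2 converts the count to bytes */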
        bytes = (regs[1] - regs[0] + 1) << 2;

        if (remain < bytes + sizeof(*mem_hdr)) {
                SNAPSHOT_ERR_NOMEM(device, "GMU Memory");
                return 0;
        }

        mem_hdr->type = SNAPSHOT_GMU_MEM_BIN_BLOCK;
        mem_hdr->hostaddr = 0;
        mem_hdr->gmuaddr = gmu_get_memtype_base(KGSL_GMU_DEVICE(device), *type);
        mem_hdr->gpuaddr = 0;

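        /* Read the TCM contents one 32-bit word at a time into the snapshot buffer */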
        for (i = regs[0]; i <= regs[1]; i++)
                kgsl_regread(device, i, data++);

        return bytes + sizeof(*mem_hdr);
}

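/* Pairs a GMU memory descriptor with the snapshot section type used to dump it */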
struct gmu_mem_type_desc {
        struct gmu_memdesc *memdesc;
        uint32_t type;
};

static size_t a6xx_snapshot_gmu_mem(struct kgsl_device *device,
                u8 *buf, size_t remain, void *priv)
{
        struct kgsl_snapshot_gmu_mem *mem_hdr =
                (struct kgsl_snapshot_gmu_mem *)buf;
        struct gmu_mem_type_desc *desc = priv;
        unsigned int *data = (unsigned int *)(buf + sizeof(*mem_hdr));

        if (priv == NULL)
                return 0;

        if (remain < desc->memdesc->size + sizeof(*mem_hdr)) {
                KGSL_CORE_ERR(
                        "snapshot: Not enough memory for the gmu section %d\n",
                        desc->type);
                return 0;
        }

        memset(mem_hdr, 0, sizeof(*mem_hdr));
        mem_hdr->type = desc->type;
        mem_hdr->hostaddr = (uintptr_t)desc->memdesc->hostptr;
        mem_hdr->gmuaddr = desc->memdesc->gmuaddr;
        mem_hdr->gpuaddr = 0;

        /* Just copy the ringbuffer, there are no active IBs */
        memcpy(data, desc->memdesc->hostptr, desc->memdesc->size);

        return desc->memdesc->size + sizeof(*mem_hdr);
}

/*
 * a6xx_gmu_snapshot() - A6XX GMU snapshot function
 * @adreno_dev: Device being snapshotted
 * @snapshot: Pointer to the snapshot instance
 *
 * This is where all of the A6XX GMU specific bits and pieces are grabbed
 * into the snapshot memory
 */
static void a6xx_gmu_snapshot(struct adreno_device *adreno_dev,
                struct kgsl_snapshot *snapshot)
{
        struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
        struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
        bool gx_on;
        struct gmu_mem_type_desc desc[] = {
                {gmu->hfi_mem, SNAPSHOT_GMU_MEM_HFI},
                {gmu->persist_mem, SNAPSHOT_GMU_MEM_BIN_BLOCK},
                {gmu->icache_mem, SNAPSHOT_GMU_MEM_BIN_BLOCK},
                {gmu->dcache_mem, SNAPSHOT_GMU_MEM_BIN_BLOCK},
                {gmu->gmu_log, SNAPSHOT_GMU_MEM_LOG},
                {gmu->dump_mem, SNAPSHOT_GMU_MEM_BIN_BLOCK} };
        unsigned int val, i;
        enum gmu_mem_type type;

        if (!gmu_core_isenabled(device))
                return;

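        /* Add a memory section for each GMU buffer that was actually allocated */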
        for (i = 0; i < ARRAY_SIZE(desc); i++) {
                if (desc[i].memdesc)
                        kgsl_snapshot_add_section(device,
                                KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
                                snapshot, a6xx_snapshot_gmu_mem,
                                &desc[i]);
        }

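        /* Dump the GMU instruction and data TCMs as separate sections */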
        type = GMU_ITCM;
        kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
                snapshot, a6xx_snapshot_gmu_tcm, &type);
        type = GMU_DTCM;
        kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
                snapshot, a6xx_snapshot_gmu_tcm, &type);

        adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
                ARRAY_SIZE(a6xx_gmu_registers) / 2);

        gx_on = a6xx_gmu_gx_is_on(adreno_dev);

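        /* Only dump the GX-side GMU registers when the GX rail is up */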
        if (gx_on) {
                /* Set fence to ALLOW mode so registers can be read */
                kgsl_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
                kgsl_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val);

                KGSL_DRV_ERR(device, "set FENCE to ALLOW mode:%x\n", val);
                adreno_snapshot_registers(device, snapshot,
                        a6xx_gmu_gx_registers,
                        ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
        }
}

static int a6xx_gmu_wait_for_active_transition(
                struct adreno_device *adreno_dev)
{
@@ -1754,7 +1625,6 @@ struct gmu_dev_ops adreno_a6xx_gmudev = {
        .wait_for_gmu_idle = a6xx_gmu_wait_for_idle,
        .ifpc_store = a6xx_gmu_ifpc_store,
        .ifpc_show = a6xx_gmu_ifpc_show,
        .snapshot = a6xx_gmu_snapshot,
        .wait_for_active_transition = a6xx_gmu_wait_for_active_transition,
        .is_initialized = a6xx_gmu_is_initialized,
        .read_ao_counter = a6xx_gmu_read_ao_counter,