adreno: disable snapshot and coresight

Signed-off-by: Park Ju Hyung <qkrwngud825@gmail.com>
Signed-off-by: Alexander Winkowski <dereference23@outlook.com>
Branch: fourteen
Authored by Park Ju Hyung 6 years ago; committed by Jenna
parent 4d8b75cd75
commit 4d5e853887
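
This change compiles the snapshot (GPU crash dump) and coresight/QDSS debug support out of the MSM KGSL driver: the snapshot and coresight objects are dropped from the Makefile, the per-target hooks, ops entries, and register tables are removed (or fenced off with #if 0 in adreno_a6xx.c), and the public entry points are replaced with empty static inline stubs so the remaining call sites compile unchanged.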
Changed files (lines changed):

  1. drivers/gpu/msm/Makefile (7)
  2. drivers/gpu/msm/adreno.c (15)
  3. drivers/gpu/msm/adreno.h (12)
  4. drivers/gpu/msm/adreno_a6xx.c (8)
  5. drivers/gpu/msm/adreno_a6xx.h (2)
  6. drivers/gpu/msm/adreno_a6xx_gmu.c (130)
  7. drivers/gpu/msm/adreno_a6xx_rgmu.c (19)
  8. drivers/gpu/msm/adreno_cp_parser.c (3)
  9. drivers/gpu/msm/kgsl_device.h (11)
 10. drivers/gpu/msm/kgsl_drawobj.c (4)
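
The recurring pattern throughout the diff below: rather than chase down every call site, each exported snapshot entry point is turned into an empty static inline stub in its header, which the compiler folds away. A minimal standalone sketch of the idiom (this patch applies it unconditionally; the CONFIG_GPU_SNAPSHOT switch and the gpu_snapshot/gpu_device names here are illustrative, not from the patch):

```c
#include <stdio.h>

struct gpu_device { int id; };

#ifdef CONFIG_GPU_SNAPSHOT
/* Real implementation, built only when the feature is compiled in. */
void gpu_snapshot(struct gpu_device *dev)
{
	printf("capturing crash dump for GPU %d\n", dev->id);
}
#else
/* Empty static inline stub: call sites compile unchanged and the
 * optimizer removes the call entirely. */
static inline void gpu_snapshot(struct gpu_device *dev)
{
	(void)dev;	/* unused */
}
#endif

int main(void)
{
	struct gpu_device dev = { .id = 0 };

	gpu_snapshot(&dev);	/* a no-op when snapshot support is out */
	return 0;
}
```

The stub form keeps the header as the single point of control: disabling the feature needs no changes in the dozens of files that call into it.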

diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
@@ -9,7 +9,6 @@ msm_kgsl_core-y = \
 	kgsl_pwrctrl.o \
 	kgsl_pwrscale.o \
 	kgsl_mmu.o \
-	kgsl_snapshot.o \
 	kgsl_events.o \
 	kgsl_pool.o \
 	kgsl_gmu_core.o \
@@ -27,17 +26,11 @@ msm_adreno-y += \
 	adreno_ringbuffer.o \
 	adreno_drawctxt.o \
 	adreno_dispatch.o \
-	adreno_snapshot.o \
-	adreno_coresight.o \
 	adreno_trace.o \
 	adreno_a3xx.o \
 	adreno_a4xx.o \
 	adreno_a5xx.o \
 	adreno_a6xx.o \
-	adreno_a3xx_snapshot.o \
-	adreno_a4xx_snapshot.o \
-	adreno_a5xx_snapshot.o \
-	adreno_a6xx_snapshot.o \
 	adreno_a4xx_preempt.o \
 	adreno_a5xx_preempt.o \
 	adreno_a6xx_preempt.o \

diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
@@ -559,8 +559,6 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
 			tmp &= ~BIT(i);
 	}
 
-	gpudev->irq_trace(adreno_dev, status);
-
 	/*
 	 * Clear ADRENO_INT_RBBM_AHB_ERROR bit after this interrupt has been
 	 * cleared in its respective handler
@@ -1309,9 +1307,6 @@ static int adreno_probe(struct platform_device *pdev)
 	kgsl_pwrscale_init(&pdev->dev, CONFIG_QCOM_ADRENO_DEFAULT_GOVERNOR);
 
-	/* Initialize coresight for the target */
-	adreno_coresight_init(adreno_dev);
-
 	/* Get the system cache slice descriptor for GPU */
 	adreno_dev->gpu_llc_slice = adreno_llc_getd(&pdev->dev, "gpu");
 	if (IS_ERR(adreno_dev->gpu_llc_slice) &&
@@ -1382,7 +1377,6 @@ static int adreno_remove(struct platform_device *pdev)
 	adreno_sysfs_close(adreno_dev);
-	adreno_coresight_remove(adreno_dev);
 	adreno_profile_close(adreno_dev);
 
 	/* Release the system cache slice descriptor */
@@ -2007,9 +2001,6 @@ static int _adreno_start(struct adreno_device *adreno_dev)
 	 */
 	adreno_llc_setup(device);
 
-	/* Re-initialize the coresight registers if applicable */
-	adreno_coresight_start(adreno_dev);
-
 	adreno_irqctrl(adreno_dev, 1);
 
 	adreno_perfcounter_start(adreno_dev);
@@ -2150,9 +2141,6 @@ static int adreno_stop(struct kgsl_device *device)
 	adreno_llc_deactivate_slice(adreno_dev->gpu_llc_slice);
 	adreno_llc_deactivate_slice(adreno_dev->gpuhtw_llc_slice);
 
-	/* Save active coresight registers if applicable */
-	adreno_coresight_stop(adreno_dev);
-
 	/* Save physical performance counter values before GPU power down */
 	adreno_perfcounter_save(adreno_dev);
@@ -3002,9 +2990,6 @@ int adreno_soft_reset(struct kgsl_device *device)
 	/* Reinitialize the GPU */
 	gpudev->start(adreno_dev);
 
-	/* Re-initialize the coresight registers if applicable */
-	adreno_coresight_start(adreno_dev);
-
 	/* Enable IRQ */
 	adreno_irqctrl(adreno_dev, 1);

diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
@@ -596,7 +596,6 @@ struct adreno_device {
 	unsigned int speed_bin;
 	unsigned int quirks;
-	struct coresight_device *csdev[GPU_CORESIGHT_MAX];
 	uint32_t gpmu_throttle_counters[ADRENO_GPMU_THROTTLE_COUNTERS];
 	struct work_struct irq_storm_work;
@@ -980,9 +979,6 @@ struct adreno_gpudev {
 	struct adreno_perfcounters *perfcounters;
 	const struct adreno_invalid_countables *invalid_countables;
-	struct adreno_snapshot_data *snapshot_data;
-	struct adreno_coresight *coresight[GPU_CORESIGHT_MAX];
-
 	struct adreno_irq *irq;
 	int num_prio_levels;
@@ -991,8 +987,6 @@ struct adreno_gpudev {
 	unsigned int gbif_arb_halt_mask;
 	unsigned int gbif_gx_halt_mask;
 	/* GPU specific function hooks */
-	void (*irq_trace)(struct adreno_device *, unsigned int status);
-	void (*snapshot)(struct adreno_device *, struct kgsl_snapshot *);
 	void (*platform_setup)(struct adreno_device *);
 	void (*init)(struct adreno_device *);
 	void (*remove)(struct adreno_device *);
@@ -1047,8 +1041,6 @@ struct adreno_gpudev {
 	int (*perfcounter_update)(struct adreno_device *adreno_dev,
 		struct adreno_perfcount_register *reg,
 		bool update_reg);
-	size_t (*snapshot_preemption)(struct kgsl_device *, u8 *,
-		size_t, void *);
 	void (*zap_shader_unload)(struct adreno_device *);
 	int (*secure_pt_hibernate)(struct adreno_device *);
 	int (*secure_pt_restore)(struct adreno_device *);
@@ -1165,9 +1157,9 @@ void adreno_shadermem_regread(struct kgsl_device *device,
 		unsigned int offsetwords,
 		unsigned int *value);
 
-void adreno_snapshot(struct kgsl_device *device,
+static inline void adreno_snapshot(struct kgsl_device *device,
 		struct kgsl_snapshot *snapshot,
-		struct kgsl_context *context);
+		struct kgsl_context *context) {}
 
 int adreno_reset(struct kgsl_device *device, int fault);

diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
@@ -503,8 +503,6 @@ static void a6xx_pwrup_reglist_init(struct adreno_device *adreno_dev)
 static void a6xx_init(struct adreno_device *adreno_dev)
 {
-	a6xx_crashdump_init(adreno_dev);
-
 	/*
 	 * If the GMU is not enabled, rewrite the offset for the always on
 	 * counters to point to the CP always on instead of GMU always on
@@ -1857,6 +1855,7 @@ static struct adreno_irq a6xx_irq = {
 	.mask = A6XX_INT_MASK,
 };
 
+#if 0
 static bool adreno_is_qdss_dbg_register(struct kgsl_device *device,
 	unsigned int offsetwords)
 {
@@ -2427,6 +2426,7 @@ static struct adreno_coresight a6xx_coresight_cx = {
 	.read = adreno_cx_dbgc_regread,
 	.write = adreno_cx_dbgc_regwrite,
 };
+#endif
 
 static struct adreno_perfcount_register a6xx_perfcounters_cp[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_0_LO,
@@ -3354,9 +3354,7 @@ static int a6xx_secure_pt_restore(struct adreno_device *adreno_dev)
 struct adreno_gpudev adreno_a6xx_gpudev = {
 	.reg_offsets = &a6xx_reg_offsets,
 	.start = a6xx_start,
-	.snapshot = a6xx_snapshot,
 	.irq = &a6xx_irq,
-	.irq_trace = trace_kgsl_a5xx_irq_status,
 	.num_prio_levels = KGSL_PRIORITY_MAX_RB_LEVELS,
 	.platform_setup = a6xx_platform_setup,
 	.init = a6xx_init,
@@ -3389,9 +3387,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
 	.ccu_invalidate = a6xx_ccu_invalidate,
 	.perfcounter_init = a6xx_perfcounter_init,
 	.perfcounter_update = a6xx_perfcounter_update,
-	.coresight = {&a6xx_coresight, &a6xx_coresight_cx},
 	.clk_set_options = a6xx_clk_set_options,
-	.snapshot_preemption = a6xx_snapshot_preemption,
 	.zap_shader_unload = a6xx_zap_shader_unload,
 	.secure_pt_hibernate = a6xx_secure_pt_hibernate,
 	.secure_pt_restore = a6xx_secure_pt_restore,
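
Note the two one-line hunks above: instead of deleting the QDSS/coresight helpers and register tables in adreno_a6xx.c, the patch brackets the whole region with #if 0 ... #endif. The preprocessor discards it before compilation, which keeps the diff small and leaves the code in the tree in case the feature is ever restored, at the cost of carrying dead source the compiler never sees.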

diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
@@ -169,7 +169,5 @@ void a6xx_crashdump_init(struct adreno_device *adreno_dev);
 int a6xx_gmu_sptprac_enable(struct adreno_device *adreno_dev);
 void a6xx_gmu_sptprac_disable(struct adreno_device *adreno_dev);
 bool a6xx_gmu_sptprac_is_on(struct adreno_device *adreno_dev);
-size_t a6xx_snapshot_preemption(struct kgsl_device *device, u8 *buf,
-	size_t remain, void *priv);
 u64 a6xx_gmu_read_ao_counter(struct kgsl_device *device);
 #endif

diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c
@@ -23,7 +23,6 @@
 #include "adreno.h"
 #include "a6xx_reg.h"
 #include "adreno_a6xx.h"
-#include "adreno_snapshot.h"
 #include "adreno_trace.h"
 
 static const unsigned int a6xx_gmu_gx_registers[] = {
@@ -1538,134 +1537,6 @@ static unsigned int a6xx_gmu_ifpc_show(struct adreno_device *adreno_dev)
 		gmu->idle_level >= GPU_HW_IFPC;
 }
 
-static size_t a6xx_snapshot_gmu_tcm(struct kgsl_device *device,
-		u8 *buf, size_t remain, void *priv)
-{
-	struct kgsl_snapshot_gmu_mem *mem_hdr =
-		(struct kgsl_snapshot_gmu_mem *)buf;
-	unsigned int *data = (unsigned int *)(buf + sizeof(*mem_hdr));
-	unsigned int i, bytes;
-	unsigned int *type = priv;
-	const unsigned int *regs;
-
-	if (*type == GMU_ITCM)
-		regs = a6xx_gmu_itcm_registers;
-	else
-		regs = a6xx_gmu_dtcm_registers;
-
-	bytes = (regs[1] - regs[0] + 1) << 2;
-
-	if (remain < bytes + sizeof(*mem_hdr)) {
-		SNAPSHOT_ERR_NOMEM(device, "GMU Memory");
-		return 0;
-	}
-
-	mem_hdr->type = SNAPSHOT_GMU_MEM_BIN_BLOCK;
-	mem_hdr->hostaddr = 0;
-	mem_hdr->gmuaddr = gmu_get_memtype_base(KGSL_GMU_DEVICE(device), *type);
-	mem_hdr->gpuaddr = 0;
-
-	for (i = regs[0]; i <= regs[1]; i++)
-		kgsl_regread(device, i, data++);
-
-	return bytes + sizeof(*mem_hdr);
-}
-
-struct gmu_mem_type_desc {
-	struct gmu_memdesc *memdesc;
-	uint32_t type;
-};
-
-static size_t a6xx_snapshot_gmu_mem(struct kgsl_device *device,
-		u8 *buf, size_t remain, void *priv)
-{
-	struct kgsl_snapshot_gmu_mem *mem_hdr =
-		(struct kgsl_snapshot_gmu_mem *)buf;
-	struct gmu_mem_type_desc *desc = priv;
-	unsigned int *data = (unsigned int *)(buf + sizeof(*mem_hdr));
-
-	if (priv == NULL)
-		return 0;
-
-	if (remain < desc->memdesc->size + sizeof(*mem_hdr)) {
-		KGSL_CORE_ERR(
-			"snapshot: Not enough memory for the gmu section %d\n",
-			desc->type);
-		return 0;
-	}
-
-	memset(mem_hdr, 0, sizeof(*mem_hdr));
-	mem_hdr->type = desc->type;
-	mem_hdr->hostaddr = (uintptr_t)desc->memdesc->hostptr;
-	mem_hdr->gmuaddr = desc->memdesc->gmuaddr;
-	mem_hdr->gpuaddr = 0;
-
-	/* Just copy the ringbuffer, there are no active IBs */
-	memcpy(data, desc->memdesc->hostptr, desc->memdesc->size);
-
-	return desc->memdesc->size + sizeof(*mem_hdr);
-}
-
-/*
- * a6xx_gmu_snapshot() - A6XX GMU snapshot function
- * @adreno_dev: Device being snapshotted
- * @snapshot: Pointer to the snapshot instance
- *
- * This is where all of the A6XX GMU specific bits and pieces are grabbed
- * into the snapshot memory
- */
-static void a6xx_gmu_snapshot(struct adreno_device *adreno_dev,
-		struct kgsl_snapshot *snapshot)
-{
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
-	bool gx_on;
-	struct gmu_mem_type_desc desc[] = {
-		{gmu->hfi_mem, SNAPSHOT_GMU_MEM_HFI},
-		{gmu->persist_mem, SNAPSHOT_GMU_MEM_BIN_BLOCK},
-		{gmu->icache_mem, SNAPSHOT_GMU_MEM_BIN_BLOCK},
-		{gmu->dcache_mem, SNAPSHOT_GMU_MEM_BIN_BLOCK},
-		{gmu->gmu_log, SNAPSHOT_GMU_MEM_LOG},
-		{gmu->dump_mem, SNAPSHOT_GMU_MEM_BIN_BLOCK} };
-	unsigned int val, i;
-	enum gmu_mem_type type;
-
-	if (!gmu_core_isenabled(device))
-		return;
-
-	for (i = 0; i < ARRAY_SIZE(desc); i++) {
-		if (desc[i].memdesc)
-			kgsl_snapshot_add_section(device,
-				KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
-				snapshot, a6xx_snapshot_gmu_mem,
-				&desc[i]);
-	}
-
-	type = GMU_ITCM;
-	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
-			snapshot, a6xx_snapshot_gmu_tcm, &type);
-	type = GMU_DTCM;
-	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
-			snapshot, a6xx_snapshot_gmu_tcm, &type);
-
-	adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
-		ARRAY_SIZE(a6xx_gmu_registers) / 2);
-
-	gx_on = a6xx_gmu_gx_is_on(adreno_dev);
-
-	if (gx_on) {
-		/* Set fence to ALLOW mode so registers can be read */
-		kgsl_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
-		kgsl_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val);
-		KGSL_DRV_ERR(device, "set FENCE to ALLOW mode:%x\n", val);
-
-		adreno_snapshot_registers(device, snapshot,
-				a6xx_gmu_gx_registers,
-				ARRAY_SIZE(a6xx_gmu_gx_registers) / 2);
-	}
-}
-
 static int a6xx_gmu_wait_for_active_transition(
 	struct adreno_device *adreno_dev)
 {
@@ -1754,7 +1625,6 @@ struct gmu_dev_ops adreno_a6xx_gmudev = {
 	.wait_for_gmu_idle = a6xx_gmu_wait_for_idle,
 	.ifpc_store = a6xx_gmu_ifpc_store,
 	.ifpc_show = a6xx_gmu_ifpc_show,
-	.snapshot = a6xx_gmu_snapshot,
 	.wait_for_active_transition = a6xx_gmu_wait_for_active_transition,
 	.is_initialized = a6xx_gmu_is_initialized,
 	.read_ao_counter = a6xx_gmu_read_ao_counter,

diff --git a/drivers/gpu/msm/adreno_a6xx_rgmu.c b/drivers/gpu/msm/adreno_a6xx_rgmu.c
@@ -22,7 +22,6 @@
 #include "a6xx_reg.h"
 #include "adreno_a6xx.h"
 #include "adreno_trace.h"
-#include "adreno_snapshot.h"
 
 /* RGMU timeouts */
 #define RGMU_IDLE_TIMEOUT		100	/* ms */
@@ -574,23 +573,6 @@ static void a6xx_rgmu_halt_execution(struct kgsl_device *device)
 }
 
-/*
- * a6xx_rgmu_snapshot() - A6XX GMU snapshot function
- * @adreno_dev: Device being snapshotted
- * @snapshot: Pointer to the snapshot instance
- *
- * This is where all of the A6XX GMU specific bits and pieces are grabbed
- * into the snapshot memory
- */
-static void a6xx_rgmu_snapshot(struct adreno_device *adreno_dev,
-		struct kgsl_snapshot *snapshot)
-{
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
-	adreno_snapshot_registers(device, snapshot, a6xx_rgmu_registers,
-		ARRAY_SIZE(a6xx_rgmu_registers) / 2);
-}
-
 struct gmu_dev_ops adreno_a6xx_rgmudev = {
 	.load_firmware = a6xx_rgmu_load_firmware,
 	.oob_set = a6xx_rgmu_oob_set,
@@ -603,7 +585,6 @@ struct gmu_dev_ops adreno_a6xx_rgmudev = {
 	.wait_for_lowest_idle = a6xx_rgmu_wait_for_lowest_idle,
 	.ifpc_store = a6xx_rgmu_ifpc_store,
 	.ifpc_show = a6xx_rgmu_ifpc_show,
-	.snapshot = a6xx_rgmu_snapshot,
 	.halt_execution = a6xx_rgmu_halt_execution,
 	.read_ao_counter = a6xx_gmu_read_ao_counter,
 	.gmu2host_intr_mask = RGMU_OOB_IRQ_MASK,

diff --git a/drivers/gpu/msm/adreno_cp_parser.c b/drivers/gpu/msm/adreno_cp_parser.c
@@ -801,9 +801,6 @@ static int adreno_cp_parse_ib2(struct kgsl_device *device,
 	if (ib_level == 2)
 		return -EINVAL;
 
-	/* Save current IB2 statically */
-	if (ib2base == gpuaddr)
-		kgsl_snapshot_push_object(process, gpuaddr, dwords);
 	/*
 	 * only try to find sub objects iff this IB has
 	 * not been processed already

diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
@@ -730,10 +730,13 @@ void kgsl_device_platform_remove(struct kgsl_device *device);
 const char *kgsl_pwrstate_to_str(unsigned int state);
 
-int kgsl_device_snapshot_init(struct kgsl_device *device);
-void kgsl_device_snapshot(struct kgsl_device *device,
-		struct kgsl_context *context, bool gmu_fault);
-void kgsl_device_snapshot_close(struct kgsl_device *device);
+static inline int kgsl_device_snapshot_init(struct kgsl_device *device)
+{
+	return 0;
+}
+static inline void kgsl_device_snapshot(struct kgsl_device *device,
+		struct kgsl_context *context, bool gmu_fault) {}
+static inline void kgsl_device_snapshot_close(struct kgsl_device *device) {}
 
 void kgsl_events_init(void);
 void kgsl_events_exit(void);
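
With these inline stubs, code that previously captured a crash dump still compiles and simply does nothing, and the init stub returning 0 keeps probe-time error checks happy. A sketch of a typical call site, assuming only the stubs above (example_fault_worker is a hypothetical caller, not from this patch):

```c
#include "kgsl_device.h"	/* provides the inline stubs above */

/* Hypothetical fault path, shown only to illustrate why the stubs
 * keep callers source-compatible. */
static void example_fault_worker(struct kgsl_device *device,
		struct kgsl_context *context)
{
	/* Previously wrote a full device snapshot; with the empty
	 * inline stub this call compiles to nothing. */
	kgsl_device_snapshot(device, context, false);
}
```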

diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c
@@ -127,10 +127,6 @@ static void syncobj_timer(unsigned long data)
 		"kgsl: possible gpu syncpoint deadlock for context %u timestamp %u\n",
 		drawobj->context->id, drawobj->timestamp);
 
-	set_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);
-	kgsl_context_dump(drawobj->context);
-	clear_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);
-
 	dev_err(device->dev, " pending events:\n");
 	for (i = 0; i < syncobj->numsyncs; i++) {
