msm: ipa3: Software workaround to fix GSI2.2 HW bug in SM6150

Any GSI channel-allocation operation issued from SW
may cause channel context accesses on any channel, on any EE,
to return a wrong address for a short duration until
the allocate-channel operation completes. Make a change
to allocate all EE GSI channels during device bootup to work around this HW bug.

Change-Id: Ic5e9a20ec0eec653a4b9a1346aef826153bfb4b8
Acked-by: Ashok Vuyyuru <avuyyuru@qti.qualcomm.com>
Signed-off-by: Mohammed Javid <mjavid@codeaurora.org>
tirimbino
Mohammed Javid 7 years ago
parent 88532b424b
commit b188a2d9e4
  1. 168
      drivers/platform/msm/gsi/gsi.c
  2. 2
      drivers/platform/msm/gsi/gsi.h
  3. 44
      drivers/platform/msm/ipa/ipa_v3/ipa.c
  4. 1
      drivers/platform/msm/ipa/ipa_v3/ipa_i.h
  5. 2
      drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
  6. 19
      include/linux/msm_gsi.h

@ -2096,31 +2096,36 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);
ctx->props = *props;
mutex_lock(&gsi_ctx->mlock);
ee = gsi_ctx->per.ee;
gsi_ctx->ch_dbg[props->ch_id].ch_allocate++;
val = (((props->ch_id << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
gsi_writel(val, gsi_ctx->base +
GSI_EE_n_GSI_CH_CMD_OFFS(ee));
res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
if (res == 0) {
GSIERR("chan_hdl=%u timed out\n", props->ch_id);
if (gsi_ctx->per.ver != GSI_VER_2_2) {
mutex_lock(&gsi_ctx->mlock);
ee = gsi_ctx->per.ee;
gsi_ctx->ch_dbg[props->ch_id].ch_allocate++;
val = (((props->ch_id << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
gsi_writel(val, gsi_ctx->base +
GSI_EE_n_GSI_CH_CMD_OFFS(ee));
res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
if (res == 0) {
GSIERR("chan_hdl=%u timed out\n", props->ch_id);
mutex_unlock(&gsi_ctx->mlock);
devm_kfree(gsi_ctx->dev, user_data);
return -GSI_STATUS_TIMED_OUT;
}
if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
GSIERR("chan_hdl=%u allocation failed state=%d\n",
props->ch_id, ctx->state);
mutex_unlock(&gsi_ctx->mlock);
devm_kfree(gsi_ctx->dev, user_data);
return -GSI_STATUS_RES_ALLOC_FAILURE;
}
mutex_unlock(&gsi_ctx->mlock);
devm_kfree(gsi_ctx->dev, user_data);
return -GSI_STATUS_TIMED_OUT;
}
if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
GSIERR("chan_hdl=%u allocation failed state=%d\n",
props->ch_id, ctx->state);
} else {
mutex_lock(&gsi_ctx->mlock);
ctx->state = GSI_CHAN_STATE_ALLOCATED;
mutex_unlock(&gsi_ctx->mlock);
devm_kfree(gsi_ctx->dev, user_data);
return -GSI_STATUS_RES_ALLOC_FAILURE;
}
mutex_unlock(&gsi_ctx->mlock);
erindex = props->evt_ring_hdl != ~0 ? props->evt_ring_hdl :
GSI_NO_EVT_ERINDEX;
if (erindex != GSI_NO_EVT_ERINDEX) {
@ -2693,31 +2698,40 @@ int gsi_dealloc_channel(unsigned long chan_hdl)
return -GSI_STATUS_UNSUPPORTED_OP;
}
mutex_lock(&gsi_ctx->mlock);
reinit_completion(&ctx->compl);
/*In GSI_VER_2_2 version deallocation channel not supported*/
if (gsi_ctx->per.ver != GSI_VER_2_2) {
mutex_lock(&gsi_ctx->mlock);
reinit_completion(&ctx->compl);
gsi_ctx->ch_dbg[chan_hdl].ch_de_alloc++;
val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
gsi_writel(val, gsi_ctx->base +
GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
if (res == 0) {
GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
mutex_unlock(&gsi_ctx->mlock);
return -GSI_STATUS_TIMED_OUT;
}
if (ctx->state != GSI_CHAN_STATE_NOT_ALLOCATED) {
GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
ctx->state);
/* Hardware returned incorrect value */
BUG();
}
gsi_ctx->ch_dbg[chan_hdl].ch_de_alloc++;
val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
gsi_writel(val, gsi_ctx->base +
GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
if (res == 0) {
GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
mutex_unlock(&gsi_ctx->mlock);
return -GSI_STATUS_TIMED_OUT;
}
if (ctx->state != GSI_CHAN_STATE_NOT_ALLOCATED) {
GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
ctx->state);
/* Hardware returned incorrect value */
BUG();
} else {
mutex_lock(&gsi_ctx->mlock);
GSIDBG("In GSI_VER_2_2 channel deallocation not supported\n");
ctx->state = GSI_CHAN_STATE_NOT_ALLOCATED;
GSIDBG("chan_hdl=%lu Channel state = %u\n", chan_hdl,
ctx->state);
mutex_unlock(&gsi_ctx->mlock);
}
mutex_unlock(&gsi_ctx->mlock);
devm_kfree(gsi_ctx->dev, ctx->user_data);
ctx->allocated = false;
if (ctx->evtr)
@ -3527,6 +3541,72 @@ free_lock:
}
EXPORT_SYMBOL(gsi_halt_channel_ee);
/**
 * gsi_alloc_channel_ee() - Issue the generic ALLOC_CHANNEL command to
 * allocate virtual channel @chan_idx on behalf of execution environment @ee.
 *
 * @chan_idx: virtual channel index to allocate
 * @ee: execution environment the channel belongs to
 * @code: [out] generic-command return code read back from scratch register 0
 *
 * Return: GSI_STATUS_SUCCESS on success, negative GSI status code otherwise.
 */
int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
{
	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_ALLOC_CHANNEL;
	uint32_t val;
	int res;

	if (chan_idx >= gsi_ctx->max_ch || !code) {
		GSIERR("bad params chan_idx=%d\n", chan_idx);
		return -GSI_STATUS_INVALID_PARAMS;
	}
	mutex_lock(&gsi_ctx->mlock);
	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);

	/* invalidate the response */
	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
	gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));

	val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
		GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
		((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
		GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
		((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
		GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));

	/*
	 * GSI_CMD_TIMEOUT is passed to wait_for_completion_timeout()
	 * directly at every other call site in this file, i.e. it is
	 * already a jiffies value; wrapping it in msecs_to_jiffies()
	 * here double-converted (shortened) the timeout. Pass it
	 * directly for consistency.
	 */
	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
		GSI_CMD_TIMEOUT);
	if (res == 0) {
		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
		res = -GSI_STATUS_TIMED_OUT;
		goto free_lock;
	}

	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
		GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES) {
		GSIDBG("chan_idx=%u ee=%u out of resources\n", chan_idx, ee);
		*code = GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES;
		res = -GSI_STATUS_RES_ALLOC_FAILURE;
		goto free_lock;
	}
	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
		GSIERR("No response received\n");
		res = -GSI_STATUS_ERROR;
		goto free_lock;
	}

	/*
	 * Only count allocations done on behalf of the local EE in the
	 * channel debug stats. (The original also computed an unused
	 * &gsi_ctx->chan[chan_idx] pointer here; removed.)
	 */
	if (ee == 0)
		gsi_ctx->ch_dbg[chan_idx].ch_allocate++;

	res = GSI_STATUS_SUCCESS;
	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
free_lock:
	mutex_unlock(&gsi_ctx->mlock);
	return res;
}
EXPORT_SYMBOL(gsi_alloc_channel_ee);
int gsi_map_virtual_ch_to_per_ep(u32 ee, u32 chan_num, u32 per_ep_index)
{
if (!gsi_ctx) {

@ -295,6 +295,7 @@ enum gsi_evt_ch_cmd_opcode {
/* Opcodes written to the EE-scope generic command register
 * (GSI_EE_n_GSI_EE_GENERIC_CMD) — see gsi_halt_channel_ee() and
 * gsi_alloc_channel_ee().
 */
enum gsi_generic_ee_cmd_opcode {
GSI_GEN_EE_CMD_HALT_CHANNEL = 0x1,
/* allocate a channel on behalf of another EE (GSI 2.2 bootup workaround) */
GSI_GEN_EE_CMD_ALLOC_CHANNEL = 0x2,
};
enum gsi_generic_ee_cmd_return_code {
@ -304,6 +305,7 @@ enum gsi_generic_ee_cmd_return_code {
GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE = 0x4,
GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX = 0x5,
GSI_GEN_EE_CMD_RETURN_CODE_RETRY = 0x6,
GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES = 0x7,
};
extern struct gsi_ctx *gsi_ctx;

@ -4438,6 +4438,38 @@ static int ipa3_gsi_pre_fw_load_init(void)
return 0;
}
static int ipa3_alloc_gsi_channel(void)
{
const struct ipa_gsi_ep_config *gsi_ep_cfg;
enum ipa_client_type type;
int code = 0;
int ret = 0;
int i;
for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
type = ipa3_get_client_by_pipe(i);
gsi_ep_cfg = ipa3_get_gsi_ep_info(type);
IPADBG("for ep %d client is %d\n", i, type);
if (!gsi_ep_cfg)
continue;
ret = gsi_alloc_channel_ee(gsi_ep_cfg->ipa_gsi_chan_num,
gsi_ep_cfg->ee, &code);
if (ret == GSI_STATUS_SUCCESS) {
IPADBG("alloc gsi ch %d ee %d with code %d\n",
gsi_ep_cfg->ipa_gsi_chan_num,
gsi_ep_cfg->ee,
code);
} else {
IPAERR("failed to alloc ch %d ee %d code %d\n",
gsi_ep_cfg->ipa_gsi_chan_num,
gsi_ep_cfg->ee,
code);
return ret;
}
}
return ret;
}
/**
* ipa3_post_init() - Initialize the IPA Driver (Part II).
* This part contains all initialization which requires interaction with
@ -4665,6 +4697,17 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
goto fail_register_device;
}
IPADBG("IPA gsi is registered\n");
/* GSI 2.2 requires to allocate all EE GSI channel
* during device bootup.
*/
if (ipa3_get_gsi_ver(resource_p->ipa_hw_type) == GSI_VER_2_2) {
result = ipa3_alloc_gsi_channel();
if (result) {
IPAERR("Failed to alloc the GSI channels\n");
result = -ENODEV;
goto fail_alloc_gsi_channel;
}
}
/* setup the AP-IPA pipes */
if (ipa3_setup_apps_pipes()) {
@ -4728,6 +4771,7 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
fail_teth_bridge_driver_init:
ipa3_teardown_apps_pipes();
fail_alloc_gsi_channel:
fail_setup_apps_pipes:
gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
fail_register_device:

@ -2298,6 +2298,7 @@ void ipa3_proxy_clk_unvote(void);
bool ipa3_is_client_handle_valid(u32 clnt_hdl);
enum ipa_client_type ipa3_get_client_mapping(int pipe_idx);
enum ipa_client_type ipa3_get_client_by_pipe(int pipe_idx);
void ipa_init_ep_flt_bitmap(void);

@ -3510,7 +3510,7 @@ enum ipa_client_type ipa3_get_client_mapping(int pipe_idx)
*
* Return value: client type
*/
static enum ipa_client_type ipa3_get_client_by_pipe(int pipe_idx)
enum ipa_client_type ipa3_get_client_by_pipe(int pipe_idx)
{
int j = 0;

@ -1209,6 +1209,19 @@ int gsi_unmap_base(void);
*/
int gsi_map_virtual_ch_to_per_ep(u32 ee, u32 chan_num, u32 per_ep_index);
/**
* gsi_alloc_channel_ee - Peripheral should call this function
* to alloc other EE's channel. This is usually done in bootup to allocate all
* channels.
*
* @chan_idx: Virtual channel index
* @ee: EE
* @code: [out] response code for operation
* @Return gsi_status
*/
int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
/*
* Here is a typical sequence of calls
*
@ -1456,5 +1469,11 @@ static inline int gsi_map_virtual_ch_to_per_ep(
return -GSI_STATUS_UNSUPPORTED_OP;
}
/*
 * Stub for builds without GSI support (presumably the disabled-config
 * branch of this header — confirm): unconditionally reports the
 * operation as unsupported.
 */
static inline int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee,
int *code)
{
return -GSI_STATUS_UNSUPPORTED_OP;
}
#endif
#endif

Loading…
Cancel
Save