msm: ipa4: flow control changes for rmnet pipe

When the RMNET pipe channel is in the started state and receives
junk packets, it can stall the IPA HW. Add support for the new
flow control channel state on the RMNET pipe so that junk packets
do not enter the IPA HW.

Change-Id: If2b3992e8cdc93343e70f9d2c1783bbbde329bc2
Signed-off-by: Ashok Vuyyuru <avuyyuru@codeaurora.org>
Branch: tirimbino
Author: Ashok Vuyyuru
parent 11ae02d078
commit 2e4061b588
  1. drivers/platform/msm/gsi/gsi.c               | 94
  2. drivers/platform/msm/gsi/gsi.h               |  5
  3. drivers/platform/msm/ipa/ipa_v3/ipa_client.c | 43
  4. drivers/platform/msm/ipa/ipa_v3/ipa_i.h      |  2
  5. drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c    | 86
  6. include/linux/msm_gsi.h                      | 23
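
For orientation before the diffs: a minimal caller-side sketch, in the
driver's C style, of the sequence this patch introduces for a producer
pipe: start the GSI channel, then ask GSI firmware to move it into the
new flow control state so traffic is held back from the IPA HW. The
wrapper function name is hypothetical; gsi_start_channel() and
gsi_enable_flow_control_ee() are the calls the patch actually uses.

/*
 * Illustrative sketch only, not part of the patch. The wrapper name is
 * made up; error handling mirrors the patch's callers, which log the
 * outcome and continue.
 */
static int rmnet_prod_start_with_flow_ctrl(unsigned long gsi_chan_hdl)
{
	int code = 0;
	int res;

	/* Bring the channel to the STARTED state first. */
	res = gsi_start_channel(gsi_chan_hdl);
	if (res != GSI_STATUS_SUCCESS)
		return res;

	/* Generic EE command: move the channel to GSI_CHAN_STATE_FLOW_CONTROL. */
	res = gsi_enable_flow_control_ee(gsi_chan_hdl, 0, &code);
	if (res != GSI_STATUS_SUCCESS)
		pr_err("flow control failed on gsi ch %lu, code %d\n",
		       gsi_chan_hdl, code);

	return res;
}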

@ -1,4 +1,4 @@
/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -4287,7 +4287,6 @@ free_lock:
}
EXPORT_SYMBOL(gsi_alloc_channel_ee);
int gsi_chk_intset_value(void)
{
uint32_t val;
@ -4298,6 +4297,97 @@ int gsi_chk_intset_value(void)
}
EXPORT_SYMBOL(gsi_chk_intset_value);
int gsi_enable_flow_control_ee(unsigned int chan_idx, unsigned int ee,
int *code)
{
enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_ENABLE_FLOW_CHANNEL;
uint32_t val;
enum gsi_chan_state curr_state = GSI_CHAN_STATE_NOT_ALLOCATED;
int res;
if (!gsi_ctx) {
pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
return -GSI_STATUS_NODEV;
}
if (chan_idx >= gsi_ctx->max_ch || !code) {
GSIERR("bad params chan_idx=%d\n", chan_idx);
return -GSI_STATUS_INVALID_PARAMS;
}
mutex_lock(&gsi_ctx->mlock);
reinit_completion(&gsi_ctx->gen_ee_cmd_compl);
/* invalidate the response */
gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
gsi_ctx->gen_ee_cmd_dbg.flow_ctrl_channel++;
val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
gsi_writel(val, gsi_ctx->base +
GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));
res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
msecs_to_jiffies(GSI_CMD_TIMEOUT));
if (res == 0) {
GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
res = -GSI_STATUS_TIMED_OUT;
goto free_lock;
}
gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING) {
GSIDBG("chan_idx=%u ee=%u not in correct state\n",
chan_idx, ee);
*code = GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING;
res = -GSI_STATUS_RES_ALLOC_FAILURE;
goto free_lock;
} else if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE ||
gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX){
GSIERR("chan_idx=%u ee=%u not in correct state\n",
chan_idx, ee);
BUG();
}
if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
GSIERR("No response received\n");
res = -GSI_STATUS_ERROR;
goto free_lock;
}
/*Reading current channel state*/
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(chan_idx, ee));
curr_state = (val &
GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
if (curr_state == GSI_CHAN_STATE_FLOW_CONTROL) {
GSIDBG("ch %u state updated to %u\n", chan_idx, curr_state);
res = GSI_STATUS_SUCCESS;
} else {
GSIERR("ch %u state updated to %u incorrect state\n",
chan_idx, curr_state);
res = -GSI_STATUS_ERROR;
}
*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
free_lock:
mutex_unlock(&gsi_ctx->mlock);
return res;
}
EXPORT_SYMBOL(gsi_enable_flow_control_ee);
int gsi_map_virtual_ch_to_per_ep(u32 ee, u32 chan_num, u32 per_ep_index)
{
if (!gsi_ctx) {

@ -1,4 +1,4 @@
/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -201,6 +201,7 @@ struct ch_debug_stats {
struct gsi_generic_ee_cmd_debug_stats {
unsigned long halt_channel;
unsigned long flow_ctrl_channel;
};
struct gsi_ctx {
@ -329,6 +330,8 @@ enum gsi_evt_ch_cmd_opcode {
enum gsi_generic_ee_cmd_opcode {
GSI_GEN_EE_CMD_HALT_CHANNEL = 0x1,
GSI_GEN_EE_CMD_ALLOC_CHANNEL = 0x2,
GSI_GEN_EE_CMD_ENABLE_FLOW_CHANNEL = 0x3,
GSI_GEN_EE_CMD_DISABLE_FLOW_CHANNEL = 0x4,
};
enum gsi_generic_ee_cmd_return_code {

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -590,6 +590,15 @@ int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
IPADBG("ep configuration successful\n");
} else {
IPADBG("Skipping endpoint configuration.\n");
if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[ipa_ep_idx].client) &&
ipa3_ctx->ep[ipa_ep_idx].client == IPA_CLIENT_USB_PROD
&& !ipa3_is_mhip_offload_enabled()) {
if (ipa3_cfg_ep_seq(ipa_ep_idx,
&params->ipa_ep_cfg.seq)) {
IPAERR("fail to configure USB pipe seq\n");
goto ipa_cfg_ep_fail;
}
}
}
out_params->clnt_hdl = ipa_ep_idx;
@ -779,6 +788,7 @@ int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
int result = -EFAULT;
enum gsi_status gsi_res;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
int code = 0;
IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
@ -821,6 +831,20 @@ int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
IPAERR("Error starting channel: %d\n", gsi_res);
goto write_chan_scratch_fail;
}
if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg &&
ipa3_ctx->ipa_endp_delay_wa &&
!ipa3_is_mhip_offload_enabled()) {
gsi_res = gsi_enable_flow_control_ee(ep->gsi_chan_hdl, 0,
&code);
if (gsi_res == GSI_STATUS_SUCCESS) {
IPADBG("flow control sussess gsi ch %d with code %d\n",
ep->gsi_chan_hdl, code);
} else {
IPADBG("failed to flow control gsi ch %d code %d\n",
ep->gsi_chan_hdl, code);
}
}
ipa3_start_gsi_debug_monitor(clnt_hdl);
if (!ep->keep_ipa_awake)
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
@ -1232,6 +1256,7 @@ int ipa3_start_stop_client_prod_gsi_chnl(enum ipa_client_type client,
int result = 0;
int pipe_idx;
struct ipa3_ep_context *ep;
int code = 0;
if (IPA_CLIENT_IS_CONS(client)) {
IPAERR("client (%d) not PROD\n", client);
@ -1247,10 +1272,20 @@ int ipa3_start_stop_client_prod_gsi_chnl(enum ipa_client_type client,
client_lock_unlock_cb(client, true);
ep = &ipa3_ctx->ep[pipe_idx];
if (ep->valid && ep->skip_ep_cfg && ipa3_get_teth_port_status(client)) {
if (start_chnl)
if (ep->valid && ep->skip_ep_cfg && ipa3_get_teth_port_status(client)
&& !ipa3_is_mhip_offload_enabled()) {
if (start_chnl) {
result = ipa3_start_gsi_channel(pipe_idx);
else
result = gsi_enable_flow_control_ee(ep->gsi_chan_hdl,
0, &code);
if (result == GSI_STATUS_SUCCESS) {
IPADBG("flow control sussess ch %d code %d\n",
ep->gsi_chan_hdl, code);
} else {
IPADBG("failed to flow control ch %d code %d\n",
ep->gsi_chan_hdl, code);
}
} else
result = ipa3_stop_gsi_channel(pipe_idx);
}
client_lock_unlock_cb(client, false);

@ -2352,6 +2352,8 @@ int ipa3_clear_endpoint_delay(u32 clnt_hdl);
*/
int ipa3_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
int ipa3_cfg_ep_seq(u32 clnt_hdl, const struct ipa_ep_cfg_seq *seq_cfg);
int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
int ipa3_cfg_ep_conn_track(u32 clnt_hdl,

@ -189,7 +189,8 @@ static int ipa3_mhi_get_ch_poll_cfg(enum ipa_client_type client,
}
static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
int ipa_ep_idx, struct start_gsi_channel *params)
int ipa_ep_idx, struct start_gsi_channel *params,
struct ipa_ep_cfg *ipa_ep_cfg)
{
int res = 0;
struct gsi_evt_ring_props ev_props;
@ -200,6 +201,7 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
const struct ipa_gsi_ep_config *ep_cfg;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
bool burst_mode_enabled = false;
int code = 0;
IPA_MHI_FUNC_ENTRY();
@ -349,6 +351,37 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
*params->mhi = ch_scratch.mhi;
res = ipa3_enable_data_path(ipa_ep_idx);
if (res) {
IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
ipa_ep_idx);
goto fail_ep_cfg;
}
if (!ep->skip_ep_cfg) {
if (ipa3_cfg_ep(ipa_ep_idx, ipa_ep_cfg)) {
IPAERR("fail to configure EP.\n");
goto fail_ep_cfg;
}
if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
IPAERR("fail to configure status of EP.\n");
goto fail_ep_cfg;
}
IPA_MHI_DBG("ep configuration successful\n");
} else {
IPA_MHI_DBG("skipping ep configuration\n");
if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[ipa_ep_idx].client) &&
ipa3_ctx->ep[ipa_ep_idx].client == IPA_CLIENT_MHI_PROD
&& !ipa3_is_mhip_offload_enabled()) {
if (ipa3_cfg_ep_seq(ipa_ep_idx,
&ipa_ep_cfg->seq)) {
IPA_MHI_ERR("fail to configure USB pipe seq\n");
goto fail_ep_cfg;
}
}
}
if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg) {
memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
ep_cfg_ctrl.ipa_ep_delay = true;
@ -363,6 +396,9 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
ep->ep_delay_set = false;
}
if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(client))
ipa3_install_dflt_flt_rules(ipa_ep_idx);
IPA_MHI_DBG("Starting channel\n");
res = gsi_start_channel(ep->gsi_chan_hdl);
if (res) {
@ -370,9 +406,24 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
goto fail_ch_start;
}
if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg &&
ipa3_ctx->ipa_endp_delay_wa &&
!ipa3_is_mhip_offload_enabled()) {
res = gsi_enable_flow_control_ee(ep->gsi_chan_hdl, 0, &code);
if (res == GSI_STATUS_SUCCESS) {
IPA_MHI_DBG("flow ctrl sussess gsi ch %d code %d\n",
ep->gsi_chan_hdl, code);
} else {
IPA_MHI_DBG("failed to flow ctrll gsi ch %d code %d\n",
ep->gsi_chan_hdl, code);
}
}
IPA_MHI_FUNC_EXIT();
return 0;
fail_ep_cfg:
ipa3_disable_data_path(ipa_ep_idx);
fail_ch_start:
fail_ch_scratch:
gsi_dealloc_channel(ep->gsi_chan_hdl);
@ -490,49 +541,22 @@ int ipa3_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
ep->keep_ipa_awake = in->sys->keep_ipa_awake;
res = ipa_mhi_start_gsi_channel(client,
ipa_ep_idx, &in->start.gsi);
ipa_ep_idx, &in->start.gsi,
&in->sys->ipa_ep_cfg);
if (res) {
IPA_MHI_ERR("ipa_mhi_start_gsi_channel failed %d\n",
res);
goto fail_start_channel;
}
res = ipa3_enable_data_path(ipa_ep_idx);
if (res) {
IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
ipa_ep_idx);
goto fail_ep_cfg;
}
if (!ep->skip_ep_cfg) {
if (ipa3_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) {
IPAERR("fail to configure EP.\n");
goto fail_ep_cfg;
}
if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
IPAERR("fail to configure status of EP.\n");
goto fail_ep_cfg;
}
IPA_MHI_DBG("ep configuration successful\n");
} else {
IPA_MHI_DBG("skipping ep configuration\n");
}
*clnt_hdl = ipa_ep_idx;
if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(client))
ipa3_install_dflt_flt_rules(ipa_ep_idx);
ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
IPA_MHI_DBG("client %d (ep: %d) connected\n", client,
ipa_ep_idx);
IPA_MHI_DBG("client %d (ep: %d) connected\n", client, ipa_ep_idx);
IPA_MHI_FUNC_EXIT();
return 0;
fail_ep_cfg:
ipa3_disable_data_path(ipa_ep_idx);
fail_start_channel:
memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
return -EPERM;

@ -1,4 +1,4 @@
/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -1619,6 +1619,20 @@ int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
int gsi_chk_intset_value(void);
/**
* gsi_enable_flow_control_ee - Peripheral should call this function
* to enable flow control on another EE's channel. This is usually done in
* USB connect and SSR scenarios.
*
* @chan_idx: Virtual channel index
* @ee: EE
* @code: [out] response code for operation
* @Return gsi_status
*/
int gsi_enable_flow_control_ee(unsigned int chan_idx, unsigned int ee,
int *code);
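
As an illustration of the declaration above (not part of this header
change), here is one way a caller could interpret the outcomes the gsi.c
implementation can report. The patch's own callers only log the result,
so the branching below is an assumption; chan_idx is assumed from the
surrounding caller context.

	/* Hypothetical caller-side handling; chan_idx assumed from context. */
	int code = 0;
	int res = gsi_enable_flow_control_ee(chan_idx, 0, &code);

	switch (res) {
	case GSI_STATUS_SUCCESS:
		/* Channel is now in GSI_CHAN_STATE_FLOW_CONTROL. */
		break;
	case -GSI_STATUS_RES_ALLOC_FAILURE:
		/*
		 * code is GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING:
		 * the channel was not in a state that allows flow control.
		 */
		break;
	case -GSI_STATUS_TIMED_OUT:
		/* Generic EE command did not complete within GSI_CMD_TIMEOUT. */
		break;
	default:
		/* -GSI_STATUS_ERROR: no response or unexpected final channel state. */
		break;
	}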
/*
* Here is a typical sequence of calls
*
@ -1882,12 +1896,17 @@ static inline int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee,
return -GSI_STATUS_UNSUPPORTED_OP;
}
static inline int gsi_chk_intset_value(void)
{
return -GSI_STATUS_UNSUPPORTED_OP;
}
static inline int gsi_enable_flow_control_ee(unsigned int chan_idx,
unsigned int ee, int *code)
{
return -GSI_STATUS_UNSUPPORTED_OP;
}
static inline void gsi_wdi3_write_evt_ring_db(
unsigned long chan_hdl, uint32_t db_addr_low,
uint32_t db_addr_high)
