From 9787922e7cc5e64cb8633d196dc96b5c5a79bf7b Mon Sep 17 00:00:00 2001
From: Sujeev Dias
Date: Fri, 11 May 2018 20:09:34 -0700
Subject: [PATCH] mhi: core: add support to get external modem time

For accurate synchronization between the external modem and the host
processor, the MHI host captures modem time relative to host time.
Clients may use these time measurements to adjust for any drift between
the host and the modem.

CRs-Fixed: 2246653
Change-Id: I35fceea13e0df3929d4b0efd6b77a8de4211099c
Signed-off-by: Sujeev Dias
---
 Documentation/devicetree/bindings/bus/mhi.txt |   7 +
 Documentation/mhi.txt                         |  41 ++
 drivers/bus/mhi/core/mhi_init.c               | 121 +++++-
 drivers/bus/mhi/core/mhi_internal.h           |  91 ++++-
 drivers/bus/mhi/core/mhi_main.c               | 374 +++++++++++++++---
 drivers/bus/mhi/core/mhi_pm.c                 |   9 +-
 include/linux/mhi.h                           |  23 ++
 7 files changed, 606 insertions(+), 60 deletions(-)

diff --git a/Documentation/devicetree/bindings/bus/mhi.txt b/Documentation/devicetree/bindings/bus/mhi.txt
index 1fcd34471fc2..7c1471430006 100644
--- a/Documentation/devicetree/bindings/bus/mhi.txt
+++ b/Documentation/devicetree/bindings/bus/mhi.txt
@@ -95,6 +95,13 @@ Main node properties:
 	Value type: <u32>
 	Definition: Size of each segment to allocate for BHIe vector table
 
+- mhi,time-sync
+	Usage: optional
+	Value type: <bool>
+	Definition: Set true if the external device supports the MHI get-time
+		feature for time synchronization between the host processor
+		and the external modem.
+
 ==========================
 mhi event node properties:
 ==========================

diff --git a/Documentation/mhi.txt b/Documentation/mhi.txt
index 1c501f18f156..928789936b37 100644
--- a/Documentation/mhi.txt
+++ b/Documentation/mhi.txt
@@ -137,6 +137,47 @@ Example Operation for data transfer:
 8. Host wakes up and check event ring for completion event
 9. Host update the Event[i].ctxt.WP to indicate processed of completion event.
 
+Time sync
+---------
+To synchronize applications running on the host and on an external modem, MHI
+provides native support for reading the external modem's free running timer
+value in a fast, reliable manner. MHI clients do not need to create
+client-specific methods to get modem time.
+
+When a client requests modem time, the MHI host automatically captures host
+time at that moment, so clients are able to do accurate drift adjustment.
+
+Example:
+
+Client requests time @ time T1
+
+Host Time: Tx
+Modem Time: Ty
+
+Client requests time @ time T2
+Host Time: Txx
+Modem Time: Tyy
+
+Then the drift is:
+Tyy - Ty + <drift> == Txx - Tx
+
+Clients are free to implement their own drift algorithms; what the MHI host
+provides is a way to accurately correlate host time with external modem time.
+
+To avoid link level latencies, the controller must be capable of disabling
+any link level latency.
+
+During time capture the host will:
+ 1. Capture host time
+ 2. Trigger the doorbell to capture modem time
+
+It is important that the delay between Step 1 and Step 2 is as deterministic
+as possible. Therefore, the MHI host will:
+ 1. Disable any MHI related low power modes.
+ 2. Disable preemption.
+ 3. Request the bus master to disable any link level latencies. The
+    controller should disable all low power modes such as L0s, L1, and L1ss.
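As an illustration of the client-side flow described above, the following is a
minimal sketch (not part of this patch) of a client taking two samples with
mhi_get_remote_time() and computing the drift. It assumes the prototype is
exported through include/linux/mhi.h as added by this series; the sample
bookkeeping and the names tsync_cb/take_two_samples are hypothetical, and
converting modem timer ticks to a common unit is left to the client.

#include <linux/mhi.h>
#include <linux/printk.h>

struct time_sample {
	u64 host_us;		/* local_time reported by the MHI host */
	u64 modem_ticks;	/* remote_time from the modem free running timer */
};

static struct time_sample samples[2];

/* callback invoked by MHI core once the modem time for @sequence arrives */
static void tsync_cb(struct mhi_device *mhi_dev, u32 sequence,
		     u64 local_time, u64 remote_time)
{
	samples[sequence].host_us = local_time;
	samples[sequence].modem_ticks = remote_time;

	if (sequence == 1) {
		/* drift = (Txx - Tx) - (Tyy - Ty), per the example above;
		 * assumes modem ticks were already converted to microseconds
		 */
		s64 drift = (samples[1].host_us - samples[0].host_us) -
			    (samples[1].modem_ticks - samples[0].modem_ticks);

		pr_info("host/modem drift between samples: %lld us\n",
			(long long)drift);
	}
}

/* take sample @ T1 now, sample @ T2 is expected to follow later */
static int take_two_samples(struct mhi_device *mhi_dev)
{
	int ret;

	ret = mhi_get_remote_time(mhi_dev, 0, tsync_cb);
	if (ret)
		return ret;

	/* ... some time later, request the second sample ... */
	return mhi_get_remote_time(mhi_dev, 1, tsync_cb);
}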
+ MHI States ---------- diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c index 3f1b441fe180..583fb39e6e23 100644 --- a/drivers/bus/mhi/core/mhi_init.c +++ b/drivers/bus/mhi/core/mhi_init.c @@ -414,6 +414,77 @@ error_alloc_chan_ctxt: return ret; } +int mhi_init_timesync(struct mhi_controller *mhi_cntrl) +{ + struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync; + u32 time_offset, db_offset; + int ret; + + reinit_completion(&mhi_tsync->completion); + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("MHI host is not in active state\n"); + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + + ret = mhi_send_cmd(mhi_cntrl, NULL, MHI_CMD_TIMSYNC_CFG); + if (ret) { + MHI_ERR("Failed to send time sync cfg cmd\n"); + goto error_send_cmd; + } + + ret = wait_for_completion_timeout(&mhi_tsync->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || mhi_tsync->ccs != MHI_EV_CC_SUCCESS) { + MHI_ERR("Failed to receive cmd completion for time_sync_cfg\n"); + ret = -EIO; + goto error_send_cmd; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + + ret = -EIO; + + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + goto error_sync_cap; + + ret = mhi_get_capability_offset(mhi_cntrl, TIMESYNC_CAP_ID, + &time_offset); + if (ret) { + MHI_ERR("could not find timesync capability\n"); + goto error_sync_cap; + } + + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, + time_offset + TIMESYNC_DB_OFFSET, &db_offset); + if (ret) + goto error_sync_cap; + + MHI_LOG("TIMESYNC_DB OFFS:0x%x\n", db_offset); + + mhi_tsync->db = mhi_cntrl->regs + db_offset; + +error_sync_cap: + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; + +error_send_cmd: + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} + int mhi_init_mmio(struct mhi_controller *mhi_cntrl) { u32 val; @@ -749,6 +820,9 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl, case MHI_ER_CTRL_ELEMENT_TYPE: mhi_event->process_event = mhi_process_ctrl_ev_ring; break; + case MHI_ER_TSYNC_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_tsync_event_ring; + break; } mhi_event->hw_ring = of_property_read_bool(child, "mhi,hw-ev"); @@ -913,6 +987,7 @@ static int of_parse_dt(struct mhi_controller *mhi_cntrl, struct device_node *of_node) { int ret; + struct mhi_timesync *mhi_tsync; /* parse firmware image info (optional parameters) */ of_property_read_string(of_node, "mhi,fw-name", &mhi_cntrl->fw_image); @@ -938,9 +1013,35 @@ static int of_parse_dt(struct mhi_controller *mhi_cntrl, if (ret) mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + mhi_cntrl->time_sync = of_property_read_bool(of_node, "mhi,time-sync"); + + if (mhi_cntrl->time_sync) { + mhi_tsync = kzalloc(sizeof(*mhi_tsync), GFP_KERNEL); + if (!mhi_tsync) { + ret = -ENOMEM; + goto error_time_sync; + } + + ret = of_property_read_u32(of_node, "mhi,tsync-er", + &mhi_tsync->er_index); + if (ret) + goto error_time_sync; + + if (mhi_tsync->er_index >= mhi_cntrl->total_ev_rings) { + ret = -EINVAL; + goto error_time_sync; + } + + mhi_cntrl->mhi_tsync = mhi_tsync; + } + return 0; - error_ev_cfg: +error_time_sync: + kfree(mhi_tsync); + kfree(mhi_cntrl->mhi_event); + +error_ev_cfg: kfree(mhi_cntrl->mhi_chan); return ret; 
@@ -974,6 +1075,11 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl) if (ret) return -EINVAL; + if (mhi_cntrl->time_sync && + (!mhi_cntrl->time_get || !mhi_cntrl->lpm_disable || + !mhi_cntrl->lpm_enable)) + return -EINVAL; + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); if (!mhi_cntrl->mhi_cmd) @@ -1016,6 +1122,14 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl) rwlock_init(&mhi_chan->lock); } + if (mhi_cntrl->mhi_tsync) { + struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync; + + spin_lock_init(&mhi_tsync->lock); + INIT_LIST_HEAD(&mhi_tsync->head); + init_completion(&mhi_tsync->completion); + } + mhi_cntrl->parent = mhi_bus.dentry; mhi_cntrl->klog_lvl = MHI_MSG_LVL_ERROR; @@ -1039,6 +1153,7 @@ void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl) kfree(mhi_cntrl->mhi_cmd); kfree(mhi_cntrl->mhi_event); kfree(mhi_cntrl->mhi_chan); + kfree(mhi_cntrl->mhi_tsync); mutex_lock(&mhi_bus.lock); list_del(&mhi_cntrl->node); @@ -1272,6 +1387,10 @@ static int mhi_driver_remove(struct device *dev) mutex_unlock(&mhi_chan->mutex); } + + if (mhi_cntrl->tsync_dev == mhi_dev) + mhi_cntrl->tsync_dev = NULL; + /* relinquish any pending votes */ read_lock_bh(&mhi_cntrl->pm_lock); while (atomic_read(&mhi_dev->dev_wake)) diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h index 90f9a41a856f..10418979bce0 100644 --- a/drivers/bus/mhi/core/mhi_internal.h +++ b/drivers/bus/mhi/core/mhi_internal.h @@ -130,6 +130,30 @@ extern struct bus_type mhi_bus_type; #define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF) #define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0) +/* MHI misc capability registers */ +#define MISC_OFFSET (0x24) +#define MISC_CAP_MASK (0xFFFFFFFF) +#define MISC_CAP_SHIFT (0) + +#define CAP_CAPID_MASK (0xFF000000) +#define CAP_CAPID_SHIFT (24) +#define CAP_NEXT_CAP_MASK (0x00FFF000) +#define CAP_NEXT_CAP_SHIFT (12) + +/* MHI Timesync offsets */ +#define TIMESYNC_CFG_OFFSET (0x00) +#define TIMESYNC_CFG_CAPID_MASK (CAP_CAPID_MASK) +#define TIMESYNC_CFG_CAPID_SHIFT (CAP_CAPID_SHIFT) +#define TIMESYNC_CFG_NEXT_OFF_MASK (CAP_NEXT_CAP_MASK) +#define TIMESYNC_CFG_NEXT_OFF_SHIFT (CAP_NEXT_CAP_SHIFT) +#define TIMESYNC_CFG_NUMCMD_MASK (0xFF) +#define TIMESYNC_CFG_NUMCMD_SHIFT (0) +#define TIMESYNC_TIME_LOW_OFFSET (0x4) +#define TIMESYNC_TIME_HIGH_OFFSET (0x8) +#define TIMESYNC_DB_OFFSET (0xC) + +#define TIMESYNC_CAP_ID (2) + /* MHI BHI offfsets */ #define BHI_BHIVERSION_MINOR (0x00) #define BHI_BHIVERSION_MAJOR (0x04) @@ -238,27 +262,44 @@ struct __packed bhi_vec_entry { u64 size; }; +enum mhi_cmd_type { + MHI_CMD_TYPE_NOP = 1, + MHI_CMD_TYPE_RESET = 16, + MHI_CMD_TYPE_STOP = 17, + MHI_CMD_TYPE_START = 18, + MHI_CMD_TYPE_TSYNC = 24, +}; + /* no operation command */ #define MHI_TRE_CMD_NOOP_PTR (0) #define MHI_TRE_CMD_NOOP_DWORD0 (0) -#define MHI_TRE_CMD_NOOP_DWORD1 (1 << 16) +#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_TYPE_NOP << 16) /* channel reset command */ #define MHI_TRE_CMD_RESET_PTR (0) #define MHI_TRE_CMD_RESET_DWORD0 (0) -#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | (16 << 16)) +#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_TYPE_RESET << 16)) /* channel stop command */ #define MHI_TRE_CMD_STOP_PTR (0) #define MHI_TRE_CMD_STOP_DWORD0 (0) -#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | (17 << 16)) +#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | (MHI_CMD_TYPE_STOP << 16)) /* channel start command */ #define 
MHI_TRE_CMD_START_PTR (0) #define MHI_TRE_CMD_START_DWORD0 (0) -#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | (18 << 16)) +#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_TYPE_START << 16)) + +/* time sync cfg command */ +#define MHI_TRE_CMD_TSYNC_CFG_PTR (0) +#define MHI_TRE_CMD_TSYNC_CFG_DWORD0 (0) +#define MHI_TRE_CMD_TSYNC_CFG_DWORD1(er) ((MHI_CMD_TYPE_TSYNC << 16) | \ + (er << 24)) #define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) /* event descriptor macros */ #define MHI_TRE_EV_PTR(ptr) (ptr) @@ -271,7 +312,8 @@ struct __packed bhi_vec_entry { #define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) #define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF) #define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF) - +#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0]) +#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr) /* transfer descriptor macros */ #define MHI_TRE_DATA_PTR(ptr) (ptr) @@ -280,11 +322,9 @@ struct __packed bhi_vec_entry { | (ieot << 9) | (ieob << 8) | chain) enum MHI_CMD { - MHI_CMD_NOOP = 0x0, - MHI_CMD_RESET_CHAN = 0x1, - MHI_CMD_STOP_CHAN = 0x2, - MHI_CMD_START_CHAN = 0x3, - MHI_CMD_RESUME_CHAN = 0x4, + MHI_CMD_RESET_CHAN, + MHI_CMD_START_CHAN, + MHI_CMD_TIMSYNC_CFG, }; enum MHI_PKT_TYPE { @@ -298,6 +338,7 @@ enum MHI_PKT_TYPE { MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, MHI_PKT_TYPE_TX_EVENT = 0x22, MHI_PKT_TYPE_EE_EVENT = 0x40, + MHI_PKT_TYPE_TSYNC_EVENT = 0x48, MHI_PKT_TYPE_STALE_EVENT, /* internal event */ }; @@ -457,7 +498,8 @@ enum MHI_ER_TYPE { enum mhi_er_data_type { MHI_ER_DATA_ELEMENT_TYPE, MHI_ER_CTRL_ELEMENT_TYPE, - MHI_ER_DATA_TYPE_MAX = MHI_ER_CTRL_ELEMENT_TYPE, + MHI_ER_TSYNC_ELEMENT_TYPE, + MHI_ER_DATA_TYPE_MAX = MHI_ER_TSYNC_ELEMENT_TYPE, }; struct db_cfg { @@ -577,6 +619,25 @@ struct mhi_chan { struct list_head node; }; +struct tsync_node { + struct list_head node; + u32 sequence; + u64 local_time; + u64 remote_time; + struct mhi_device *mhi_dev; + void (*cb_func)(struct mhi_device *mhi_dev, u32 sequence, + u64 local_time, u64 remote_time); +}; + +struct mhi_timesync { + u32 er_index; + void __iomem *db; + enum MHI_EV_CCS ccs; + struct completion completion; + spinlock_t lock; + struct list_head head; +}; + struct mhi_bus { struct list_head controller_list; struct mutex lock; @@ -621,6 +682,11 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, struct mhi_event *mhi_event, u32 event_quota); int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + enum MHI_CMD cmd); +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl); /* queue transfer buffer */ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, @@ -657,6 +723,9 @@ void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd); void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan); void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum MHI_STATE state); +int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, u32 capability, + u32 *offset); +int mhi_init_timesync(struct mhi_controller *mhi_cntrl); /* memory allocation methods */ static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl, diff 
--git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c index 5d33457d394d..111682e33c23 100644 --- a/drivers/bus/mhi/core/mhi_main.c +++ b/drivers/bus/mhi/core/mhi_main.c @@ -62,6 +62,40 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, return 0; } +int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, + u32 capability, + u32 *offset) +{ + u32 cur_cap, next_offset; + int ret; + + /* get the 1st supported capability offset */ + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MISC_OFFSET, + MISC_CAP_MASK, MISC_CAP_SHIFT, offset); + if (ret) + return ret; + do { + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset, + CAP_CAPID_MASK, CAP_CAPID_SHIFT, + &cur_cap); + if (ret) + return ret; + + if (cur_cap == capability) + return 0; + + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset, + CAP_NEXT_CAP_MASK, CAP_NEXT_CAP_SHIFT, + &next_offset); + if (ret) + return ret; + + *offset += next_offset; + } while (next_offset); + + return -ENXIO; +} + void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, @@ -503,16 +537,76 @@ void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason) mhi_drv->status_cb(mhi_dev, cb_reason); } +static void mhi_assign_of_node(struct mhi_controller *mhi_cntrl, + struct mhi_device *mhi_dev) +{ + struct device_node *controller, *node; + const char *dt_name; + int ret; + + controller = mhi_cntrl->of_node; + for_each_available_child_of_node(controller, node) { + ret = of_property_read_string(node, "mhi,chan", &dt_name); + if (ret) + continue; + if (!strcmp(mhi_dev->chan_name, dt_name)) { + mhi_dev->dev.of_node = node; + break; + } + } +} + +static void mhi_create_time_sync_dev(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev; + struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync; + int ret; + + if (!mhi_tsync || !mhi_tsync->db) + return; + + if (mhi_cntrl->ee != MHI_EE_AMSS) + return; + + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (!mhi_dev) + return; + + mhi_dev->dev_type = MHI_TIMESYNC_TYPE; + mhi_dev->chan_name = "TIME_SYNC"; + dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u_%s", mhi_dev->dev_id, + mhi_dev->domain, mhi_dev->bus, mhi_dev->slot, + mhi_dev->chan_name); + + /* add if there is a matching DT node */ + mhi_assign_of_node(mhi_cntrl, mhi_dev); + + ret = device_add(&mhi_dev->dev); + if (ret) { + MHI_ERR("Failed to register dev for chan:%s\n", + mhi_dev->chan_name); + mhi_dealloc_device(mhi_cntrl, mhi_dev); + return; + } + + mhi_cntrl->tsync_dev = mhi_dev; +} + /* bind mhi channels into mhi devices */ void mhi_create_devices(struct mhi_controller *mhi_cntrl) { int i; struct mhi_chan *mhi_chan; struct mhi_device *mhi_dev; - struct device_node *controller, *node; - const char *dt_name; int ret; + /* + * we need to create time sync device before creating other + * devices, because client may try to capture time during + * clint probe. 
+ */ + mhi_create_time_sync_dev(mhi_cntrl); + mhi_chan = mhi_cntrl->mhi_chan; for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { if (!mhi_chan->configured || mhi_chan->ee != mhi_cntrl->ee) @@ -521,6 +615,7 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl) if (!mhi_dev) return; + mhi_dev->dev_type = MHI_XFER_TYPE; switch (mhi_chan->dir) { case DMA_TO_DEVICE: mhi_dev->ul_chan = mhi_chan; @@ -569,15 +664,7 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl) mhi_dev->slot, mhi_dev->chan_name); /* add if there is a matching DT node */ - controller = mhi_cntrl->of_node; - for_each_available_child_of_node(controller, node) { - ret = of_property_read_string(node, "mhi,chan", - &dt_name); - if (ret) - continue; - if (!strcmp(mhi_dev->chan_name, dt_name)) - mhi_dev->dev.of_node = node; - } + mhi_assign_of_node(mhi_cntrl, mhi_dev); ret = device_add(&mhi_dev->dev); if (ret) { @@ -718,6 +805,41 @@ end_process_tx_event: return 0; } +static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, + struct mhi_tre *tre) +{ + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre); + struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *mhi_ring = &cmd_ring->ring; + struct mhi_tre *cmd_pkt; + struct mhi_chan *mhi_chan; + struct mhi_timesync *mhi_tsync; + enum mhi_cmd_type type; + u32 chan; + + cmd_pkt = mhi_to_virtual(mhi_ring, ptr); + + /* out of order completion received */ + MHI_ASSERT(cmd_pkt != mhi_ring->rp, "Out of order cmd completion"); + + type = MHI_TRE_GET_CMD_TYPE(cmd_pkt); + + if (type == MHI_CMD_TYPE_TSYNC) { + mhi_tsync = mhi_cntrl->mhi_tsync; + mhi_tsync->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_tsync->completion); + } else { + chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + write_lock_bh(&mhi_chan->lock); + mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_chan->completion); + write_unlock_bh(&mhi_chan->lock); + } + + mhi_del_ring_element(mhi_cntrl, mhi_ring); +} + int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, struct mhi_event *mhi_event, u32 event_quota) @@ -790,31 +912,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, break; } case MHI_PKT_TYPE_CMD_COMPLETION_EVENT: - { - dma_addr_t ptr = MHI_TRE_GET_EV_PTR(local_rp); - struct mhi_cmd *cmd_ring = - &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; - struct mhi_ring *mhi_ring = &cmd_ring->ring; - struct mhi_tre *cmd_pkt; - struct mhi_chan *mhi_chan; - u32 chan; - - cmd_pkt = mhi_to_virtual(mhi_ring, ptr); - - /* out of order completion received */ - MHI_ASSERT(cmd_pkt != mhi_ring->rp, - "Out of order cmd completion"); - - chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); - - mhi_chan = &mhi_cntrl->mhi_chan[chan]; - write_lock_bh(&mhi_chan->lock); - mhi_chan->ccs = MHI_TRE_GET_EV_CODE(local_rp); - complete(&mhi_chan->completion); - write_unlock_bh(&mhi_chan->lock); - mhi_del_ring_element(mhi_cntrl, mhi_ring); + mhi_process_cmd_completion(mhi_cntrl, local_rp); break; - } case MHI_PKT_TYPE_EE_EVENT: { enum MHI_ST_TRANSITION st = MHI_ST_TRANSITION_MAX; @@ -919,6 +1018,84 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, return count; } +int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync; + int count = 0; + u32 sequence; + 
u64 remote_time; + + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) { + MHI_ERR("No EV access, PM_STATE:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp) { + enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp); + struct tsync_node *tsync_node; + + MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n", + local_rp->ptr, local_rp->dword[0], local_rp->dword[1]); + + MHI_ASSERT(type != MHI_PKT_TYPE_TSYNC_EVENT, "!TSYNC event"); + + sequence = MHI_TRE_GET_EV_SEQ(local_rp); + remote_time = MHI_TRE_GET_EV_TIME(local_rp); + + do { + spin_lock_irq(&mhi_tsync->lock); + tsync_node = list_first_entry_or_null(&mhi_tsync->head, + struct tsync_node, node); + MHI_ASSERT(!tsync_node, "Unexpected Event"); + + if (unlikely(!tsync_node)) + break; + + list_del(&tsync_node->node); + spin_unlock_irq(&mhi_tsync->lock); + + /* + * device may not able to process all time sync commands + * host issue and only process last command it receive + */ + if (tsync_node->sequence == sequence) { + tsync_node->cb_func(tsync_node->mhi_dev, + sequence, + tsync_node->local_time, + remote_time); + kfree(tsync_node); + } else { + kfree(tsync_node); + } + } while (true); + + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + count++; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + MHI_VERB("exit er_index:%u\n", mhi_event->er_index); + + return count; +} + void mhi_ev_task(unsigned long data) { struct mhi_event *mhi_event = (struct mhi_event *)data; @@ -1029,23 +1206,22 @@ irqreturn_t mhi_intvec_handlr(int irq_number, void *dev) return IRQ_WAKE_THREAD; } -static int mhi_send_cmd(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan, - enum MHI_CMD cmd) +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + enum MHI_CMD cmd) { struct mhi_tre *cmd_tre = NULL; struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; struct mhi_ring *ring = &mhi_cmd->ring; - int chan = mhi_chan->chan; + int chan = 0; MHI_VERB("Entered, MHI pm_state:%s dev_state:%s ee:%s\n", to_mhi_pm_state_str(mhi_cntrl->pm_state), TO_MHI_STATE_STR(mhi_cntrl->dev_state), TO_MHI_EXEC_STR(mhi_cntrl->ee)); - /* MHI host currently handles RESET and START cmd */ - if (cmd != MHI_CMD_START_CHAN && cmd != MHI_CMD_RESET_CHAN) - return -EINVAL; + if (mhi_chan) + chan = mhi_chan->chan; spin_lock_bh(&mhi_cmd->lock); if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { @@ -1055,16 +1231,26 @@ static int mhi_send_cmd(struct mhi_controller *mhi_cntrl, /* prepare the cmd tre */ cmd_tre = ring->wp; - if (cmd == MHI_CMD_START_CHAN) { - cmd_tre->ptr = MHI_TRE_CMD_START_PTR; - cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; - cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); - } else { + switch (cmd) { + case MHI_CMD_RESET_CHAN: cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); + break; + case MHI_CMD_START_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_START_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); + break; + case MHI_CMD_TIMSYNC_CFG: + cmd_tre->ptr = MHI_TRE_CMD_TSYNC_CFG_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_TSYNC_CFG_DWORD0; + 
cmd_tre->dword[1] = MHI_TRE_CMD_TSYNC_CFG_DWORD1 + (mhi_cntrl->mhi_tsync->er_index); + break; } + MHI_VERB("WP:0x%llx TRE: 0x%llx 0x%08x 0x%08x\n", (u64)mhi_to_physical(ring, cmd_tre), cmd_tre->ptr, cmd_tre->dword[0], cmd_tre->dword[1]); @@ -1524,3 +1710,97 @@ int mhi_poll(struct mhi_device *mhi_dev, return ret; } EXPORT_SYMBOL(mhi_poll); + + +/** + * mhi_get_remote_time - Get external modem time relative to host time + * Trigger event to capture modem time, also capture host time so client + * can do a relative drift comparision. + * Recommended only tsync device calls this method and do not call this + * from atomic context + * @mhi_dev: Device associated with the channels + * @sequence:unique sequence id track event + * @cb_func: callback function to call back + */ +int mhi_get_remote_time(struct mhi_device *mhi_dev, + u32 sequence, + void (*cb_func)(struct mhi_device *mhi_dev, + u32 sequence, + u64 local_time, + u64 remote_time)) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync; + struct tsync_node *tsync_node; + int ret; + + /* not all devices support time feature */ + if (!mhi_tsync) + return -EIO; + + /* tsync db can only be rung in M0 state */ + ret = __mhi_device_get_sync(mhi_cntrl); + if (ret) + return ret; + + /* + * technically we can use GFP_KERNEL, but wants to avoid + * # of times scheduling out + */ + tsync_node = kzalloc(sizeof(*tsync_node), GFP_ATOMIC); + if (!tsync_node) { + ret = -ENOMEM; + goto error_no_mem; + } + + tsync_node->sequence = sequence; + tsync_node->cb_func = cb_func; + tsync_node->mhi_dev = mhi_dev; + + /* disable link level low power modes */ + mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data); + + read_lock_bh(&mhi_cntrl->pm_lock); + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + MHI_ERR("MHI is not in active state, pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + ret = -EIO; + goto error_invalid_state; + } + + spin_lock_irq(&mhi_tsync->lock); + list_add_tail(&tsync_node->node, &mhi_tsync->head); + spin_unlock_irq(&mhi_tsync->lock); + + /* + * time critical code, delay between these two steps should be + * deterministic as possible. 
+ */ + preempt_disable(); + local_irq_disable(); + + tsync_node->local_time = + mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data); + writel_relaxed_no_log(tsync_node->sequence, mhi_tsync->db); + /* write must go thru immediately */ + wmb(); + + local_irq_enable(); + preempt_enable(); + + ret = 0; + +error_invalid_state: + if (ret) + kfree(tsync_node); + read_unlock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data); + +error_no_mem: + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} +EXPORT_SYMBOL(mhi_get_remote_time); diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c index 9349e0f9885a..5ed4e2a0856e 100644 --- a/drivers/bus/mhi/core/mhi_pm.c +++ b/drivers/bus/mhi/core/mhi_pm.c @@ -523,8 +523,13 @@ static int mhi_pm_amss_transition(struct mhi_controller *mhi_cntrl) spin_unlock_irq(&mhi_event->lock); } + read_unlock_bh(&mhi_cntrl->pm_lock); + /* setup support for time sync */ + if (mhi_cntrl->time_sync) + mhi_init_timesync(mhi_cntrl); + MHI_LOG("Adding new devices\n"); /* add supported devices */ @@ -901,6 +906,8 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) mhi_deinit_dev_ctxt(mhi_cntrl); } + if (mhi_cntrl->mhi_tsync) + mhi_cntrl->mhi_tsync->db = NULL; } EXPORT_SYMBOL(mhi_power_down); @@ -1064,7 +1071,7 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl) return 0; } -static int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) { int ret; diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 71eaa9fddc2c..e3f269856cb1 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -18,6 +18,7 @@ struct mhi_ctxt; struct mhi_cmd; struct image_info; struct bhi_vec_entry; +struct mhi_timesync; /** * enum MHI_CB - MHI callback @@ -58,6 +59,16 @@ enum MHI_FLAGS { MHI_CHAIN, }; +/** + * enum mhi_device_type - Device types + * @MHI_XFER_TYPE: Handles data transfer + * @MHI_TIMESYNC_TYPE: Use for timesync feature + */ +enum mhi_device_type { + MHI_XFER_TYPE, + MHI_TIMESYNC_TYPE, +}; + /** * struct image_info - firmware and rddm table table * @mhi_buf - Contain device firmware and rddm table @@ -110,6 +121,9 @@ struct image_info { * @link_status: Query link status in case of abnormal value read from device * @runtime_get: Async runtime resume function * @runtimet_put: Release votes + * @time_get: Return host time in us + * @lpm_disable: Request controller to disable link level low power modes + * @lpm_enable: Controller may enable link level low power modes again * @priv_data: Points to bus master's private data */ struct mhi_controller { @@ -201,10 +215,18 @@ struct mhi_controller { void (*wake_put)(struct mhi_controller *, bool); int (*runtime_get)(struct mhi_controller *, void *); void (*runtime_put)(struct mhi_controller *, void *); + u64 (*time_get)(struct mhi_controller *mhi_cntrl, void *priv); + void (*lpm_disable)(struct mhi_controller *mhi_cntrl, void *priv); + void (*lpm_enable)(struct mhi_controller *mhi_cntrl, void *priv); /* channel to control DTR messaging */ struct mhi_device *dtr_dev; + /* supports time sync feature */ + bool time_sync; + struct mhi_timesync *mhi_tsync; + struct mhi_device *tsync_dev; + /* kernel log level */ enum MHI_DEBUG_LEVEL klog_lvl; @@ -245,6 +267,7 @@ struct mhi_device { struct mhi_chan *ul_chan; struct mhi_chan *dl_chan; atomic_t dev_wake; + enum mhi_device_type dev_type; void *priv_data; int (*ul_xfer)(struct 
mhi_device *, struct mhi_chan *, void *, size_t, enum MHI_FLAGS);
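The new controller callbacks added to struct mhi_controller above (time_get,
lpm_disable, lpm_enable) must be supplied by any controller that sets
"mhi,time-sync" in its device tree node; of_register_mhi_controller() rejects
registration otherwise. Below is a minimal sketch (not part of this patch) of
how a PCIe-based controller driver might wire them up; my_pcie_set_l1ss and
the my_* names are hypothetical placeholders for the bus master's own latency
controls.

#include <linux/ktime.h>
#include <linux/mhi.h>

/* hypothetical bus-specific hook; a real controller would program ASPM/L1ss here */
static void my_pcie_set_l1ss(void *priv, bool enable)
{
}

static u64 my_time_get(struct mhi_controller *mhi_cntrl, void *priv)
{
	/* MHI core expects host time in microseconds */
	return ktime_to_us(ktime_get());
}

static void my_lpm_disable(struct mhi_controller *mhi_cntrl, void *priv)
{
	/* disable link low power modes (L0s/L1/L1ss) for the time capture */
	my_pcie_set_l1ss(priv, false);
}

static void my_lpm_enable(struct mhi_controller *mhi_cntrl, void *priv)
{
	/* re-enable link low power modes once the capture is done */
	my_pcie_set_l1ss(priv, true);
}

static void my_setup_timesync(struct mhi_controller *mhi_cntrl)
{
	/* mhi_cntrl->time_sync itself is set by the core from "mhi,time-sync" */
	mhi_cntrl->time_get = my_time_get;
	mhi_cntrl->lpm_disable = my_lpm_disable;
	mhi_cntrl->lpm_enable = my_lpm_enable;
}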