1
0

Merge tag 'ti-driver-soc-for-v6.19' of https://git.kernel.org/pub/scm/linux/kernel/git/ti/linux into soc/drivers

TI SoC driver updates for v6.19

- ti_sci: Add Partial-IO poweroff support and sys_off handler integration
- ti_sci: Gate IO isolation programming on firmware capability flag
- ti_sci: Clean up by replacing ifdeffery in PM ops with the pm_sleep_ptr() macro

* tag 'ti-driver-soc-for-v6.19' of https://git.kernel.org/pub/scm/linux/kernel/git/ti/linux:
  firmware: ti_sci: Partial-IO support
  firmware: ti_sci: Support transfers without response
  firmware: ti_sci: Set IO Isolation only if the firmware is capable
  firmware: ti_sci: Replace ifdeffery by pm_sleep_ptr() macro

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
This commit is contained in:
Arnd Bergmann
2025-11-25 11:33:10 +01:00
2 changed files with 135 additions and 27 deletions

View File

@@ -398,6 +398,9 @@ static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
static inline int ti_sci_do_xfer(struct ti_sci_info *info, static inline int ti_sci_do_xfer(struct ti_sci_info *info,
struct ti_sci_xfer *xfer) struct ti_sci_xfer *xfer)
{ {
struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
bool response_expected = !!(hdr->flags & (TI_SCI_FLAG_REQ_ACK_ON_PROCESSED |
TI_SCI_FLAG_REQ_ACK_ON_RECEIVED));
int ret; int ret;
int timeout; int timeout;
struct device *dev = info->dev; struct device *dev = info->dev;
@@ -409,12 +412,12 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
ret = 0; ret = 0;
if (system_state <= SYSTEM_RUNNING) { if (response_expected && system_state <= SYSTEM_RUNNING) {
/* And we wait for the response. */ /* And we wait for the response. */
timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
if (!wait_for_completion_timeout(&xfer->done, timeout)) if (!wait_for_completion_timeout(&xfer->done, timeout))
ret = -ETIMEDOUT; ret = -ETIMEDOUT;
} else { } else if (response_expected) {
/* /*
* If we are !running, we cannot use wait_for_completion_timeout * If we are !running, we cannot use wait_for_completion_timeout
* during noirq phase, so we must manually poll the completion. * during noirq phase, so we must manually poll the completion.
@@ -1670,6 +1673,9 @@ fail:
static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode, static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
u32 ctx_lo, u32 ctx_hi, u32 debug_flags) u32 ctx_lo, u32 ctx_hi, u32 debug_flags)
{ {
u32 msg_flags = mode == TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO ?
TI_SCI_FLAG_REQ_GENERIC_NORESPONSE :
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED;
struct ti_sci_info *info; struct ti_sci_info *info;
struct ti_sci_msg_req_prepare_sleep *req; struct ti_sci_msg_req_prepare_sleep *req;
struct ti_sci_msg_hdr *resp; struct ti_sci_msg_hdr *resp;
@@ -1686,7 +1692,7 @@ static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
dev = info->dev; dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP, xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, msg_flags,
sizeof(*req), sizeof(*resp)); sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) { if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer); ret = PTR_ERR(xfer);
@@ -1706,11 +1712,12 @@ static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
goto fail; goto fail;
} }
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; if (msg_flags == TI_SCI_FLAG_REQ_ACK_ON_PROCESSED) {
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
if (!ti_sci_is_response_ack(resp)) { if (!ti_sci_is_response_ack(resp)) {
dev_err(dev, "Failed to prepare sleep\n"); dev_err(dev, "Failed to prepare sleep\n");
ret = -ENODEV; ret = -ENODEV;
}
} }
fail: fail:
@@ -3664,6 +3671,78 @@ devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
} }
EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource); EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
/*
 * ti_sci_partial_io_wakeup_enabled() - look for a wakeup-enabled Partial-IO
 *				       wakeup source
 * @info: TI SCI instance; info->dev is used only for diagnostics
 *
 * Iterate over all device nodes that carry a "wakeup-source" property and
 * check whether one of the referenced phandles points to a Partial-IO
 * ("off-wake") system state. If it does, resolve the device node to an
 * actual platform device and check whether wakeup is enabled on it.
 *
 * Return: true if at least one wakeup-enabled Partial-IO wakeup source was
 * found, false otherwise.
 */
static bool ti_sci_partial_io_wakeup_enabled(struct ti_sci_info *info)
{
	struct device_node *wakeup_node = NULL;

	for_each_node_with_property(wakeup_node, "wakeup-source") {
		struct of_phandle_iterator it;
		/*
		 * NOTE(review): err is required by of_for_each_phandle() but
		 * never inspected here — presumably iteration just ends on
		 * error; confirm that is acceptable for this walk.
		 */
		int err;

		of_for_each_phandle(&it, err, wakeup_node, "wakeup-source", NULL, 0) {
			struct platform_device *pdev;
			bool may_wakeup;

			/*
			 * Continue if idle-state-name is not off-wake. Return
			 * value is the index of the string which should be 0 if
			 * off-wake is present.
			 */
			if (of_property_match_string(it.node, "idle-state-name", "off-wake"))
				continue;

			pdev = of_find_device_by_node(wakeup_node);
			if (!pdev)
				continue;

			/* Sample the flag, then drop the device reference. */
			may_wakeup = device_may_wakeup(&pdev->dev);
			put_device(&pdev->dev);

			if (may_wakeup) {
				dev_dbg(info->dev, "%pOF identified as wakeup source for Partial-IO\n",
					wakeup_node);
				/*
				 * Early return: release the node references
				 * the two iterators are still holding.
				 */
				of_node_put(it.node);
				of_node_put(wakeup_node);
				return true;
			}
		}
	}

	return false;
}
/*
 * ti_sci_sys_off_handler() - power-off handler that enters Partial-IO
 * @data: sys_off callback data; cb_data carries the struct ti_sci_info
 *
 * Registered for SYS_OFF_MODE_POWER_OFF. If a wakeup-enabled Partial-IO
 * wakeup source is present, ask the firmware to enter Partial-IO via
 * TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO; in that mode no response is sent
 * and the SoC is expected to power down underneath us.
 *
 * Return: NOTIFY_DONE so the remaining power-off handlers still run when
 * Partial-IO is not entered.
 */
static int ti_sci_sys_off_handler(struct sys_off_data *data)
{
	struct ti_sci_info *info = data->cb_data;
	const struct ti_sci_handle *handle = &info->handle;
	bool enter_partial_io = ti_sci_partial_io_wakeup_enabled(info);
	int ret;

	if (!enter_partial_io)
		return NOTIFY_DONE;

	dev_info(info->dev, "Entering Partial-IO because a powered wakeup-enabled device was found.\n");

	ret = ti_sci_cmd_prepare_sleep(handle, TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO, 0, 0, 0);
	if (ret) {
		dev_err(info->dev,
			"Failed to enter Partial-IO %pe, trying to do an emergency restart\n",
			ERR_PTR(ret));
		emergency_restart();
	}

	/*
	 * Give the firmware time to power the SoC down. If we are still
	 * executing after this delay, Partial-IO entry did not take effect
	 * (assumed: 5 s is an adequate upper bound — TODO confirm), so force
	 * an emergency restart rather than hang in the power-off path.
	 */
	mdelay(5000);
	emergency_restart();

	return NOTIFY_DONE;
}
static int tisci_reboot_handler(struct sys_off_data *data) static int tisci_reboot_handler(struct sys_off_data *data)
{ {
struct ti_sci_info *info = data->cb_data; struct ti_sci_info *info = data->cb_data;
@@ -3706,7 +3785,7 @@ static int ti_sci_prepare_system_suspend(struct ti_sci_info *info)
} }
} }
static int __maybe_unused ti_sci_suspend(struct device *dev) static int ti_sci_suspend(struct device *dev)
{ {
struct ti_sci_info *info = dev_get_drvdata(dev); struct ti_sci_info *info = dev_get_drvdata(dev);
struct device *cpu_dev, *cpu_dev_max = NULL; struct device *cpu_dev, *cpu_dev_max = NULL;
@@ -3746,19 +3825,21 @@ static int __maybe_unused ti_sci_suspend(struct device *dev)
return 0; return 0;
} }
static int __maybe_unused ti_sci_suspend_noirq(struct device *dev) static int ti_sci_suspend_noirq(struct device *dev)
{ {
struct ti_sci_info *info = dev_get_drvdata(dev); struct ti_sci_info *info = dev_get_drvdata(dev);
int ret = 0; int ret = 0;
ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE); if (info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION) {
if (ret) ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE);
return ret; if (ret)
return ret;
}
return 0; return 0;
} }
static int __maybe_unused ti_sci_resume_noirq(struct device *dev) static int ti_sci_resume_noirq(struct device *dev)
{ {
struct ti_sci_info *info = dev_get_drvdata(dev); struct ti_sci_info *info = dev_get_drvdata(dev);
int ret = 0; int ret = 0;
@@ -3767,9 +3848,11 @@ static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
u8 pin; u8 pin;
u8 mode; u8 mode;
ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE); if (info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION) {
if (ret) ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE);
return ret; if (ret)
return ret;
}
ret = ti_sci_msg_cmd_lpm_wake_reason(&info->handle, &source, &time, &pin, &mode); ret = ti_sci_msg_cmd_lpm_wake_reason(&info->handle, &source, &time, &pin, &mode);
/* Do not fail to resume on error as the wake reason is not critical */ /* Do not fail to resume on error as the wake reason is not critical */
@@ -3780,7 +3863,7 @@ static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
return 0; return 0;
} }
static void __maybe_unused ti_sci_pm_complete(struct device *dev) static void ti_sci_pm_complete(struct device *dev)
{ {
struct ti_sci_info *info = dev_get_drvdata(dev); struct ti_sci_info *info = dev_get_drvdata(dev);
@@ -3791,12 +3874,10 @@ static void __maybe_unused ti_sci_pm_complete(struct device *dev)
} }
static const struct dev_pm_ops ti_sci_pm_ops = { static const struct dev_pm_ops ti_sci_pm_ops = {
#ifdef CONFIG_PM_SLEEP .suspend = pm_sleep_ptr(ti_sci_suspend),
.suspend = ti_sci_suspend, .suspend_noirq = pm_sleep_ptr(ti_sci_suspend_noirq),
.suspend_noirq = ti_sci_suspend_noirq, .resume_noirq = pm_sleep_ptr(ti_sci_resume_noirq),
.resume_noirq = ti_sci_resume_noirq, .complete = pm_sleep_ptr(ti_sci_pm_complete),
.complete = ti_sci_pm_complete,
#endif
}; };
/* Description for K2G */ /* Description for K2G */
@@ -3928,11 +4009,12 @@ static int ti_sci_probe(struct platform_device *pdev)
} }
ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps); ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps);
dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s\n", dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s%s\n",
info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "", info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "",
info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "", info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "",
info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : "", info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : "",
info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : "" info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : "",
info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION ? " IO-Isolation" : ""
); );
ti_sci_setup_ops(info); ti_sci_setup_ops(info);
@@ -3943,6 +4025,19 @@ static int ti_sci_probe(struct platform_device *pdev)
goto out; goto out;
} }
if (info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO) {
ret = devm_register_sys_off_handler(dev,
SYS_OFF_MODE_POWER_OFF,
SYS_OFF_PRIO_FIRMWARE,
ti_sci_sys_off_handler,
info);
if (ret) {
dev_err(dev, "Failed to register sys_off_handler %pe\n",
ERR_PTR(ret));
goto out;
}
}
dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n", dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
info->handle.version.abi_major, info->handle.version.abi_minor, info->handle.version.abi_major, info->handle.version.abi_minor,
info->handle.version.firmware_revision, info->handle.version.firmware_revision,
@@ -3952,7 +4047,13 @@ static int ti_sci_probe(struct platform_device *pdev)
list_add_tail(&info->node, &ti_sci_list); list_add_tail(&info->node, &ti_sci_list);
mutex_unlock(&ti_sci_list_mutex); mutex_unlock(&ti_sci_list_mutex);
return of_platform_populate(dev->of_node, NULL, NULL, dev); ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "platform_populate failed %pe\n", ERR_PTR(ret));
goto out;
}
return 0;
out: out:
if (!IS_ERR(info->chan_tx)) if (!IS_ERR(info->chan_tx))
mbox_free_channel(info->chan_tx); mbox_free_channel(info->chan_tx);

View File

@@ -149,6 +149,7 @@ struct ti_sci_msg_req_reboot {
* MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM * MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM
* MSG_FLAG_CAPS_LPM_DM_MANAGED: LPM can be managed by DM * MSG_FLAG_CAPS_LPM_DM_MANAGED: LPM can be managed by DM
* MSG_FLAG_CAPS_LPM_ABORT: Abort entry to LPM * MSG_FLAG_CAPS_LPM_ABORT: Abort entry to LPM
* MSG_FLAG_CAPS_IO_ISOLATION: IO Isolation support
* *
* Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS * Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS
* providing currently available SOC/firmware capabilities. SoC that don't * providing currently available SOC/firmware capabilities. SoC that don't
@@ -160,6 +161,7 @@ struct ti_sci_msg_resp_query_fw_caps {
#define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4) #define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4)
#define MSG_FLAG_CAPS_LPM_DM_MANAGED TI_SCI_MSG_FLAG(5) #define MSG_FLAG_CAPS_LPM_DM_MANAGED TI_SCI_MSG_FLAG(5)
#define MSG_FLAG_CAPS_LPM_ABORT TI_SCI_MSG_FLAG(9) #define MSG_FLAG_CAPS_LPM_ABORT TI_SCI_MSG_FLAG(9)
#define MSG_FLAG_CAPS_IO_ISOLATION TI_SCI_MSG_FLAG(7)
#define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1) #define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1)
u64 fw_caps; u64 fw_caps;
} __packed; } __packed;
@@ -595,6 +597,11 @@ struct ti_sci_msg_resp_get_clock_freq {
struct ti_sci_msg_req_prepare_sleep { struct ti_sci_msg_req_prepare_sleep {
struct ti_sci_msg_hdr hdr; struct ti_sci_msg_hdr hdr;
/*
* When sending prepare_sleep with MODE_PARTIAL_IO no response will be sent,
* no further steps are required.
*/
#define TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO 0x03
#define TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED 0xfd #define TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED 0xfd
u8 mode; u8 mode;
u32 ctx_lo; u32 ctx_lo;