// SPDX-License-Identifier: GPL-2.0
/*
 * SVC Greybus driver.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 */

#include <linux/debugfs.h>
#include <linux/kstrtox.h>
#include <linux/workqueue.h>

#include <linux/greybus.h>
|
2015-09-02 08:57:13 -07:00
|
|
|
|
2016-05-06 12:43:53 -07:00
|
|
|
#define SVC_INTF_EJECT_TIMEOUT 9000
|
|
|
|
#define SVC_INTF_ACTIVATE_TIMEOUT 6000
|
2016-08-26 03:59:45 -07:00
|
|
|
#define SVC_INTF_RESUME_TIMEOUT 3000
|
2016-03-09 04:20:46 -07:00
|
|
|
|
2015-12-02 10:23:29 -07:00
|
|
|
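/*
 * SVC requests that trigger long-running work (module hotplug, mailbox
 * events, etc.) are not handled in the incoming-request path; they are
 * wrapped in a gb_svc_deferred_request and processed later from the svc
 * workqueue (see gb_svc_process_deferred_request() below).
 */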
struct gb_svc_deferred_request {
	struct work_struct work;
	struct gb_operation *operation;
};

static int gb_svc_queue_deferred_request(struct gb_operation *operation);

static ssize_t endo_id_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "0x%04x\n", svc->endo_id);
}
static DEVICE_ATTR_RO(endo_id);

static ssize_t ap_intf_id_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "%u\n", svc->ap_intf_id);
}
static DEVICE_ATTR_RO(ap_intf_id);

// FIXME
// This is a hack, we need to do this "right" and clean the interface up
// properly, not just forcibly yank the thing out of the system and hope for the
// best. But for now, people want their modules to come out without having to
// throw the thing to the ground or get out a screwdriver.
static ssize_t intf_eject_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);
	unsigned short intf_id;
	int ret;

	ret = kstrtou16(buf, 10, &intf_id);
	if (ret < 0)
		return ret;

	dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);

	ret = gb_svc_intf_eject(svc, intf_id);
	if (ret < 0)
		return ret;

	return len;
}
static DEVICE_ATTR_WO(intf_eject);

static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "%s\n",
		       gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
}

static ssize_t watchdog_store(struct device *dev,
			      struct device_attribute *attr, const char *buf,
			      size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);
	int retval;
	bool user_request;

	retval = kstrtobool(buf, &user_request);
	if (retval)
		return retval;

	if (user_request)
		retval = gb_svc_watchdog_enable(svc);
	else
		retval = gb_svc_watchdog_disable(svc);
	if (retval)
		return retval;
	return len;
}
static DEVICE_ATTR_RW(watchdog);

static ssize_t watchdog_action_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL)
		return sprintf(buf, "panic\n");
	else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO)
		return sprintf(buf, "reset\n");

	return -EINVAL;
}

static ssize_t watchdog_action_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (sysfs_streq(buf, "panic"))
		svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL;
	else if (sysfs_streq(buf, "reset"))
		svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO;
	else
		return -EINVAL;

	return len;
}
static DEVICE_ATTR_RW(watchdog_action);
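
/*
 * Power-monitor (pwrmon) helpers: synchronous SVC operations that query
 * the number of power rails, their names, and per-rail voltage, current
 * and power samples.
 */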
static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
{
	struct gb_svc_pwrmon_rail_count_get_response response;
	int ret;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
		return ret;
	}

	*value = response.rail_count;

	return 0;
}

static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
		struct gb_svc_pwrmon_rail_names_get_response *response,
		size_t bufsize)
{
	int ret;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
				response, bufsize);
	if (ret) {
		dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
		return ret;
	}

	if (response->status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev,
			"SVC error while getting rail names: %u\n",
			response->status);
		return -EREMOTEIO;
	}

	return 0;
}

static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
				    u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_sample_get_request request;
	struct gb_svc_pwrmon_sample_get_response response;
	int ret;

	request.rail_id = rail_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
		return ret;
	}

	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting rail power sample (%d %d): %d\n",
			rail_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}

int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
				  u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_intf_sample_get_request request;
	struct gb_svc_pwrmon_intf_sample_get_response response;
	int ret;

	request.intf_id = intf_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
		return ret;
	}

	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting intf power sample (%d %d): %d\n",
			intf_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}

static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	&dev_attr_watchdog.attr,
	&dev_attr_watchdog_action.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);

int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
{
	struct gb_svc_intf_device_id_request request;

	request.intf_id = intf_id;
	request.device_id = device_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
				 &request, sizeof(request), NULL, 0);
}

int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_eject_request request;
	int ret;

	request.intf_id = intf_id;

	/*
	 * The pulse width for module release in svc is long so we need to
	 * increase the timeout so the operation will not return too soon.
	 */
	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_EJECT, &request,
					sizeof(request), NULL, 0,
					SVC_INTF_EJECT_TIMEOUT);
	if (ret) {
		dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
		return ret;
	}

	return 0;
}
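
/*
 * The following three helpers gate per-interface resources (vsys power,
 * reference clock, UniPro port) by picking an enable or disable operation
 * type and checking the SVC result code.
 */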
int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
{
	struct gb_svc_intf_vsys_request request;
	struct gb_svc_intf_vsys_response response;
	int type, ret;

	request.intf_id = intf_id;

	if (enable)
		type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
	else
		type = GB_SVC_TYPE_INTF_VSYS_DISABLE;

	ret = gb_operation_sync(svc->connection, type,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;
	if (response.result_code != GB_SVC_INTF_VSYS_OK)
		return -EREMOTEIO;
	return 0;
}

int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
{
	struct gb_svc_intf_refclk_request request;
	struct gb_svc_intf_refclk_response response;
	int type, ret;

	request.intf_id = intf_id;

	if (enable)
		type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
	else
		type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;

	ret = gb_operation_sync(svc->connection, type,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;
	if (response.result_code != GB_SVC_INTF_REFCLK_OK)
		return -EREMOTEIO;
	return 0;
}

int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
{
	struct gb_svc_intf_unipro_request request;
	struct gb_svc_intf_unipro_response response;
	int type, ret;

	request.intf_id = intf_id;

	if (enable)
		type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
	else
		type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;

	ret = gb_operation_sync(svc->connection, type,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;
	if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
		return -EREMOTEIO;
	return 0;
}
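
/* Activate an interface and report the interface type detected by the SVC. */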
int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
{
	struct gb_svc_intf_activate_request request;
	struct gb_svc_intf_activate_response response;
	int ret;

	request.intf_id = intf_id;

	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_ACTIVATE,
					&request, sizeof(request),
					&response, sizeof(response),
					SVC_INTF_ACTIVATE_TIMEOUT);
	if (ret < 0)
		return ret;
	if (response.status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev, "failed to activate interface %u: %u\n",
			intf_id, response.status);
		return -EREMOTEIO;
	}

	*intf_type = response.intf_type;

	return 0;
}

int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_resume_request request;
	struct gb_svc_intf_resume_response response;
	int ret;

	request.intf_id = intf_id;

	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_RESUME,
					&request, sizeof(request),
					&response, sizeof(response),
					SVC_INTF_RESUME_TIMEOUT);
	if (ret < 0) {
		dev_err(&svc->dev, "failed to send interface resume %u: %d\n",
			intf_id, ret);
		return ret;
	}

	if (response.status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev, "failed to resume interface %u: %u\n",
			intf_id, response.status);
		return -EREMOTEIO;
	}

	return 0;
}
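
/*
 * DME peer attribute accessors: read or write a UniPro DME attribute of
 * the peer at @intf_id through the SVC, using the given attribute ID and
 * selector index.
 */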
int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 *value)
{
	struct gb_svc_dme_peer_get_request request;
	struct gb_svc_dme_peer_get_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
			intf_id, attr, selector, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
			intf_id, attr, selector, result);
		return -EREMOTEIO;
	}

	if (value)
		*value = le32_to_cpu(response.attr_value);

	return 0;
}

int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 value)
{
	struct gb_svc_dme_peer_set_request request;
	struct gb_svc_dme_peer_set_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);
	request.value = cpu_to_le32(value);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
			intf_id, attr, selector, value, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
			intf_id, attr, selector, value, result);
		return -EREMOTEIO;
	}

	return 0;
}
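
/*
 * Connection routing: ask the SVC to create or destroy a connection
 * between two (interface, cport) pairs through the UniPro switch.
 */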
int gb_svc_connection_create(struct gb_svc *svc,
			     u8 intf1_id, u16 cport1_id,
			     u8 intf2_id, u16 cport2_id,
			     u8 cport_flags)
{
	struct gb_svc_conn_create_request request;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);
	request.tc = 0; /* TC0 */
	request.flags = cport_flags;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
				 &request, sizeof(request), NULL, 0);
}

void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
			       u8 intf2_id, u16 cport2_id)
{
	struct gb_svc_conn_destroy_request request;
	struct gb_connection *connection = svc->connection;
	int ret;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);

	ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
			intf1_id, cport1_id, intf2_id, cport2_id, ret);
	}
}

/* Creates bi-directional routes between the devices */
int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
			u8 intf2_id, u8 dev2_id)
{
	struct gb_svc_route_create_request request;

	request.intf1_id = intf1_id;
	request.dev1_id = dev1_id;
	request.intf2_id = intf2_id;
	request.dev2_id = dev2_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
				 &request, sizeof(request), NULL, 0);
}

/* Destroys bi-directional routes between the devices */
void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
{
	struct gb_svc_route_destroy_request request;
	int ret;

	request.intf1_id = intf1_id;
	request.intf2_id = intf2_id;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
			intf1_id, intf2_id, ret);
	}
}

int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
			       u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
			       u8 tx_amplitude, u8 tx_hs_equalizer,
			       u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
			       u8 flags, u32 quirks,
			       struct gb_svc_l2_timer_cfg *local,
			       struct gb_svc_l2_timer_cfg *remote)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;
	u16 result_code;

	memset(&request, 0, sizeof(request));

	request.intf_id = intf_id;
	request.hs_series = hs_series;
	request.tx_mode = tx_mode;
	request.tx_gear = tx_gear;
	request.tx_nlanes = tx_nlanes;
	request.tx_amplitude = tx_amplitude;
	request.tx_hs_equalizer = tx_hs_equalizer;
	request.rx_mode = rx_mode;
	request.rx_gear = rx_gear;
	request.rx_nlanes = rx_nlanes;
	request.flags = flags;
	request.quirks = cpu_to_le32(quirks);
	if (local)
		request.local_l2timerdata = *local;
	if (remote)
		request.remote_l2timerdata = *remote;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	result_code = response.result_code;
	if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
		dev_err(&svc->dev, "set power mode = %d\n", result_code);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);

int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;
	u16 result_code;

	memset(&request, 0, sizeof(request));

	request.intf_id = intf_id;
	request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A;
	request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
	request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0) {
		dev_err(&svc->dev,
			"failed to send set power mode operation to interface %u: %d\n",
			intf_id, ret);
		return ret;
	}

	result_code = response.result_code;
	if (result_code != GB_SVC_SETPWRM_PWR_OK) {
		dev_err(&svc->dev,
			"failed to hibernate the link for interface %u: %u\n",
			intf_id, result_code);
		return -EIO;
	}

	return 0;
}

int gb_svc_ping(struct gb_svc *svc)
{
	return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
					 NULL, 0, NULL, 0,
					 GB_OPERATION_TIMEOUT_DEFAULT * 2);
}
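
/*
 * Handlers for SVC-initiated requests. The version request negotiates the
 * protocol version: a request with an unsupported major version is
 * rejected, otherwise the AP echoes the requested version back.
 */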
static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_version_request *request;
	struct gb_svc_version_response *response;

	if (op->request->payload_size < sizeof(*request)) {
		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
			op->request->payload_size,
			sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
			 request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	svc->protocol_major = request->major;
	svc->protocol_minor = request->minor;

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
		return -ENOMEM;

	response = op->response->payload;
	response->major = svc->protocol_major;
	response->minor = svc->protocol_minor;

	return 0;
}
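
/*
 * debugfs interface: each pwrmon rail gets a directory with voltage_now,
 * current_now and power_now files; reading one triggers a fresh sample
 * request to the SVC.
 */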
static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_VOL, &value);
	if (ret) {
		dev_err(&svc->dev,
			"failed to get voltage sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_CURR, &value);
	if (ret) {
		dev_err(&svc->dev,
			"failed to get current sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
				      size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_PWR, &value);
	if (ret) {
		dev_err(&svc->dev, "failed to get power sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static const struct file_operations pwrmon_debugfs_voltage_fops = {
	.read		= pwr_debugfs_voltage_read,
};

static const struct file_operations pwrmon_debugfs_current_fops = {
	.read		= pwr_debugfs_current_read,
};

static const struct file_operations pwrmon_debugfs_power_fops = {
	.read		= pwr_debugfs_power_read,
};

static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
{
	int i;
	size_t bufsize;
	struct dentry *dent;
	struct gb_svc_pwrmon_rail_names_get_response *rail_names;
	u8 rail_count;

	dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
	if (IS_ERR_OR_NULL(dent))
		return;

	if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
		goto err_pwrmon_debugfs;

	if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
		goto err_pwrmon_debugfs;

	bufsize = sizeof(*rail_names) +
		GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;

	rail_names = kzalloc(bufsize, GFP_KERNEL);
	if (!rail_names)
		goto err_pwrmon_debugfs;

	svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
				    GFP_KERNEL);
	if (!svc->pwrmon_rails)
		goto err_pwrmon_debugfs_free;

	if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
		goto err_pwrmon_debugfs_free;

	for (i = 0; i < rail_count; i++) {
		struct dentry *dir;
		struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
		char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];

		snprintf(fname, sizeof(fname), "%s",
			 (char *)&rail_names->name[i]);

		rail->id = i;
		rail->svc = svc;

		dir = debugfs_create_dir(fname, dent);
		debugfs_create_file("voltage_now", 0444, dir, rail,
				    &pwrmon_debugfs_voltage_fops);
		debugfs_create_file("current_now", 0444, dir, rail,
				    &pwrmon_debugfs_current_fops);
		debugfs_create_file("power_now", 0444, dir, rail,
				    &pwrmon_debugfs_power_fops);
	}

	kfree(rail_names);
	return;

err_pwrmon_debugfs_free:
	kfree(rail_names);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;

err_pwrmon_debugfs:
	debugfs_remove(dent);
}

static void gb_svc_debugfs_init(struct gb_svc *svc)
{
	svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
						 gb_debugfs_get());
	gb_svc_pwrmon_debugfs_init(svc);
}

static void gb_svc_debugfs_exit(struct gb_svc *svc)
{
	debugfs_remove_recursive(svc->debugfs_dentry);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;
}
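
/*
 * The hello request carries the endo ID and the AP interface ID; it is
 * the point at which the svc device is registered and the SVC watchdog
 * created.
 */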
static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
			 op->request->payload_size,
			 sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		goto err_deregister_svc;
	}

	/*
	 * FIXME: This is a temporary hack to reconfigure the link at HELLO
	 * (which abuses the deferred request processing mechanism).
	 */
	ret = gb_svc_queue_deferred_request(op);
	if (ret)
		goto err_destroy_watchdog;

	gb_svc_debugfs_init(svc);

	return 0;

err_destroy_watchdog:
	gb_svc_watchdog_destroy(svc);
err_deregister_svc:
	device_del(&svc->dev);

	return ret;
}
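
/* Look up an interface by interface id across all registered modules. */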
static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
						    u8 intf_id)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;

	list_for_each_entry(module, &hd->modules, hd_node) {
		module_id = module->module_id;
		num_interfaces = module->num_interfaces;

		if (intf_id >= module_id &&
		    intf_id < module_id + num_interfaces) {
			return module->interfaces[intf_id - module_id];
		}
	}

	return NULL;
}

static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;

	list_for_each_entry(module, &hd->modules, hd_node) {
		if (module->module_id == module_id)
			return module;
	}

	return NULL;
}

static void gb_svc_process_hello_deferred(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	int ret;

	/*
	 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
	 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
	 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
	 * module.
	 *
	 * The code should be removed once SW-2217, Heuristic for UniPro
	 * Power Mode Changes, is resolved.
	 */
	ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
					 GB_SVC_UNIPRO_HS_SERIES_A,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 GB_SVC_SMALL_AMPLITUDE,
					 GB_SVC_NO_DE_EMPHASIS,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 0, 0,
					 NULL, NULL);

	if (ret)
		dev_warn(&svc->dev,
			 "power mode change failed on AP to switch link: %d\n",
			 ret);
}

static void gb_svc_process_module_inserted(struct gb_operation *operation)
{
	struct gb_svc_module_inserted_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;
	u16 flags;
	int ret;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;
	num_interfaces = request->intf_count;
	flags = le16_to_cpu(request->flags);

	dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
		__func__, module_id, num_interfaces, flags);

	if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
		dev_warn(&svc->dev, "no primary interface detected on module %u\n",
			 module_id);
	}

	module = gb_svc_module_lookup(svc, module_id);
	if (module) {
		dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
			 module_id);
		return;
	}

	module = gb_module_create(hd, module_id, num_interfaces);
	if (!module) {
		dev_err(&svc->dev, "failed to create module\n");
		return;
	}

	ret = gb_module_add(module);
	if (ret) {
		gb_module_put(module);
		return;
	}

	list_add(&module->hd_node, &hd->modules);
}

static void gb_svc_process_module_removed(struct gb_operation *operation)
{
	struct gb_svc_module_removed_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_module *module;
	u8 module_id;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);

	module = gb_svc_module_lookup(svc, module_id);
	if (!module) {
		dev_warn(&svc->dev, "unexpected module-removed event %u\n",
			 module_id);
		return;
	}

	module->disconnected = true;

	gb_module_del(module);
	list_del(&module->hd_node);
	gb_module_put(module);
}

static void gb_svc_process_intf_oops(struct gb_operation *operation)
{
	struct gb_svc_intf_oops_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u8 reason;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	reason = request->reason;

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
			 intf_id);
		return;
	}

	dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
		 intf_id, reason);

	mutex_lock(&intf->mutex);
	intf->disconnected = true;
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}

static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
{
	struct gb_svc_intf_mailbox_event_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u16 result_code;
	u32 mailbox;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	result_code = le16_to_cpu(request->result_code);
	mailbox = le32_to_cpu(request->mailbox);

	dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
		__func__, intf_id, result_code, mailbox);

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
		return;
	}

	gb_interface_mailbox_event(intf, result_code, mailbox);
}
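
/* Runs on svc->wq; dispatches a deferred request based on its type. */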
static void gb_svc_process_deferred_request(struct work_struct *work)
{
	struct gb_svc_deferred_request *dr;
	struct gb_operation *operation;
	struct gb_svc *svc;
	u8 type;

	dr = container_of(work, struct gb_svc_deferred_request, work);
	operation = dr->operation;
	svc = gb_connection_get_data(operation->connection);
	type = operation->request->header->type;

	switch (type) {
	case GB_SVC_TYPE_SVC_HELLO:
		gb_svc_process_hello_deferred(operation);
		break;
	case GB_SVC_TYPE_MODULE_INSERTED:
		gb_svc_process_module_inserted(operation);
		break;
	case GB_SVC_TYPE_MODULE_REMOVED:
		gb_svc_process_module_removed(operation);
		break;
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		gb_svc_process_intf_mailbox_event(operation);
		break;
	case GB_SVC_TYPE_INTF_OOPS:
		gb_svc_process_intf_oops(operation);
		break;
	default:
		dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
	}

	gb_operation_put(operation);
	kfree(dr);
}

static int gb_svc_queue_deferred_request(struct gb_operation *operation)
{
	struct gb_svc *svc = gb_connection_get_data(operation->connection);
	struct gb_svc_deferred_request *dr;

	dr = kmalloc(sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	gb_operation_get(operation);

	dr->operation = operation;
	INIT_WORK(&dr->work, gb_svc_process_deferred_request);

	queue_work(svc->wq, &dr->work);

	return 0;
}

static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
			 request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	/* FIXME Reset the interface here */

	return 0;
}

static int gb_svc_module_inserted_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_module_inserted_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
		request->primary_intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_module_removed_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_module_removed_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
		request->primary_intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_oops_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_intf_oops_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_intf_mailbox_event_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

	return gb_svc_queue_deferred_request(op);
}
2016-01-19 04:51:15 -07:00
|
|
|
static int gb_svc_request_handler(struct gb_operation *op)
|
2015-05-22 11:02:08 -07:00
|
|
|
{
|
2015-09-03 03:12:22 -07:00
|
|
|
struct gb_connection *connection = op->connection;
|
2016-03-22 11:30:35 -07:00
|
|
|
struct gb_svc *svc = gb_connection_get_data(connection);
|
2016-01-19 04:51:15 -07:00
|
|
|
u8 type = op->type;
|
2015-09-03 03:12:22 -07:00
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SVC requests need to follow a specific order (at least initially) and
|
|
|
|
* below code takes care of enforcing that. The expected order is:
|
|
|
|
* - PROTOCOL_VERSION
|
|
|
|
* - SVC_HELLO
|
|
|
|
* - Any other request, but the earlier two.
|
|
|
|
*
|
|
|
|
* Incoming requests are guaranteed to be serialized and so we don't
|
|
|
|
* need to protect 'state' for any races.
|
|
|
|
*/
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
			 type, svc->state);
		return ret;
	}

	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_MODULE_INSERTED:
		return gb_svc_module_inserted_recv(op);
	case GB_SVC_TYPE_MODULE_REMOVED:
		return gb_svc_module_removed_recv(op);
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		return gb_svc_intf_mailbox_event_recv(op);
	case GB_SVC_TYPE_INTF_OOPS:
		return gb_svc_intf_oops_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}
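
/*
 * Runs once the last reference to the SVC device is dropped. Everything set
 * up in gb_svc_create() is torn down here, including the static SVC
 * connection, which gb_svc_del() only disables.
 */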
static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc);
}

const struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};
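
/*
 * gb_svc_create() only allocates and initializes the SVC device and its
 * ordered event workqueue; nothing is registered with the driver core yet.
 * The device itself is added later, from the request handler, once the SVC
 * has announced itself (see the comment in gb_svc_add() below).
 */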
struct gb_svc *gb_svc_create(struct gb_host_device *hd)
{
	struct gb_svc *svc;

	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
	if (!svc)
		return NULL;

	svc->wq = alloc_ordered_workqueue("%s:svc", 0, dev_name(&hd->dev));
	if (!svc->wq) {
		kfree(svc);
		return NULL;
	}

	svc->dev.parent = &hd->dev;
	svc->dev.bus = &greybus_bus_type;
	svc->dev.type = &greybus_svc_type;
	svc->dev.groups = svc_groups;
	svc->dev.dma_mask = svc->dev.parent->dma_mask;
	device_initialize(&svc->dev);

	dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

	ida_init(&svc->device_id_map);
	svc->state = GB_SVC_STATE_RESET;
	svc->hd = hd;

	svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
						      gb_svc_request_handler);
	if (IS_ERR(svc->connection)) {
		dev_err(&svc->dev, "failed to create connection: %ld\n",
			PTR_ERR(svc->connection));
		goto err_put_device;
	}

	gb_connection_set_data(svc->connection, svc);

	return svc;

err_put_device:
	put_device(&svc->dev);
	return NULL;
}
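
/*
 * A rough sketch of how the host-device core is expected to drive this API;
 * error handling is elided and the actual call sites live outside this file:
 *
 *	svc = gb_svc_create(hd);
 *	ret = gb_svc_add(svc);		// enable the SVC connection
 *	...
 *	gb_svc_del(svc);		// quiesce and unregister
 *	gb_svc_put(svc);		// final put ends up in gb_svc_release()
 */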

int gb_svc_add(struct gb_svc *svc)
{
	int ret;

	/*
	 * The SVC protocol is currently driven by the SVC, so the SVC device
	 * is added from the connection request handler when enough
	 * information has been received.
	 */
	ret = gb_connection_enable(svc->connection);
	if (ret)
		return ret;

	return 0;
}
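
/*
 * Tear down all modules registered on this host device. The _safe list
 * iterator is required since each entry is unlinked from hd->modules while
 * the list is being walked.
 */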
static void gb_svc_remove_modules(struct gb_svc *svc)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module, *tmp;

	list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
		gb_module_del(module);
		list_del(&module->hd_node);
		gb_module_put(module);
	}
}
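
/*
 * Teardown ordering matters here: incoming requests are shut off first so
 * no new events can be queued, deferred work that is already queued gets
 * flushed, the modules are removed, and only then is the connection fully
 * disabled. The connection itself is destroyed later, in gb_svc_release().
 */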
void gb_svc_del(struct gb_svc *svc)
{
	gb_connection_disable_rx(svc->connection);

	/*
	 * The SVC device may have been registered from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_svc_debugfs_exit(svc);
		gb_svc_watchdog_destroy(svc);
		device_del(&svc->dev);
	}

	flush_workqueue(svc->wq);

	gb_svc_remove_modules(svc);

	gb_connection_disable(svc->connection);
}

void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}