linux/drivers/staging/greybus/svc.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * SVC Greybus driver.
   4 *
   5 * Copyright 2015 Google Inc.
   6 * Copyright 2015 Linaro Ltd.
   7 */
   8
   9#include <linux/debugfs.h>
  10#include <linux/workqueue.h>
  11
  12#include "greybus.h"
  13
  14#define SVC_INTF_EJECT_TIMEOUT          9000
  15#define SVC_INTF_ACTIVATE_TIMEOUT       6000
  16#define SVC_INTF_RESUME_TIMEOUT         3000
  17
  18struct gb_svc_deferred_request {
  19        struct work_struct work;
  20        struct gb_operation *operation;
  21};
  22
  23static int gb_svc_queue_deferred_request(struct gb_operation *operation);
  24
  25static ssize_t endo_id_show(struct device *dev,
  26                            struct device_attribute *attr, char *buf)
  27{
  28        struct gb_svc *svc = to_gb_svc(dev);
  29
  30        return sprintf(buf, "0x%04x\n", svc->endo_id);
  31}
  32static DEVICE_ATTR_RO(endo_id);
  33
  34static ssize_t ap_intf_id_show(struct device *dev,
  35                               struct device_attribute *attr, char *buf)
  36{
  37        struct gb_svc *svc = to_gb_svc(dev);
  38
  39        return sprintf(buf, "%u\n", svc->ap_intf_id);
  40}
  41static DEVICE_ATTR_RO(ap_intf_id);
  42
  43// FIXME
  44// This is a hack; we need to do this "right" and clean the interface up
  45// properly, not just forcibly yank the thing out of the system and hope for the
  46// best.  But for now, people want their modules to come out without having to
  47// throw the thing to the ground or get out a screwdriver.
  48static ssize_t intf_eject_store(struct device *dev,
  49                                struct device_attribute *attr, const char *buf,
  50                                size_t len)
  51{
  52        struct gb_svc *svc = to_gb_svc(dev);
  53        unsigned short intf_id;
  54        int ret;
  55
  56        ret = kstrtou16(buf, 10, &intf_id);
  57        if (ret < 0)
  58                return ret;
  59
  60        dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
  61
  62        ret = gb_svc_intf_eject(svc, intf_id);
  63        if (ret < 0)
  64                return ret;
  65
  66        return len;
  67}
  68static DEVICE_ATTR_WO(intf_eject);
  69
  70static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
  71                             char *buf)
  72{
  73        struct gb_svc *svc = to_gb_svc(dev);
  74
  75        return sprintf(buf, "%s\n",
  76                       gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
  77}
  78
  79static ssize_t watchdog_store(struct device *dev,
  80                              struct device_attribute *attr, const char *buf,
  81                              size_t len)
  82{
  83        struct gb_svc *svc = to_gb_svc(dev);
  84        int retval;
  85        bool user_request;
  86
  87        retval = strtobool(buf, &user_request);
  88        if (retval)
  89                return retval;
  90
  91        if (user_request)
  92                retval = gb_svc_watchdog_enable(svc);
  93        else
  94                retval = gb_svc_watchdog_disable(svc);
  95        if (retval)
  96                return retval;
  97        return len;
  98}
  99static DEVICE_ATTR_RW(watchdog);
 100
 101static ssize_t watchdog_action_show(struct device *dev,
 102                                    struct device_attribute *attr, char *buf)
 103{
 104        struct gb_svc *svc = to_gb_svc(dev);
 105
 106        if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL)
 107                return sprintf(buf, "panic\n");
 108        else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO)
 109                return sprintf(buf, "reset\n");
 110
 111        return -EINVAL;
 112}
 113
 114static ssize_t watchdog_action_store(struct device *dev,
 115                                     struct device_attribute *attr,
 116                                     const char *buf, size_t len)
 117{
 118        struct gb_svc *svc = to_gb_svc(dev);
 119
 120        if (sysfs_streq(buf, "panic"))
 121                svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL;
 122        else if (sysfs_streq(buf, "reset"))
 123                svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO;
 124        else
 125                return -EINVAL;
 126
 127        return len;
 128}
 129static DEVICE_ATTR_RW(watchdog_action);
 130
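/*
 * Power-monitor (pwrmon) helpers. These wrap the SVC power-monitor
 * operations: querying the number of monitored rails, fetching their
 * names, and sampling voltage/current/power either per rail or per
 * interface. SVC/UniPro status codes in the responses are mapped to
 * negative errnos (-EINVAL, -ENOMSG, -EREMOTEIO) for callers.
 */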
 131static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
 132{
 133        struct gb_svc_pwrmon_rail_count_get_response response;
 134        int ret;
 135
 136        ret = gb_operation_sync(svc->connection,
 137                                GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
 138                                &response, sizeof(response));
 139        if (ret) {
 140                dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
 141                return ret;
 142        }
 143
 144        *value = response.rail_count;
 145
 146        return 0;
 147}
 148
 149static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
 150                struct gb_svc_pwrmon_rail_names_get_response *response,
 151                size_t bufsize)
 152{
 153        int ret;
 154
 155        ret = gb_operation_sync(svc->connection,
 156                                GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
 157                                response, bufsize);
 158        if (ret) {
 159                dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
 160                return ret;
 161        }
 162
 163        if (response->status != GB_SVC_OP_SUCCESS) {
 164                dev_err(&svc->dev,
 165                        "SVC error while getting rail names: %u\n",
 166                        response->status);
 167                return -EREMOTEIO;
 168        }
 169
 170        return 0;
 171}
 172
 173static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
 174                                    u8 measurement_type, u32 *value)
 175{
 176        struct gb_svc_pwrmon_sample_get_request request;
 177        struct gb_svc_pwrmon_sample_get_response response;
 178        int ret;
 179
 180        request.rail_id = rail_id;
 181        request.measurement_type = measurement_type;
 182
 183        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
 184                                &request, sizeof(request),
 185                                &response, sizeof(response));
 186        if (ret) {
 187                dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
 188                return ret;
 189        }
 190
 191        if (response.result) {
 192                dev_err(&svc->dev,
 193                        "UniPro error while getting rail power sample (%d %d): %d\n",
 194                        rail_id, measurement_type, response.result);
 195                switch (response.result) {
 196                case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
 197                        return -EINVAL;
 198                case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
 199                        return -ENOMSG;
 200                default:
 201                        return -EREMOTEIO;
 202                }
 203        }
 204
 205        *value = le32_to_cpu(response.measurement);
 206
 207        return 0;
 208}
 209
 210int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
 211                                  u8 measurement_type, u32 *value)
 212{
 213        struct gb_svc_pwrmon_intf_sample_get_request request;
 214        struct gb_svc_pwrmon_intf_sample_get_response response;
 215        int ret;
 216
 217        request.intf_id = intf_id;
 218        request.measurement_type = measurement_type;
 219
 220        ret = gb_operation_sync(svc->connection,
 221                                GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
 222                                &request, sizeof(request),
 223                                &response, sizeof(response));
 224        if (ret) {
 225                dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
 226                return ret;
 227        }
 228
 229        if (response.result) {
 230                dev_err(&svc->dev,
 231                        "UniPro error while getting intf power sample (%d %d): %d\n",
 232                        intf_id, measurement_type, response.result);
 233                switch (response.result) {
 234                case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
 235                        return -EINVAL;
 236                case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
 237                        return -ENOMSG;
 238                default:
 239                        return -EREMOTEIO;
 240                }
 241        }
 242
 243        *value = le32_to_cpu(response.measurement);
 244
 245        return 0;
 246}
 247
 248static struct attribute *svc_attrs[] = {
 249        &dev_attr_endo_id.attr,
 250        &dev_attr_ap_intf_id.attr,
 251        &dev_attr_intf_eject.attr,
 252        &dev_attr_watchdog.attr,
 253        &dev_attr_watchdog_action.attr,
 254        NULL,
 255};
 256ATTRIBUTE_GROUPS(svc);
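
/*
 * Example sysfs usage (paths are illustrative; the SVC registers as
 * "<bus_id>-svc" on the greybus bus, so the exact device name depends on
 * the host device):
 *
 *   cat /sys/bus/greybus/devices/1-svc/ap_intf_id
 *   echo 2 > /sys/bus/greybus/devices/1-svc/intf_eject
 *   echo 1 > /sys/bus/greybus/devices/1-svc/watchdog
 *   echo panic > /sys/bus/greybus/devices/1-svc/watchdog_action
 */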
 257
 258int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
 259{
 260        struct gb_svc_intf_device_id_request request;
 261
 262        request.intf_id = intf_id;
 263        request.device_id = device_id;
 264
 265        return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
 266                                 &request, sizeof(request), NULL, 0);
 267}
 268
 269int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
 270{
 271        struct gb_svc_intf_eject_request request;
 272        int ret;
 273
 274        request.intf_id = intf_id;
 275
  276        /*
  277         * The pulse width for module release in the SVC is long, so we need to
  278         * increase the timeout so the operation does not time out too soon.
  279         */
 280        ret = gb_operation_sync_timeout(svc->connection,
 281                                        GB_SVC_TYPE_INTF_EJECT, &request,
 282                                        sizeof(request), NULL, 0,
 283                                        SVC_INTF_EJECT_TIMEOUT);
 284        if (ret) {
 285                dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
 286                return ret;
 287        }
 288
 289        return 0;
 290}
 291
 292int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
 293{
 294        struct gb_svc_intf_vsys_request request;
 295        struct gb_svc_intf_vsys_response response;
 296        int type, ret;
 297
 298        request.intf_id = intf_id;
 299
 300        if (enable)
 301                type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
 302        else
 303                type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
 304
 305        ret = gb_operation_sync(svc->connection, type,
 306                                &request, sizeof(request),
 307                                &response, sizeof(response));
 308        if (ret < 0)
 309                return ret;
 310        if (response.result_code != GB_SVC_INTF_VSYS_OK)
 311                return -EREMOTEIO;
 312        return 0;
 313}
 314
 315int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
 316{
 317        struct gb_svc_intf_refclk_request request;
 318        struct gb_svc_intf_refclk_response response;
 319        int type, ret;
 320
 321        request.intf_id = intf_id;
 322
 323        if (enable)
 324                type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
 325        else
 326                type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
 327
 328        ret = gb_operation_sync(svc->connection, type,
 329                                &request, sizeof(request),
 330                                &response, sizeof(response));
 331        if (ret < 0)
 332                return ret;
 333        if (response.result_code != GB_SVC_INTF_REFCLK_OK)
 334                return -EREMOTEIO;
 335        return 0;
 336}
 337
 338int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
 339{
 340        struct gb_svc_intf_unipro_request request;
 341        struct gb_svc_intf_unipro_response response;
 342        int type, ret;
 343
 344        request.intf_id = intf_id;
 345
 346        if (enable)
 347                type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
 348        else
 349                type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
 350
 351        ret = gb_operation_sync(svc->connection, type,
 352                                &request, sizeof(request),
 353                                &response, sizeof(response));
 354        if (ret < 0)
 355                return ret;
 356        if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
 357                return -EREMOTEIO;
 358        return 0;
 359}
 360
 361int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
 362{
 363        struct gb_svc_intf_activate_request request;
 364        struct gb_svc_intf_activate_response response;
 365        int ret;
 366
 367        request.intf_id = intf_id;
 368
 369        ret = gb_operation_sync_timeout(svc->connection,
 370                                        GB_SVC_TYPE_INTF_ACTIVATE,
 371                                        &request, sizeof(request),
 372                                        &response, sizeof(response),
 373                                        SVC_INTF_ACTIVATE_TIMEOUT);
 374        if (ret < 0)
 375                return ret;
 376        if (response.status != GB_SVC_OP_SUCCESS) {
 377                dev_err(&svc->dev, "failed to activate interface %u: %u\n",
 378                        intf_id, response.status);
 379                return -EREMOTEIO;
 380        }
 381
 382        *intf_type = response.intf_type;
 383
 384        return 0;
 385}
 386
 387int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id)
 388{
 389        struct gb_svc_intf_resume_request request;
 390        struct gb_svc_intf_resume_response response;
 391        int ret;
 392
 393        request.intf_id = intf_id;
 394
 395        ret = gb_operation_sync_timeout(svc->connection,
 396                                        GB_SVC_TYPE_INTF_RESUME,
 397                                        &request, sizeof(request),
 398                                        &response, sizeof(response),
 399                                        SVC_INTF_RESUME_TIMEOUT);
 400        if (ret < 0) {
 401                dev_err(&svc->dev, "failed to send interface resume %u: %d\n",
 402                        intf_id, ret);
 403                return ret;
 404        }
 405
 406        if (response.status != GB_SVC_OP_SUCCESS) {
 407                dev_err(&svc->dev, "failed to resume interface %u: %u\n",
 408                        intf_id, response.status);
 409                return -EREMOTEIO;
 410        }
 411
 412        return 0;
 413}
 414
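/*
 * Read a DME attribute from the peer at interface @intf_id through the SVC.
 * @attr and @selector follow the UniPro DME attribute/selector addressing;
 * the result is stored in @value, which may be NULL if the caller only
 * cares about success. Returns 0 on success, a negative errno on transport
 * failure, or -EREMOTEIO if the SVC reports a UniPro-level error.
 */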
 415int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
 416                        u32 *value)
 417{
 418        struct gb_svc_dme_peer_get_request request;
 419        struct gb_svc_dme_peer_get_response response;
 420        u16 result;
 421        int ret;
 422
 423        request.intf_id = intf_id;
 424        request.attr = cpu_to_le16(attr);
 425        request.selector = cpu_to_le16(selector);
 426
 427        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
 428                                &request, sizeof(request),
 429                                &response, sizeof(response));
 430        if (ret) {
 431                dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
 432                        intf_id, attr, selector, ret);
 433                return ret;
 434        }
 435
 436        result = le16_to_cpu(response.result_code);
 437        if (result) {
 438                dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
 439                        intf_id, attr, selector, result);
 440                return -EREMOTEIO;
 441        }
 442
 443        if (value)
 444                *value = le32_to_cpu(response.attr_value);
 445
 446        return 0;
 447}
 448
 449int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
 450                        u32 value)
 451{
 452        struct gb_svc_dme_peer_set_request request;
 453        struct gb_svc_dme_peer_set_response response;
 454        u16 result;
 455        int ret;
 456
 457        request.intf_id = intf_id;
 458        request.attr = cpu_to_le16(attr);
 459        request.selector = cpu_to_le16(selector);
 460        request.value = cpu_to_le32(value);
 461
 462        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
 463                                &request, sizeof(request),
 464                                &response, sizeof(response));
 465        if (ret) {
 466                dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
 467                        intf_id, attr, selector, value, ret);
 468                return ret;
 469        }
 470
 471        result = le16_to_cpu(response.result_code);
 472        if (result) {
 473                dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
 474                        intf_id, attr, selector, value, result);
 475                return -EREMOTEIO;
 476        }
 477
 478        return 0;
 479}
 480
 481int gb_svc_connection_create(struct gb_svc *svc,
 482                             u8 intf1_id, u16 cport1_id,
 483                             u8 intf2_id, u16 cport2_id,
 484                             u8 cport_flags)
 485{
 486        struct gb_svc_conn_create_request request;
 487
 488        request.intf1_id = intf1_id;
 489        request.cport1_id = cpu_to_le16(cport1_id);
 490        request.intf2_id = intf2_id;
 491        request.cport2_id = cpu_to_le16(cport2_id);
 492        request.tc = 0;         /* TC0 */
 493        request.flags = cport_flags;
 494
 495        return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
 496                                 &request, sizeof(request), NULL, 0);
 497}
 498
 499void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
 500                               u8 intf2_id, u16 cport2_id)
 501{
 502        struct gb_svc_conn_destroy_request request;
 503        struct gb_connection *connection = svc->connection;
 504        int ret;
 505
 506        request.intf1_id = intf1_id;
 507        request.cport1_id = cpu_to_le16(cport1_id);
 508        request.intf2_id = intf2_id;
 509        request.cport2_id = cpu_to_le16(cport2_id);
 510
 511        ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
 512                                &request, sizeof(request), NULL, 0);
 513        if (ret) {
 514                dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
 515                        intf1_id, cport1_id, intf2_id, cport2_id, ret);
 516        }
 517}
 518
 519/* Creates bi-directional routes between the devices */
 520int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
 521                        u8 intf2_id, u8 dev2_id)
 522{
 523        struct gb_svc_route_create_request request;
 524
 525        request.intf1_id = intf1_id;
 526        request.dev1_id = dev1_id;
 527        request.intf2_id = intf2_id;
 528        request.dev2_id = dev2_id;
 529
 530        return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
 531                                 &request, sizeof(request), NULL, 0);
 532}
 533
 534/* Destroys bi-directional routes between the devices */
 535void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
 536{
 537        struct gb_svc_route_destroy_request request;
 538        int ret;
 539
 540        request.intf1_id = intf1_id;
 541        request.intf2_id = intf2_id;
 542
 543        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
 544                                &request, sizeof(request), NULL, 0);
 545        if (ret) {
 546                dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
 547                        intf1_id, intf2_id, ret);
 548        }
 549}
 550
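/*
 * Request a UniPro link power-mode change for @intf_id. The tx/rx
 * parameters are copied verbatim into the INTF_SET_PWRM request; the
 * optional L2 timer configurations are only filled in when @local/@remote
 * are non-NULL. Note that a plain mode change expects the SVC to report
 * GB_SVC_SETPWRM_PWR_LOCAL (local power change applied), while the
 * hibernate helper below expects GB_SVC_SETPWRM_PWR_OK.
 */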
 551int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
 552                               u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
 553                               u8 tx_amplitude, u8 tx_hs_equalizer,
 554                               u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
 555                               u8 flags, u32 quirks,
 556                               struct gb_svc_l2_timer_cfg *local,
 557                               struct gb_svc_l2_timer_cfg *remote)
 558{
 559        struct gb_svc_intf_set_pwrm_request request;
 560        struct gb_svc_intf_set_pwrm_response response;
 561        int ret;
 562        u16 result_code;
 563
 564        memset(&request, 0, sizeof(request));
 565
 566        request.intf_id = intf_id;
 567        request.hs_series = hs_series;
 568        request.tx_mode = tx_mode;
 569        request.tx_gear = tx_gear;
 570        request.tx_nlanes = tx_nlanes;
 571        request.tx_amplitude = tx_amplitude;
 572        request.tx_hs_equalizer = tx_hs_equalizer;
 573        request.rx_mode = rx_mode;
 574        request.rx_gear = rx_gear;
 575        request.rx_nlanes = rx_nlanes;
 576        request.flags = flags;
 577        request.quirks = cpu_to_le32(quirks);
 578        if (local)
 579                request.local_l2timerdata = *local;
 580        if (remote)
 581                request.remote_l2timerdata = *remote;
 582
 583        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
 584                                &request, sizeof(request),
 585                                &response, sizeof(response));
 586        if (ret < 0)
 587                return ret;
 588
 589        result_code = response.result_code;
 590        if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
  591                dev_err(&svc->dev, "failed to set power mode: %u\n", result_code);
 592                return -EIO;
 593        }
 594
 595        return 0;
 596}
 597EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
 598
 599int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id)
 600{
 601        struct gb_svc_intf_set_pwrm_request request;
 602        struct gb_svc_intf_set_pwrm_response response;
 603        int ret;
 604        u16 result_code;
 605
 606        memset(&request, 0, sizeof(request));
 607
 608        request.intf_id = intf_id;
 609        request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A;
 610        request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
 611        request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
 612
 613        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
 614                                &request, sizeof(request),
 615                                &response, sizeof(response));
 616        if (ret < 0) {
 617                dev_err(&svc->dev,
 618                        "failed to send set power mode operation to interface %u: %d\n",
 619                        intf_id, ret);
 620                return ret;
 621        }
 622
 623        result_code = response.result_code;
 624        if (result_code != GB_SVC_SETPWRM_PWR_OK) {
 625                dev_err(&svc->dev,
 626                        "failed to hibernate the link for interface %u: %u\n",
 627                        intf_id, result_code);
 628                return -EIO;
 629        }
 630
 631        return 0;
 632}
 633
 634int gb_svc_ping(struct gb_svc *svc)
 635{
 636        return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
 637                                         NULL, 0, NULL, 0,
 638                                         GB_OPERATION_TIMEOUT_DEFAULT * 2);
 639}
 640
 641static int gb_svc_version_request(struct gb_operation *op)
 642{
 643        struct gb_connection *connection = op->connection;
 644        struct gb_svc *svc = gb_connection_get_data(connection);
 645        struct gb_svc_version_request *request;
 646        struct gb_svc_version_response *response;
 647
 648        if (op->request->payload_size < sizeof(*request)) {
 649                dev_err(&svc->dev, "short version request (%zu < %zu)\n",
 650                        op->request->payload_size,
 651                        sizeof(*request));
 652                return -EINVAL;
 653        }
 654
 655        request = op->request->payload;
 656
 657        if (request->major > GB_SVC_VERSION_MAJOR) {
 658                dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
 659                         request->major, GB_SVC_VERSION_MAJOR);
 660                return -ENOTSUPP;
 661        }
 662
 663        svc->protocol_major = request->major;
 664        svc->protocol_minor = request->minor;
 665
 666        if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
 667                return -ENOMEM;
 668
 669        response = op->response->payload;
 670        response->major = svc->protocol_major;
 671        response->minor = svc->protocol_minor;
 672
 673        return 0;
 674}
 675
 676static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
 677                                        size_t len, loff_t *offset)
 678{
 679        struct svc_debugfs_pwrmon_rail *pwrmon_rails =
 680                file_inode(file)->i_private;
 681        struct gb_svc *svc = pwrmon_rails->svc;
 682        int ret, desc;
 683        u32 value;
 684        char buff[16];
 685
 686        ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
 687                                       GB_SVC_PWRMON_TYPE_VOL, &value);
 688        if (ret) {
 689                dev_err(&svc->dev,
 690                        "failed to get voltage sample %u: %d\n",
 691                        pwrmon_rails->id, ret);
 692                return ret;
 693        }
 694
 695        desc = scnprintf(buff, sizeof(buff), "%u\n", value);
 696
 697        return simple_read_from_buffer(buf, len, offset, buff, desc);
 698}
 699
 700static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
 701                                        size_t len, loff_t *offset)
 702{
 703        struct svc_debugfs_pwrmon_rail *pwrmon_rails =
 704                file_inode(file)->i_private;
 705        struct gb_svc *svc = pwrmon_rails->svc;
 706        int ret, desc;
 707        u32 value;
 708        char buff[16];
 709
 710        ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
 711                                       GB_SVC_PWRMON_TYPE_CURR, &value);
 712        if (ret) {
 713                dev_err(&svc->dev,
 714                        "failed to get current sample %u: %d\n",
 715                        pwrmon_rails->id, ret);
 716                return ret;
 717        }
 718
 719        desc = scnprintf(buff, sizeof(buff), "%u\n", value);
 720
 721        return simple_read_from_buffer(buf, len, offset, buff, desc);
 722}
 723
 724static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
 725                                      size_t len, loff_t *offset)
 726{
 727        struct svc_debugfs_pwrmon_rail *pwrmon_rails =
 728                file_inode(file)->i_private;
 729        struct gb_svc *svc = pwrmon_rails->svc;
 730        int ret, desc;
 731        u32 value;
 732        char buff[16];
 733
 734        ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
 735                                       GB_SVC_PWRMON_TYPE_PWR, &value);
 736        if (ret) {
 737                dev_err(&svc->dev, "failed to get power sample %u: %d\n",
 738                        pwrmon_rails->id, ret);
 739                return ret;
 740        }
 741
 742        desc = scnprintf(buff, sizeof(buff), "%u\n", value);
 743
 744        return simple_read_from_buffer(buf, len, offset, buff, desc);
 745}
 746
 747static const struct file_operations pwrmon_debugfs_voltage_fops = {
 748        .read           = pwr_debugfs_voltage_read,
 749};
 750
 751static const struct file_operations pwrmon_debugfs_current_fops = {
 752        .read           = pwr_debugfs_current_read,
 753};
 754
 755static const struct file_operations pwrmon_debugfs_power_fops = {
 756        .read           = pwr_debugfs_power_read,
 757};
 758
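/*
 * Create the pwrmon debugfs hierarchy. Under the per-SVC debugfs directory
 * (created in gb_svc_debugfs_init() beneath the greybus debugfs root,
 * typically /sys/kernel/debug/greybus/<dev>/), this adds:
 *
 *   pwrmon/<rail name>/voltage_now
 *   pwrmon/<rail name>/current_now
 *   pwrmon/<rail name>/power_now
 *
 * Each file triggers a fresh PWRMON_SAMPLE_GET when read. If the rail
 * count or rail names cannot be obtained, the pwrmon directory is removed
 * again.
 */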
 759static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
 760{
 761        int i;
 762        size_t bufsize;
 763        struct dentry *dent;
 764        struct gb_svc_pwrmon_rail_names_get_response *rail_names;
 765        u8 rail_count;
 766
 767        dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
 768        if (IS_ERR_OR_NULL(dent))
 769                return;
 770
 771        if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
 772                goto err_pwrmon_debugfs;
 773
 774        if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
 775                goto err_pwrmon_debugfs;
 776
 777        bufsize = sizeof(*rail_names) +
 778                GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;
 779
 780        rail_names = kzalloc(bufsize, GFP_KERNEL);
 781        if (!rail_names)
 782                goto err_pwrmon_debugfs;
 783
 784        svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
 785                                    GFP_KERNEL);
 786        if (!svc->pwrmon_rails)
 787                goto err_pwrmon_debugfs_free;
 788
 789        if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
 790                goto err_pwrmon_debugfs_free;
 791
 792        for (i = 0; i < rail_count; i++) {
 793                struct dentry *dir;
 794                struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
 795                char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
 796
 797                snprintf(fname, sizeof(fname), "%s",
 798                         (char *)&rail_names->name[i]);
 799
 800                rail->id = i;
 801                rail->svc = svc;
 802
 803                dir = debugfs_create_dir(fname, dent);
 804                debugfs_create_file("voltage_now", 0444, dir, rail,
 805                                    &pwrmon_debugfs_voltage_fops);
 806                debugfs_create_file("current_now", 0444, dir, rail,
 807                                    &pwrmon_debugfs_current_fops);
 808                debugfs_create_file("power_now", 0444, dir, rail,
 809                                    &pwrmon_debugfs_power_fops);
 810        }
 811
 812        kfree(rail_names);
 813        return;
 814
 815err_pwrmon_debugfs_free:
 816        kfree(rail_names);
 817        kfree(svc->pwrmon_rails);
 818        svc->pwrmon_rails = NULL;
 819
 820err_pwrmon_debugfs:
 821        debugfs_remove(dent);
 822}
 823
 824static void gb_svc_debugfs_init(struct gb_svc *svc)
 825{
 826        svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
 827                                                 gb_debugfs_get());
 828        gb_svc_pwrmon_debugfs_init(svc);
 829}
 830
 831static void gb_svc_debugfs_exit(struct gb_svc *svc)
 832{
 833        debugfs_remove_recursive(svc->debugfs_dentry);
 834        kfree(svc->pwrmon_rails);
 835        svc->pwrmon_rails = NULL;
 836}
 837
 838static int gb_svc_hello(struct gb_operation *op)
 839{
 840        struct gb_connection *connection = op->connection;
 841        struct gb_svc *svc = gb_connection_get_data(connection);
 842        struct gb_svc_hello_request *hello_request;
 843        int ret;
 844
 845        if (op->request->payload_size < sizeof(*hello_request)) {
 846                dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
 847                         op->request->payload_size,
 848                         sizeof(*hello_request));
 849                return -EINVAL;
 850        }
 851
 852        hello_request = op->request->payload;
 853        svc->endo_id = le16_to_cpu(hello_request->endo_id);
 854        svc->ap_intf_id = hello_request->interface_id;
 855
 856        ret = device_add(&svc->dev);
 857        if (ret) {
 858                dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
 859                return ret;
 860        }
 861
 862        ret = gb_svc_watchdog_create(svc);
 863        if (ret) {
 864                dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
 865                goto err_unregister_device;
 866        }
 867
 868        gb_svc_debugfs_init(svc);
 869
 870        return gb_svc_queue_deferred_request(op);
 871
 872err_unregister_device:
 873        gb_svc_watchdog_destroy(svc);
 874        device_del(&svc->dev);
 875        return ret;
 876}
 877
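/*
 * Interface IDs within a module are contiguous and start at the module ID
 * (the primary interface), so an interface belongs to a module iff
 * module_id <= intf_id < module_id + num_interfaces.
 */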
 878static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
 879                                                    u8 intf_id)
 880{
 881        struct gb_host_device *hd = svc->hd;
 882        struct gb_module *module;
 883        size_t num_interfaces;
 884        u8 module_id;
 885
 886        list_for_each_entry(module, &hd->modules, hd_node) {
 887                module_id = module->module_id;
 888                num_interfaces = module->num_interfaces;
 889
 890                if (intf_id >= module_id &&
 891                    intf_id < module_id + num_interfaces) {
 892                        return module->interfaces[intf_id - module_id];
 893                }
 894        }
 895
 896        return NULL;
 897}
 898
 899static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
 900{
 901        struct gb_host_device *hd = svc->hd;
 902        struct gb_module *module;
 903
 904        list_for_each_entry(module, &hd->modules, hd_node) {
 905                if (module->module_id == module_id)
 906                        return module;
 907        }
 908
 909        return NULL;
 910}
 911
 912static void gb_svc_process_hello_deferred(struct gb_operation *operation)
 913{
 914        struct gb_connection *connection = operation->connection;
 915        struct gb_svc *svc = gb_connection_get_data(connection);
 916        int ret;
 917
 918        /*
 919         * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
 920         * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
 921         * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
 922         * module.
 923         *
  924         * This code should be removed once SW-2217 (Heuristic for UniPro
  925         * Power Mode Changes) is resolved.
 926         */
 927        ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
 928                                         GB_SVC_UNIPRO_HS_SERIES_A,
 929                                         GB_SVC_UNIPRO_SLOW_AUTO_MODE,
 930                                         2, 1,
 931                                         GB_SVC_SMALL_AMPLITUDE,
 932                                         GB_SVC_NO_DE_EMPHASIS,
 933                                         GB_SVC_UNIPRO_SLOW_AUTO_MODE,
 934                                         2, 1,
 935                                         0, 0,
 936                                         NULL, NULL);
 937
 938        if (ret)
 939                dev_warn(&svc->dev,
 940                         "power mode change failed on AP to switch link: %d\n",
 941                         ret);
 942}
 943
 944static void gb_svc_process_module_inserted(struct gb_operation *operation)
 945{
 946        struct gb_svc_module_inserted_request *request;
 947        struct gb_connection *connection = operation->connection;
 948        struct gb_svc *svc = gb_connection_get_data(connection);
 949        struct gb_host_device *hd = svc->hd;
 950        struct gb_module *module;
 951        size_t num_interfaces;
 952        u8 module_id;
 953        u16 flags;
 954        int ret;
 955
 956        /* The request message size has already been verified. */
 957        request = operation->request->payload;
 958        module_id = request->primary_intf_id;
 959        num_interfaces = request->intf_count;
 960        flags = le16_to_cpu(request->flags);
 961
 962        dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
 963                __func__, module_id, num_interfaces, flags);
 964
 965        if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
 966                dev_warn(&svc->dev, "no primary interface detected on module %u\n",
 967                         module_id);
 968        }
 969
 970        module = gb_svc_module_lookup(svc, module_id);
 971        if (module) {
 972                dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
 973                         module_id);
 974                return;
 975        }
 976
 977        module = gb_module_create(hd, module_id, num_interfaces);
 978        if (!module) {
 979                dev_err(&svc->dev, "failed to create module\n");
 980                return;
 981        }
 982
 983        ret = gb_module_add(module);
 984        if (ret) {
 985                gb_module_put(module);
 986                return;
 987        }
 988
 989        list_add(&module->hd_node, &hd->modules);
 990}
 991
 992static void gb_svc_process_module_removed(struct gb_operation *operation)
 993{
 994        struct gb_svc_module_removed_request *request;
 995        struct gb_connection *connection = operation->connection;
 996        struct gb_svc *svc = gb_connection_get_data(connection);
 997        struct gb_module *module;
 998        u8 module_id;
 999
1000        /* The request message size has already been verified. */
1001        request = operation->request->payload;
1002        module_id = request->primary_intf_id;
1003
1004        dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);
1005
1006        module = gb_svc_module_lookup(svc, module_id);
1007        if (!module) {
1008                dev_warn(&svc->dev, "unexpected module-removed event %u\n",
1009                         module_id);
1010                return;
1011        }
1012
1013        module->disconnected = true;
1014
1015        gb_module_del(module);
1016        list_del(&module->hd_node);
1017        gb_module_put(module);
1018}
1019
1020static void gb_svc_process_intf_oops(struct gb_operation *operation)
1021{
1022        struct gb_svc_intf_oops_request *request;
1023        struct gb_connection *connection = operation->connection;
1024        struct gb_svc *svc = gb_connection_get_data(connection);
1025        struct gb_interface *intf;
1026        u8 intf_id;
1027        u8 reason;
1028
1029        /* The request message size has already been verified. */
1030        request = operation->request->payload;
1031        intf_id = request->intf_id;
1032        reason = request->reason;
1033
1034        intf = gb_svc_interface_lookup(svc, intf_id);
1035        if (!intf) {
1036                dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
1037                         intf_id);
1038                return;
1039        }
1040
1041        dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
1042                 intf_id, reason);
1043
1044        mutex_lock(&intf->mutex);
1045        intf->disconnected = true;
1046        gb_interface_disable(intf);
1047        gb_interface_deactivate(intf);
1048        mutex_unlock(&intf->mutex);
1049}
1050
1051static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
1052{
1053        struct gb_svc_intf_mailbox_event_request *request;
1054        struct gb_connection *connection = operation->connection;
1055        struct gb_svc *svc = gb_connection_get_data(connection);
1056        struct gb_interface *intf;
1057        u8 intf_id;
1058        u16 result_code;
1059        u32 mailbox;
1060
1061        /* The request message size has already been verified. */
1062        request = operation->request->payload;
1063        intf_id = request->intf_id;
1064        result_code = le16_to_cpu(request->result_code);
1065        mailbox = le32_to_cpu(request->mailbox);
1066
1067        dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
1068                __func__, intf_id, result_code, mailbox);
1069
1070        intf = gb_svc_interface_lookup(svc, intf_id);
1071        if (!intf) {
1072                dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
1073                return;
1074        }
1075
1076        gb_interface_mailbox_event(intf, result_code, mailbox);
1077}
1078
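/*
 * Deferred SVC request processing. Incoming SVC requests are handled
 * serially in the connection's receive context, so handlers that may need
 * to issue further (blocking) Greybus operations (the hello-time power-mode
 * change, module enumeration on insertion, module removal, mailbox events
 * and interface oops) are queued on svc->wq and processed from this work
 * function instead.
 */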
1079static void gb_svc_process_deferred_request(struct work_struct *work)
1080{
1081        struct gb_svc_deferred_request *dr;
1082        struct gb_operation *operation;
1083        struct gb_svc *svc;
1084        u8 type;
1085
1086        dr = container_of(work, struct gb_svc_deferred_request, work);
1087        operation = dr->operation;
1088        svc = gb_connection_get_data(operation->connection);
1089        type = operation->request->header->type;
1090
1091        switch (type) {
1092        case GB_SVC_TYPE_SVC_HELLO:
1093                gb_svc_process_hello_deferred(operation);
1094                break;
1095        case GB_SVC_TYPE_MODULE_INSERTED:
1096                gb_svc_process_module_inserted(operation);
1097                break;
1098        case GB_SVC_TYPE_MODULE_REMOVED:
1099                gb_svc_process_module_removed(operation);
1100                break;
1101        case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
1102                gb_svc_process_intf_mailbox_event(operation);
1103                break;
1104        case GB_SVC_TYPE_INTF_OOPS:
1105                gb_svc_process_intf_oops(operation);
1106                break;
1107        default:
1108                dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
1109        }
1110
1111        gb_operation_put(operation);
1112        kfree(dr);
1113}
1114
1115static int gb_svc_queue_deferred_request(struct gb_operation *operation)
1116{
1117        struct gb_svc *svc = gb_connection_get_data(operation->connection);
1118        struct gb_svc_deferred_request *dr;
1119
1120        dr = kmalloc(sizeof(*dr), GFP_KERNEL);
1121        if (!dr)
1122                return -ENOMEM;
1123
1124        gb_operation_get(operation);
1125
1126        dr->operation = operation;
1127        INIT_WORK(&dr->work, gb_svc_process_deferred_request);
1128
1129        queue_work(svc->wq, &dr->work);
1130
1131        return 0;
1132}
1133
1134static int gb_svc_intf_reset_recv(struct gb_operation *op)
1135{
1136        struct gb_svc *svc = gb_connection_get_data(op->connection);
1137        struct gb_message *request = op->request;
1138        struct gb_svc_intf_reset_request *reset;
1139
1140        if (request->payload_size < sizeof(*reset)) {
1141                dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
1142                         request->payload_size, sizeof(*reset));
1143                return -EINVAL;
1144        }
1145        reset = request->payload;
1146
1147        /* FIXME Reset the interface here */
1148
1149        return 0;
1150}
1151
1152static int gb_svc_module_inserted_recv(struct gb_operation *op)
1153{
1154        struct gb_svc *svc = gb_connection_get_data(op->connection);
1155        struct gb_svc_module_inserted_request *request;
1156
1157        if (op->request->payload_size < sizeof(*request)) {
1158                dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
1159                         op->request->payload_size, sizeof(*request));
1160                return -EINVAL;
1161        }
1162
1163        request = op->request->payload;
1164
1165        dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1166                request->primary_intf_id);
1167
1168        return gb_svc_queue_deferred_request(op);
1169}
1170
1171static int gb_svc_module_removed_recv(struct gb_operation *op)
1172{
1173        struct gb_svc *svc = gb_connection_get_data(op->connection);
1174        struct gb_svc_module_removed_request *request;
1175
1176        if (op->request->payload_size < sizeof(*request)) {
1177                dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
1178                         op->request->payload_size, sizeof(*request));
1179                return -EINVAL;
1180        }
1181
1182        request = op->request->payload;
1183
1184        dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1185                request->primary_intf_id);
1186
1187        return gb_svc_queue_deferred_request(op);
1188}
1189
1190static int gb_svc_intf_oops_recv(struct gb_operation *op)
1191{
1192        struct gb_svc *svc = gb_connection_get_data(op->connection);
1193        struct gb_svc_intf_oops_request *request;
1194
1195        if (op->request->payload_size < sizeof(*request)) {
1196                dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
1197                         op->request->payload_size, sizeof(*request));
1198                return -EINVAL;
1199        }
1200
1201        return gb_svc_queue_deferred_request(op);
1202}
1203
1204static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
1205{
1206        struct gb_svc *svc = gb_connection_get_data(op->connection);
1207        struct gb_svc_intf_mailbox_event_request *request;
1208
1209        if (op->request->payload_size < sizeof(*request)) {
1210                dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
1211                         op->request->payload_size, sizeof(*request));
1212                return -EINVAL;
1213        }
1214
1215        request = op->request->payload;
1216
1217        dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
1218
1219        return gb_svc_queue_deferred_request(op);
1220}
1221
1222static int gb_svc_request_handler(struct gb_operation *op)
1223{
1224        struct gb_connection *connection = op->connection;
1225        struct gb_svc *svc = gb_connection_get_data(connection);
1226        u8 type = op->type;
1227        int ret = 0;
1228
 1229        /*
 1230         * SVC requests need to follow a specific order (at least initially),
 1231         * and the code below enforces that. The expected order is:
 1232         * - PROTOCOL_VERSION
 1233         * - SVC_HELLO
 1234         * - Any other request, except the two above.
 1235         *
 1236         * Incoming requests are guaranteed to be serialized, so we don't
 1237         * need to protect 'state' against races.
 1238         */
1239        switch (type) {
1240        case GB_SVC_TYPE_PROTOCOL_VERSION:
1241                if (svc->state != GB_SVC_STATE_RESET)
1242                        ret = -EINVAL;
1243                break;
1244        case GB_SVC_TYPE_SVC_HELLO:
1245                if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
1246                        ret = -EINVAL;
1247                break;
1248        default:
1249                if (svc->state != GB_SVC_STATE_SVC_HELLO)
1250                        ret = -EINVAL;
1251                break;
1252        }
1253
1254        if (ret) {
1255                dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
1256                         type, svc->state);
1257                return ret;
1258        }
1259
1260        switch (type) {
1261        case GB_SVC_TYPE_PROTOCOL_VERSION:
1262                ret = gb_svc_version_request(op);
1263                if (!ret)
1264                        svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
1265                return ret;
1266        case GB_SVC_TYPE_SVC_HELLO:
1267                ret = gb_svc_hello(op);
1268                if (!ret)
1269                        svc->state = GB_SVC_STATE_SVC_HELLO;
1270                return ret;
1271        case GB_SVC_TYPE_INTF_RESET:
1272                return gb_svc_intf_reset_recv(op);
1273        case GB_SVC_TYPE_MODULE_INSERTED:
1274                return gb_svc_module_inserted_recv(op);
1275        case GB_SVC_TYPE_MODULE_REMOVED:
1276                return gb_svc_module_removed_recv(op);
1277        case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
1278                return gb_svc_intf_mailbox_event_recv(op);
1279        case GB_SVC_TYPE_INTF_OOPS:
1280                return gb_svc_intf_oops_recv(op);
1281        default:
1282                dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
1283                return -EINVAL;
1284        }
1285}
1286
1287static void gb_svc_release(struct device *dev)
1288{
1289        struct gb_svc *svc = to_gb_svc(dev);
1290
1291        if (svc->connection)
1292                gb_connection_destroy(svc->connection);
1293        ida_destroy(&svc->device_id_map);
1294        destroy_workqueue(svc->wq);
1295        kfree(svc);
1296}
1297
1298struct device_type greybus_svc_type = {
1299        .name           = "greybus_svc",
1300        .release        = gb_svc_release,
1301};
1302
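/*
 * Allocate and initialize an SVC device for @hd. This sets up the
 * workqueue used for deferred requests and the static connection on
 * GB_SVC_CPORT_ID, but does not register the device; device_add() happens
 * later from gb_svc_hello() once the endo and AP interface IDs are known.
 * The caller releases its reference with gb_svc_put().
 */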
1303struct gb_svc *gb_svc_create(struct gb_host_device *hd)
1304{
1305        struct gb_svc *svc;
1306
1307        svc = kzalloc(sizeof(*svc), GFP_KERNEL);
1308        if (!svc)
1309                return NULL;
1310
1311        svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
1312        if (!svc->wq) {
1313                kfree(svc);
1314                return NULL;
1315        }
1316
1317        svc->dev.parent = &hd->dev;
1318        svc->dev.bus = &greybus_bus_type;
1319        svc->dev.type = &greybus_svc_type;
1320        svc->dev.groups = svc_groups;
1321        svc->dev.dma_mask = svc->dev.parent->dma_mask;
1322        device_initialize(&svc->dev);
1323
1324        dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
1325
1326        ida_init(&svc->device_id_map);
1327        svc->state = GB_SVC_STATE_RESET;
1328        svc->hd = hd;
1329
1330        svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
1331                                                      gb_svc_request_handler);
1332        if (IS_ERR(svc->connection)) {
1333                dev_err(&svc->dev, "failed to create connection: %ld\n",
1334                        PTR_ERR(svc->connection));
1335                goto err_put_device;
1336        }
1337
1338        gb_connection_set_data(svc->connection, svc);
1339
1340        return svc;
1341
1342err_put_device:
1343        put_device(&svc->dev);
1344        return NULL;
1345}
1346
1347int gb_svc_add(struct gb_svc *svc)
1348{
1349        int ret;
1350
1351        /*
1352         * The SVC protocol is currently driven by the SVC, so the SVC device
1353         * is added from the connection request handler when enough
1354         * information has been received.
1355         */
1356        ret = gb_connection_enable(svc->connection);
1357        if (ret)
1358                return ret;
1359
1360        return 0;
1361}
1362
1363static void gb_svc_remove_modules(struct gb_svc *svc)
1364{
1365        struct gb_host_device *hd = svc->hd;
1366        struct gb_module *module, *tmp;
1367
1368        list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
1369                gb_module_del(module);
1370                list_del(&module->hd_node);
1371                gb_module_put(module);
1372        }
1373}
1374
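/*
 * Tear down in the reverse order of bring-up: stop receiving new SVC
 * requests first, remove the device (and its debugfs and watchdog state)
 * if it was ever registered, flush any still-queued deferred work, drop
 * all modules, and only then fully disable the connection.
 */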
1375void gb_svc_del(struct gb_svc *svc)
1376{
1377        gb_connection_disable_rx(svc->connection);
1378
1379        /*
1380         * The SVC device may have been registered from the request handler.
1381         */
1382        if (device_is_registered(&svc->dev)) {
1383                gb_svc_debugfs_exit(svc);
1384                gb_svc_watchdog_destroy(svc);
1385                device_del(&svc->dev);
1386        }
1387
1388        flush_workqueue(svc->wq);
1389
1390        gb_svc_remove_modules(svc);
1391
1392        gb_connection_disable(svc->connection);
1393}
1394
1395void gb_svc_put(struct gb_svc *svc)
1396{
1397        put_device(&svc->dev);
1398}
1399