Igor Sarkisov | 25fca61 | 2020-10-06 19:30:29 -0700 | [diff] [blame] | 1 | /* |
| 2 | * drivers/pci/iov.c |
| 3 | * |
| 4 | * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com> |
| 5 | * |
| 6 | * PCI Express I/O Virtualization (IOV) support. |
| 7 | * Single Root IOV 1.0 |
| 8 | * Address Translation Service 1.0 |
| 9 | */ |
| 10 | |
| 11 | #include <linux/pci.h> |
| 12 | #include <linux/slab.h> |
| 13 | #include <linux/mutex.h> |
| 14 | #include <linux/export.h> |
| 15 | #include <linux/string.h> |
| 16 | #include <linux/delay.h> |
| 17 | #include <linux/pci-ats.h> |
| 18 | #include "pci.h" |
| 19 | |
| 20 | #define VIRTFN_ID_LEN 16 |
| 21 | |
| 22 | static inline u8 virtfn_bus(struct pci_dev *dev, int id) |
| 23 | { |
| 24 | return dev->bus->number + ((dev->devfn + dev->sriov->offset + |
| 25 | dev->sriov->stride * id) >> 8); |
| 26 | } |
| 27 | |
| 28 | static inline u8 virtfn_devfn(struct pci_dev *dev, int id) |
| 29 | { |
| 30 | return (dev->devfn + dev->sriov->offset + |
| 31 | dev->sriov->stride * id) & 0xff; |
| 32 | } |
| 33 | |
| 34 | static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr) |
| 35 | { |
| 36 | struct pci_bus *child; |
| 37 | |
| 38 | if (bus->number == busnr) |
| 39 | return bus; |
| 40 | |
| 41 | child = pci_find_bus(pci_domain_nr(bus), busnr); |
| 42 | if (child) |
| 43 | return child; |
| 44 | |
| 45 | child = pci_add_new_bus(bus, NULL, busnr); |
| 46 | if (!child) |
| 47 | return NULL; |
| 48 | |
| 49 | pci_bus_insert_busn_res(child, busnr, busnr); |
| 50 | bus->is_added = 1; |
| 51 | |
| 52 | return child; |
| 53 | } |
| 54 | |
| 55 | static void virtfn_remove_bus(struct pci_bus *bus, int busnr) |
| 56 | { |
| 57 | struct pci_bus *child; |
| 58 | |
| 59 | if (bus->number == busnr) |
| 60 | return; |
| 61 | |
| 62 | child = pci_find_bus(pci_domain_nr(bus), busnr); |
| 63 | BUG_ON(!child); |
| 64 | |
| 65 | if (list_empty(&child->devices)) |
| 66 | pci_remove_bus(child); |
| 67 | } |
| 68 | |
| 69 | static int virtfn_add(struct pci_dev *dev, int id, int reset) |
| 70 | { |
| 71 | int i; |
| 72 | int rc; |
| 73 | u64 size; |
| 74 | char buf[VIRTFN_ID_LEN]; |
| 75 | struct pci_dev *virtfn; |
| 76 | struct resource *res; |
| 77 | struct pci_sriov *iov = dev->sriov; |
| 78 | |
| 79 | virtfn = alloc_pci_dev(); |
| 80 | if (!virtfn) |
| 81 | return -ENOMEM; |
| 82 | |
| 83 | mutex_lock(&iov->dev->sriov->lock); |
| 84 | virtfn->bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id)); |
| 85 | if (!virtfn->bus) { |
| 86 | kfree(virtfn); |
| 87 | mutex_unlock(&iov->dev->sriov->lock); |
| 88 | return -ENOMEM; |
| 89 | } |
| 90 | virtfn->devfn = virtfn_devfn(dev, id); |
| 91 | virtfn->vendor = dev->vendor; |
| 92 | pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device); |
| 93 | pci_setup_device(virtfn); |
| 94 | virtfn->dev.parent = dev->dev.parent; |
| 95 | virtfn->physfn = pci_dev_get(dev); |
| 96 | virtfn->is_virtfn = 1; |
| 97 | |
| 98 | for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { |
| 99 | res = dev->resource + PCI_IOV_RESOURCES + i; |
| 100 | if (!res->parent) |
| 101 | continue; |
| 102 | virtfn->resource[i].name = pci_name(virtfn); |
| 103 | virtfn->resource[i].flags = res->flags; |
| 104 | size = resource_size(res); |
| 105 | do_div(size, iov->total_VFs); |
| 106 | virtfn->resource[i].start = res->start + size * id; |
| 107 | virtfn->resource[i].end = virtfn->resource[i].start + size - 1; |
| 108 | rc = request_resource(res, &virtfn->resource[i]); |
| 109 | BUG_ON(rc); |
| 110 | } |
| 111 | |
| 112 | if (reset) |
| 113 | __pci_reset_function(virtfn); |
| 114 | |
| 115 | pci_device_add(virtfn, virtfn->bus); |
| 116 | mutex_unlock(&iov->dev->sriov->lock); |
| 117 | |
| 118 | rc = pci_bus_add_device(virtfn); |
| 119 | sprintf(buf, "virtfn%u", id); |
| 120 | rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf); |
| 121 | if (rc) |
| 122 | goto failed1; |
| 123 | rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn"); |
| 124 | if (rc) |
| 125 | goto failed2; |
| 126 | |
| 127 | kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE); |
| 128 | |
| 129 | return 0; |
| 130 | |
| 131 | failed2: |
| 132 | sysfs_remove_link(&dev->dev.kobj, buf); |
| 133 | failed1: |
| 134 | pci_dev_put(dev); |
| 135 | mutex_lock(&iov->dev->sriov->lock); |
| 136 | pci_stop_and_remove_bus_device(virtfn); |
| 137 | virtfn_remove_bus(dev->bus, virtfn_bus(dev, id)); |
| 138 | mutex_unlock(&iov->dev->sriov->lock); |
| 139 | |
| 140 | return rc; |
| 141 | } |
| 142 | |
/*
 * virtfn_remove - tear down the VF with index @id that belongs to PF @dev
 * @dev:   the SR-IOV physical function
 * @id:    zero-based VF index
 * @reset: nonzero to function-level reset the VF before removal
 *
 * Undoes virtfn_add(): removes the sysfs links, stops and removes the
 * VF's pci_dev and (if now empty) its virtual bus, and drops the PF
 * reference taken by virtfn_add().  Quietly does nothing if the VF's
 * bus or slot no longer exists.
 */
static void virtfn_remove(struct pci_dev *dev, int id, int reset)
{
	char buf[VIRTFN_ID_LEN];
	struct pci_bus *bus;
	struct pci_dev *virtfn;
	struct pci_sriov *iov = dev->sriov;

	bus = pci_find_bus(pci_domain_nr(dev->bus), virtfn_bus(dev, id));
	if (!bus)
		return;

	virtfn = pci_get_slot(bus, virtfn_devfn(dev, id));
	if (!virtfn)
		return;

	/*
	 * NOTE(review): the reference from pci_get_slot() is dropped here,
	 * before virtfn is used below — presumably safe because the device
	 * stays on the bus until pci_stop_and_remove_bus_device(); confirm.
	 */
	pci_dev_put(virtfn);

	if (reset) {
		device_release_driver(&virtfn->dev);
		__pci_reset_function(virtfn);
	}

	sprintf(buf, "virtfn%u", id);
	sysfs_remove_link(&dev->dev.kobj, buf);
	/*
	 * pci_stop_dev() could have been called for this virtfn already,
	 * so the directory for the virtfn may have been removed before.
	 * Double check to avoid spurious sysfs warnings.
	 */
	if (virtfn->dev.kobj.sd)
		sysfs_remove_link(&virtfn->dev.kobj, "physfn");

	/* iov->dev is the "link" PF whose lock serializes VF add/remove */
	mutex_lock(&iov->dev->sriov->lock);
	pci_stop_and_remove_bus_device(virtfn);
	virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
	mutex_unlock(&iov->dev->sriov->lock);

	/* drop the PF reference taken by virtfn_add() */
	pci_dev_put(dev);
}
| 182 | |
| 183 | static int sriov_migration(struct pci_dev *dev) |
| 184 | { |
| 185 | u16 status; |
| 186 | struct pci_sriov *iov = dev->sriov; |
| 187 | |
| 188 | if (!iov->num_VFs) |
| 189 | return 0; |
| 190 | |
| 191 | if (!(iov->cap & PCI_SRIOV_CAP_VFM)) |
| 192 | return 0; |
| 193 | |
| 194 | pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status); |
| 195 | if (!(status & PCI_SRIOV_STATUS_VFM)) |
| 196 | return 0; |
| 197 | |
| 198 | schedule_work(&iov->mtask); |
| 199 | |
| 200 | return 1; |
| 201 | } |
| 202 | |
/*
 * sriov_migration_task - process pending VF migration state changes
 * @work: embedded work item (iov->mtask), queued by sriov_migration()
 *
 * Walks the per-VF migration state array (iov->mstate, one byte per VF,
 * mapped in sriov_enable_migration()) for the VFs beyond InitialVFs,
 * acknowledges migrate-in/migrate-out requests and adds or removes the
 * corresponding VF pci_dev.  Finally clears the VF Migration status bit
 * in the PF's SR-IOV capability.
 */
static void sriov_migration_task(struct work_struct *work)
{
	int i;
	u8 state;
	u16 status;
	struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);

	for (i = iov->initial_VFs; i < iov->num_VFs; i++) {
		state = readb(iov->mstate + i);
		if (state == PCI_SRIOV_VFM_MI) {
			/* migrate-in request: mark the VF available */
			writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
			/* re-read to confirm the device accepted the write */
			state = readb(iov->mstate + i);
			if (state == PCI_SRIOV_VFM_AV)
				virtfn_add(iov->self, i, 1);
		} else if (state == PCI_SRIOV_VFM_MO) {
			/* migrate-out: remove the VF, then mark unavailable */
			virtfn_remove(iov->self, i, 1);
			writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
			state = readb(iov->mstate + i);
			if (state == PCI_SRIOV_VFM_AV)
				/* device went straight back to available */
				virtfn_add(iov->self, i, 0);
		}
	}

	/* ack the event: clear VF Migration Status on the PF */
	pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
	status &= ~PCI_SRIOV_STATUS_VFM;
	pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
}
| 230 | |
/*
 * sriov_enable_migration - set up VF Migration support for @nr_virtfn VFs
 *
 * Maps the per-VF migration state array (one byte per VF, located via
 * the VFM register's BIR and offset) and enables VF Migration plus its
 * interrupt in the SR-IOV control register.
 *
 * Returns 0 on success, or when no VFs beyond InitialVFs are requested
 * (in which case migration is not needed); negative errno otherwise.
 */
static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
{
	int bir;
	u32 table;
	resource_size_t pa;
	struct pci_sriov *iov = dev->sriov;

	/* migration only matters for VFs beyond InitialVFs */
	if (nr_virtfn <= iov->initial_VFs)
		return 0;

	pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
	bir = PCI_SRIOV_VFM_BIR(table);
	if (bir > PCI_STD_RESOURCE_END)
		return -EIO;

	table = PCI_SRIOV_VFM_OFFSET(table);
	/* the state array must fit inside the indicated BAR */
	if (table + nr_virtfn > pci_resource_len(dev, bir))
		return -EIO;

	pa = pci_resource_start(dev, bir) + table;
	iov->mstate = ioremap(pa, nr_virtfn);	/* one state byte per VF */
	if (!iov->mstate)
		return -ENOMEM;

	INIT_WORK(&iov->mtask, sriov_migration_task);

	iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

	return 0;
}
| 262 | |
/*
 * sriov_disable_migration - tear down VF Migration support on @dev
 *
 * Clears the VFM control/interrupt bits first so no further events are
 * signalled, then flushes any in-flight migration work before unmapping
 * the state array.
 */
static void sriov_disable_migration(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

	cancel_work_sync(&iov->mtask);
	iounmap(iov->mstate);
}
| 273 | |
/*
 * sriov_enable - enable @nr_virtfn VFs on physical function @dev
 *
 * Validates @nr_virtfn against Initial/TotalVFs, programs NumVFs,
 * verifies the IOV BAR resources and bus-number range, sets VF Enable
 * and VF MSE, then creates the VF pci_devs.  On failure every VF added
 * so far is removed and SR-IOV is switched back off.
 *
 * Returns 0 on success (or when @nr_virtfn is 0), negative errno on
 * failure; -EINVAL if VFs are already enabled.
 */
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
	int rc;
	int i, j;
	int nres;
	u16 offset, stride, initial;
	struct resource *res;
	struct pci_dev *pdev;
	struct pci_sriov *iov = dev->sriov;
	int bars = 0;

	if (!nr_virtfn)
		return 0;

	if (iov->num_VFs)
		return -EINVAL;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
	/* without VF Migration support, InitialVFs must equal TotalVFs */
	if (initial > iov->total_VFs ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total_VFs)))
		return -EIO;

	if (nr_virtfn < 0 || nr_virtfn > iov->total_VFs ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
		return -EINVAL;

	/*
	 * NumVFs is written before VF Offset/Stride are read — presumably
	 * because they may vary with NumVFs per the SR-IOV spec; confirm.
	 */
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
	if (!offset || (nr_virtfn > 1 && !stride))
		return -EIO;

	/* every sized IOV BAR must have been claimed (parent set) earlier */
	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		bars |= (1 << (i + PCI_IOV_RESOURCES));
		res = dev->resource + PCI_IOV_RESOURCES + i;
		if (res->parent)
			nres++;
	}
	if (nres != iov->nres) {
		dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
		return -ENOMEM;
	}

	iov->offset = offset;
	iov->stride = stride;

	/* the highest-numbered VF must fit in this bridge's bus range */
	if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->busn_res.end) {
		dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
		return -ENOMEM;
	}

	if (pci_enable_resources(dev, bars)) {
		dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
		return -ENOMEM;
	}

	/* dependent PF: link in sysfs to the PF named by Function Dependency Link */
	if (iov->link != dev->devfn) {
		pdev = pci_get_slot(dev->bus, iov->link);
		if (!pdev)
			return -ENODEV;

		/*
		 * NOTE(review): the reference is dropped before pdev is
		 * dereferenced below — presumably the link PF is kept alive
		 * by its bus membership; confirm.
		 */
		pci_dev_put(pdev);

		if (!pdev->is_physfn)
			return -ENODEV;

		rc = sysfs_create_link(&dev->dev.kobj,
				       &pdev->dev.kobj, "dep_link");
		if (rc)
			return rc;
	}

	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	/* allow the newly enabled VFs time to settle before touching them */
	msleep(100);
	pci_cfg_access_unlock(dev);

	iov->initial_VFs = initial;
	if (nr_virtfn < initial)
		initial = nr_virtfn;

	for (i = 0; i < initial; i++) {
		rc = virtfn_add(dev, i, 0);
		if (rc)
			goto failed;
	}

	if (iov->cap & PCI_SRIOV_CAP_VFM) {
		rc = sriov_enable_migration(dev, nr_virtfn);
		if (rc)
			goto failed;
	}

	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
	iov->num_VFs = nr_virtfn;

	return 0;

failed:
	/* remove the VFs added so far, then turn VF Enable back off */
	for (j = 0; j < i; j++)
		virtfn_remove(dev, j, 0);

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	ssleep(1);
	pci_cfg_access_unlock(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	return rc;
}
| 389 | |
/*
 * sriov_disable - remove all VFs of @dev and clear VF Enable
 *
 * Reverses sriov_enable(): disables migration support if present,
 * removes every VF pci_dev, clears VF Enable/MSE, and drops the
 * "dep_link" sysfs link for dependent PFs.  No-op when SR-IOV is not
 * currently enabled.
 */
static void sriov_disable(struct pci_dev *dev)
{
	int i;
	struct pci_sriov *iov = dev->sriov;

	if (!iov->num_VFs)
		return;

	if (iov->cap & PCI_SRIOV_CAP_VFM)
		sriov_disable_migration(dev);

	for (i = 0; i < iov->num_VFs; i++)
		virtfn_remove(dev, i, 0);

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	/* give the device time to complete the disable */
	ssleep(1);
	pci_cfg_access_unlock(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	iov->num_VFs = 0;
}
| 415 | |
/*
 * sriov_init - probe and record the SR-IOV capability at config offset @pos
 *
 * Validates the capability, picks a system page size, sizes the VF BARs
 * (each scaled by TotalVFs), allocates and fills dev->sriov, and marks
 * @dev as a physical function.
 *
 * Returns 0 on success (including the TotalVFs == 0 case, where nothing
 * is set up), negative errno otherwise.
 */
static int sriov_init(struct pci_dev *dev, int pos)
{
	int i;
	int rc;
	int nres;
	u32 pgsz;
	u16 ctrl, total, offset, stride;
	struct pci_sriov *iov;
	struct resource *res;
	struct pci_dev *pdev;

	/* SR-IOV is only handled on endpoints and RC-integrated endpoints */
	if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT)
		return -ENODEV;

	/* if firmware left VF Enable set, turn it off and let it settle */
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE) {
		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
		ssleep(1);
	}

	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
	if (!total)
		return 0;

	/*
	 * Only set ARI Capable Hierarchy if no other PF on this bus has
	 * already been probed (the first PF decides for the hierarchy).
	 */
	ctrl = 0;
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev->is_physfn)
			goto found;

	pdev = NULL;
	if (pci_ari_enabled(dev->bus))
		ctrl |= PCI_SRIOV_CTRL_ARI;

found:
	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
	if (!offset || (total > 1 && !stride))
		return -EIO;

	/* select a supported VF page size no smaller than PAGE_SIZE */
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
	pgsz &= ~((1 << i) - 1);	/* drop sizes smaller than PAGE_SIZE */
	if (!pgsz)
		return -EIO;

	pgsz &= ~(pgsz - 1);	/* keep only the lowest remaining size bit */
	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		/*
		 * NOTE(review): __pci_read_base() presumably returns the
		 * number of extra BAR slots consumed (64-bit BARs), which
		 * advances i past the upper half — confirm against pci.h.
		 */
		i += __pci_read_base(dev, pci_bar_unknown, res,
				     pos + PCI_SRIOV_BAR + i * 4);
		if (!res->flags)
			continue;
		if (resource_size(res) & (PAGE_SIZE - 1)) {
			rc = -EIO;
			goto failed;
		}
		/* PF resource must hold one VF BAR slice per possible VF */
		res->end = res->start + resource_size(res) * total - 1;
		nres++;
	}

	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
	if (!iov) {
		rc = -ENOMEM;
		goto failed;
	}

	iov->pos = pos;
	iov->nres = nres;
	iov->ctrl = ctrl;
	iov->total_VFs = total;
	iov->offset = offset;
	iov->stride = stride;
	iov->pgsz = pgsz;
	iov->self = dev;
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);

	/* dependent PF: hold a reference on the PF that owns the shared lock */
	if (pdev)
		iov->dev = pci_dev_get(pdev);
	else
		iov->dev = dev;

	mutex_init(&iov->lock);

	dev->sriov = iov;
	dev->is_physfn = 1;

	return 0;

failed:
	/* invalidate any IOV resources we started to fill in */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		res->flags = 0;
	}

	return rc;
}
| 520 | |
| 521 | static void sriov_release(struct pci_dev *dev) |
| 522 | { |
| 523 | BUG_ON(dev->sriov->num_VFs); |
| 524 | |
| 525 | if (dev != dev->sriov->dev) |
| 526 | pci_dev_put(dev->sriov->dev); |
| 527 | |
| 528 | mutex_destroy(&dev->sriov->lock); |
| 529 | |
| 530 | kfree(dev->sriov); |
| 531 | dev->sriov = NULL; |
| 532 | } |
| 533 | |
/*
 * sriov_restore_state - reprogram SR-IOV config state from cached values
 *
 * No-op if VF Enable is already set in hardware (state survived).
 * Otherwise rewrites the IOV BARs, system page size, NumVFs and the
 * control word from dev->sriov, in that order.
 */
static void sriov_restore_state(struct pci_dev *dev)
{
	int i;
	u16 ctrl;
	struct pci_sriov *iov = dev->sriov;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE)
		return;

	for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
		pci_update_resource(dev, i);

	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->num_VFs);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	/* if VFs were just re-enabled, allow them time to settle */
	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
		msleep(100);
}
| 553 | |
| 554 | /** |
| 555 | * pci_iov_init - initialize the IOV capability |
| 556 | * @dev: the PCI device |
| 557 | * |
| 558 | * Returns 0 on success, or negative on failure. |
| 559 | */ |
| 560 | int pci_iov_init(struct pci_dev *dev) |
| 561 | { |
| 562 | int pos; |
| 563 | |
| 564 | if (!pci_is_pcie(dev)) |
| 565 | return -ENODEV; |
| 566 | |
| 567 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); |
| 568 | if (pos) |
| 569 | return sriov_init(dev, pos); |
| 570 | |
| 571 | return -ENODEV; |
| 572 | } |
| 573 | |
| 574 | /** |
| 575 | * pci_iov_release - release resources used by the IOV capability |
| 576 | * @dev: the PCI device |
| 577 | */ |
| 578 | void pci_iov_release(struct pci_dev *dev) |
| 579 | { |
| 580 | if (dev->is_physfn) |
| 581 | sriov_release(dev); |
| 582 | } |
| 583 | |
| 584 | /** |
| 585 | * pci_iov_resource_bar - get position of the SR-IOV BAR |
| 586 | * @dev: the PCI device |
| 587 | * @resno: the resource number |
| 588 | * @type: the BAR type to be filled in |
| 589 | * |
| 590 | * Returns position of the BAR encapsulated in the SR-IOV capability. |
| 591 | */ |
| 592 | int pci_iov_resource_bar(struct pci_dev *dev, int resno, |
| 593 | enum pci_bar_type *type) |
| 594 | { |
| 595 | if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END) |
| 596 | return 0; |
| 597 | |
| 598 | BUG_ON(!dev->is_physfn); |
| 599 | |
| 600 | *type = pci_bar_unknown; |
| 601 | |
| 602 | return dev->sriov->pos + PCI_SRIOV_BAR + |
| 603 | 4 * (resno - PCI_IOV_RESOURCES); |
| 604 | } |
| 605 | |
| 606 | /** |
| 607 | * pci_sriov_resource_alignment - get resource alignment for VF BAR |
| 608 | * @dev: the PCI device |
| 609 | * @resno: the resource number |
| 610 | * |
| 611 | * Returns the alignment of the VF BAR found in the SR-IOV capability. |
| 612 | * This is not the same as the resource size which is defined as |
| 613 | * the VF BAR size multiplied by the number of VFs. The alignment |
| 614 | * is just the VF BAR size. |
| 615 | */ |
| 616 | resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno) |
| 617 | { |
| 618 | struct resource tmp; |
| 619 | enum pci_bar_type type; |
| 620 | int reg = pci_iov_resource_bar(dev, resno, &type); |
| 621 | |
| 622 | if (!reg) |
| 623 | return 0; |
| 624 | |
| 625 | __pci_read_base(dev, type, &tmp, reg); |
| 626 | return resource_alignment(&tmp); |
| 627 | } |
| 628 | |
| 629 | /** |
| 630 | * pci_restore_iov_state - restore the state of the IOV capability |
| 631 | * @dev: the PCI device |
| 632 | */ |
| 633 | void pci_restore_iov_state(struct pci_dev *dev) |
| 634 | { |
| 635 | if (dev->is_physfn) |
| 636 | sriov_restore_state(dev); |
| 637 | } |
| 638 | |
| 639 | /** |
| 640 | * pci_iov_bus_range - find bus range used by Virtual Function |
| 641 | * @bus: the PCI bus |
| 642 | * |
| 643 | * Returns max number of buses (exclude current one) used by Virtual |
| 644 | * Functions. |
| 645 | */ |
| 646 | int pci_iov_bus_range(struct pci_bus *bus) |
| 647 | { |
| 648 | int max = 0; |
| 649 | u8 busnr; |
| 650 | struct pci_dev *dev; |
| 651 | |
| 652 | list_for_each_entry(dev, &bus->devices, bus_list) { |
| 653 | if (!dev->is_physfn) |
| 654 | continue; |
| 655 | busnr = virtfn_bus(dev, dev->sriov->total_VFs - 1); |
| 656 | if (busnr > max) |
| 657 | max = busnr; |
| 658 | } |
| 659 | |
| 660 | return max ? max - bus->number : 0; |
| 661 | } |
| 662 | |
| 663 | /** |
| 664 | * pci_enable_sriov - enable the SR-IOV capability |
| 665 | * @dev: the PCI device |
| 666 | * @nr_virtfn: number of virtual functions to enable |
| 667 | * |
| 668 | * Returns 0 on success, or negative on failure. |
| 669 | */ |
| 670 | int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) |
| 671 | { |
| 672 | might_sleep(); |
| 673 | |
| 674 | if (!dev->is_physfn) |
| 675 | return -ENODEV; |
| 676 | |
| 677 | return sriov_enable(dev, nr_virtfn); |
| 678 | } |
| 679 | EXPORT_SYMBOL_GPL(pci_enable_sriov); |
| 680 | |
| 681 | /** |
| 682 | * pci_disable_sriov - disable the SR-IOV capability |
| 683 | * @dev: the PCI device |
| 684 | */ |
| 685 | void pci_disable_sriov(struct pci_dev *dev) |
| 686 | { |
| 687 | might_sleep(); |
| 688 | |
| 689 | if (!dev->is_physfn) |
| 690 | return; |
| 691 | |
| 692 | sriov_disable(dev); |
| 693 | } |
| 694 | EXPORT_SYMBOL_GPL(pci_disable_sriov); |
| 695 | |
| 696 | /** |
| 697 | * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration |
| 698 | * @dev: the PCI device |
| 699 | * |
| 700 | * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not. |
| 701 | * |
| 702 | * Physical Function driver is responsible to register IRQ handler using |
| 703 | * VF Migration Interrupt Message Number, and call this function when the |
| 704 | * interrupt is generated by the hardware. |
| 705 | */ |
| 706 | irqreturn_t pci_sriov_migration(struct pci_dev *dev) |
| 707 | { |
| 708 | if (!dev->is_physfn) |
| 709 | return IRQ_NONE; |
| 710 | |
| 711 | return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE; |
| 712 | } |
| 713 | EXPORT_SYMBOL_GPL(pci_sriov_migration); |
| 714 | |
| 715 | /** |
| 716 | * pci_num_vf - return number of VFs associated with a PF device_release_driver |
| 717 | * @dev: the PCI device |
| 718 | * |
| 719 | * Returns number of VFs, or 0 if SR-IOV is not enabled. |
| 720 | */ |
| 721 | int pci_num_vf(struct pci_dev *dev) |
| 722 | { |
| 723 | if (!dev->is_physfn) |
| 724 | return 0; |
| 725 | |
| 726 | return dev->sriov->num_VFs; |
| 727 | } |
| 728 | EXPORT_SYMBOL_GPL(pci_num_vf); |
| 729 | |
| 730 | /** |
| 731 | * pci_vfs_assigned - returns number of VFs are assigned to a guest |
| 732 | * @dev: the PCI device |
| 733 | * |
| 734 | * Returns number of VFs belonging to this device that are assigned to a guest. |
| 735 | * If device is not a physical function returns -ENODEV. |
| 736 | */ |
| 737 | int pci_vfs_assigned(struct pci_dev *dev) |
| 738 | { |
| 739 | struct pci_dev *vfdev; |
| 740 | unsigned int vfs_assigned = 0; |
| 741 | unsigned short dev_id; |
| 742 | |
| 743 | /* only search if we are a PF */ |
| 744 | if (!dev->is_physfn) |
| 745 | return 0; |
| 746 | |
| 747 | /* |
| 748 | * determine the device ID for the VFs, the vendor ID will be the |
| 749 | * same as the PF so there is no need to check for that one |
| 750 | */ |
| 751 | pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_VF_DID, &dev_id); |
| 752 | |
| 753 | /* loop through all the VFs to see if we own any that are assigned */ |
| 754 | vfdev = pci_get_device(dev->vendor, dev_id, NULL); |
| 755 | while (vfdev) { |
| 756 | /* |
| 757 | * It is considered assigned if it is a virtual function with |
| 758 | * our dev as the physical function and the assigned bit is set |
| 759 | */ |
| 760 | if (vfdev->is_virtfn && (vfdev->physfn == dev) && |
| 761 | (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) |
| 762 | vfs_assigned++; |
| 763 | |
| 764 | vfdev = pci_get_device(dev->vendor, dev_id, vfdev); |
| 765 | } |
| 766 | |
| 767 | return vfs_assigned; |
| 768 | } |
| 769 | EXPORT_SYMBOL_GPL(pci_vfs_assigned); |
| 770 | |
| 771 | /** |
| 772 | * pci_sriov_set_totalvfs -- reduce the TotalVFs available |
| 773 | * @dev: the PCI PF device |
| 774 | * @numvfs: number that should be used for TotalVFs supported |
| 775 | * |
| 776 | * Should be called from PF driver's probe routine with |
| 777 | * device's mutex held. |
| 778 | * |
| 779 | * Returns 0 if PF is an SRIOV-capable device and |
| 780 | * value of numvfs valid. If not a PF with VFS, return -EINVAL; |
| 781 | * if VFs already enabled, return -EBUSY. |
| 782 | */ |
| 783 | int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs) |
| 784 | { |
| 785 | if (!dev->is_physfn || (numvfs > dev->sriov->total_VFs)) |
| 786 | return -EINVAL; |
| 787 | |
| 788 | /* Shouldn't change if VFs already enabled */ |
| 789 | if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE) |
| 790 | return -EBUSY; |
| 791 | else |
| 792 | dev->sriov->driver_max_VFs = numvfs; |
| 793 | |
| 794 | return 0; |
| 795 | } |
| 796 | EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs); |
| 797 | |
| 798 | /** |
| 799 | * pci_sriov_get_totalvfs -- get total VFs supported on this devic3 |
| 800 | * @dev: the PCI PF device |
| 801 | * |
| 802 | * For a PCIe device with SRIOV support, return the PCIe |
| 803 | * SRIOV capability value of TotalVFs or the value of driver_max_VFs |
| 804 | * if the driver reduced it. Otherwise, -EINVAL. |
| 805 | */ |
| 806 | int pci_sriov_get_totalvfs(struct pci_dev *dev) |
| 807 | { |
| 808 | if (!dev->is_physfn) |
| 809 | return -EINVAL; |
| 810 | |
| 811 | if (dev->sriov->driver_max_VFs) |
| 812 | return dev->sriov->driver_max_VFs; |
| 813 | |
| 814 | return dev->sriov->total_VFs; |
| 815 | } |
| 816 | EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs); |