#define PCIE_PORT_SERVICE_AER (1 << PCIE_PORT_SERVICE_AER_SHIFT)
#define PCIE_PORT_SERVICE_HP_SHIFT 2 /* Native Hotplug */
#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT)
-#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */
-#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
-#define PCIE_PORT_SERVICE_DPC_SHIFT 4 /* Downstream Port Containment */
+#define PCIE_PORT_SERVICE_DPC_SHIFT 3 /* Downstream Port Containment */
#define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT)
-#define PCIE_PORT_DEVICE_MAXSERVICES 5
+#define PCIE_PORT_DEVICE_MAXSERVICES 4
/* Port Type */
#define PCIE_ANY_PORT (~0)
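
With the Virtual Channel entry gone, DPC slides down to bit 3 and the service mask packs into four contiguous bits. A stand-alone sketch (user-space C that mirrors the macros above rather than including the kernel header) of how the renumbered bits line up:

#include <stdio.h>

#define PCIE_PORT_SERVICE_PME_SHIFT	0	/* Power Management Event */
#define PCIE_PORT_SERVICE_PME		(1 << PCIE_PORT_SERVICE_PME_SHIFT)
#define PCIE_PORT_SERVICE_AER_SHIFT	1	/* Advanced Error Reporting */
#define PCIE_PORT_SERVICE_AER		(1 << PCIE_PORT_SERVICE_AER_SHIFT)
#define PCIE_PORT_SERVICE_HP_SHIFT	2	/* Native Hotplug */
#define PCIE_PORT_SERVICE_HP		(1 << PCIE_PORT_SERVICE_HP_SHIFT)
#define PCIE_PORT_SERVICE_DPC_SHIFT	3	/* Downstream Port Containment */
#define PCIE_PORT_SERVICE_DPC		(1 << PCIE_PORT_SERVICE_DPC_SHIFT)
#define PCIE_PORT_DEVICE_MAXSERVICES	4

static const char *names[PCIE_PORT_DEVICE_MAXSERVICES] = {
	"PME", "AER", "HP", "DPC",
};

int main(void)
{
	int mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_DPC;	/* arbitrary example mask */
	int i;

	/* every remaining service fits below MAXSERVICES, so 0..3 covers them all */
	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
		printf("bit %d (%s): %s\n", i, names[i],
		       (mask & (1 << i)) ? "requested" : "-");
	return 0;
}
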
	flags = root->osc_control_set;
-	*srv_mask = PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_DPC;
+	*srv_mask = PCIE_PORT_SERVICE_DPC;
	if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
		*srv_mask |= PCIE_PORT_SERVICE_HP;
	if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
	if (ret < 0)
		return -ENODEV;
-	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
-		if (i != PCIE_PORT_SERVICE_VC_SHIFT)
-			irqs[i] = pci_irq_vector(dev, 0);
-	}
+	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+		irqs[i] = pci_irq_vector(dev, 0);
	return 0;
}
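
Since the loop no longer has a VC slot to skip, the legacy-interrupt fallback simply points every service at vector 0. A minimal sketch of that behaviour, with pci_irq_vector() replaced by a made-up stub so it can run outside the kernel:

#include <stdio.h>

#define PCIE_PORT_DEVICE_MAXSERVICES	4

/* stand-in for pci_irq_vector(dev, 0); the IRQ number is invented */
static int fake_legacy_irq(void)
{
	return 16;
}

int main(void)
{
	int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
	int i;

	/* no VC slot to skip any more: every service shares vector 0 */
	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
		irqs[i] = fake_legacy_irq();

	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
		printf("service %d -> IRQ %d\n", i, irqs[i]);
	return 0;
}
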
	int services = 0;
	int cap_mask = 0;
-	cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
-		| PCIE_PORT_SERVICE_VC;
+	cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP;
	if (pci_aer_available())
		cap_mask |= PCIE_PORT_SERVICE_AER | PCIE_PORT_SERVICE_DPC;
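
For reference, a detached sketch of the capability gating above: the default cap_mask no longer advertises VC, and AER/DPC are only offered when the AER core is available (pci_aer_available() in the real code; a plain boolean here):

#include <stdbool.h>
#include <stdio.h>

#define PCIE_PORT_SERVICE_PME	(1 << 0)
#define PCIE_PORT_SERVICE_AER	(1 << 1)
#define PCIE_PORT_SERVICE_HP	(1 << 2)
#define PCIE_PORT_SERVICE_DPC	(1 << 3)

/* mirrors the cap_mask construction above; aer_available stands in
 * for pci_aer_available() */
static int build_cap_mask(bool aer_available)
{
	int cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP;

	if (aer_available)
		cap_mask |= PCIE_PORT_SERVICE_AER | PCIE_PORT_SERVICE_DPC;
	return cap_mask;
}

int main(void)
{
	printf("cap_mask with AER core:    0x%x\n", build_cap_mask(true));
	printf("cap_mask without AER core: 0x%x\n", build_cap_mask(false));
	return 0;
}
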
		 */
		pci_disable_pcie_error_reporting(dev);
	}
-	/* VC support */
-	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
-		services |= PCIE_PORT_SERVICE_VC;
	/* Root ports are capable of generating PME too */
	if ((cap_mask & PCIE_PORT_SERVICE_PME)
	    && pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
	 */
	status = pcie_init_service_irqs(dev, irqs, capabilities);
	if (status) {
-		capabilities &= PCIE_PORT_SERVICE_HP;
+		capabilities &= PCIE_PORT_SERVICE_HP;
		if (!capabilities)
			goto error_disable;
	}
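
If pcie_init_service_irqs() fails, the only service left standing is native hotplug, which can fall back to polling; VC no longer needs to be preserved here. A small stand-alone sketch of that masking, with the failure simulated by a flag:

#include <stdio.h>

#define PCIE_PORT_SERVICE_PME	(1 << 0)
#define PCIE_PORT_SERVICE_AER	(1 << 1)
#define PCIE_PORT_SERVICE_HP	(1 << 2)
#define PCIE_PORT_SERVICE_DPC	(1 << 3)

int main(void)
{
	int capabilities = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
			   PCIE_PORT_SERVICE_AER;	/* arbitrary example */
	int status = -1;	/* pretend pcie_init_service_irqs() failed */

	if (status) {
		/* keep only services that can run without an IRQ
		 * (hotplug can poll); VC is no longer carried along */
		capabilities &= PCIE_PORT_SERVICE_HP;
		if (!capabilities)
			printf("no usable services left, would bail out\n");
	}
	printf("remaining capabilities: 0x%x\n", capabilities);
	return 0;
}
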