
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This is mostly updates of the usual drivers: UFS, esp_scsi, NCR5380,
  qla2xxx, lpfc, libsas, hisi_sas.

  In addition there's a set of mostly small updates to the target
  subsystem, a set of conversions to the generic DMA API, which do have
  some potential for issues in the older drivers, but we'll handle those
  as case-by-case fixes.

  A new myrs driver for the DAC960/Mylex RAID controllers to replace
  the block-based DAC960 driver, which is also being removed by Jens in
  this merge window.

  Plus the usual slew of trivial changes"

[ "myrs" stands for "MYlex Raid Scsi". Obviously. Silly of me to even
  wonder. There's also a "myrb" driver, where the 'b' stands for
  'block'. Truly, somebody has got mad naming skillz. - Linus ]

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (237 commits)
  scsi: myrs: Fix the processor absent message in processor_show()
  scsi: myrs: Fix a logical vs bitwise bug
  scsi: hisi_sas: Fix NULL pointer dereference
  scsi: myrs: fix build failure on 32 bit
  scsi: fnic: replace gross legacy tag hack with blk-mq hack
  scsi: mesh: switch to generic DMA API
  scsi: ips: switch to generic DMA API
  scsi: smartpqi: fully convert to the generic DMA API
  scsi: vmw_pscsi: switch to generic DMA API
  scsi: snic: switch to generic DMA API
  scsi: qla4xxx: fully convert to the generic DMA API
  scsi: qla2xxx: fully convert to the generic DMA API
  scsi: qla1280: switch to generic DMA API
  scsi: qedi: fully convert to the generic DMA API
  scsi: qedf: fully convert to the generic DMA API
  scsi: pm8001: switch to generic DMA API
  scsi: nsp32: switch to generic DMA API
  scsi: mvsas: fully convert to the generic DMA API
  scsi: mvumi: switch to generic DMA API
  scsi: mpt3sas: switch to generic DMA API
  ...
Linus Torvalds
2018-10-25 07:40:30 -07:00
200 changed files with 16045 additions and 5954 deletions

View File

@@ -128,6 +128,26 @@ The current UFSHCD implementation supports following functionality,
 In this version of UFSHCD Query requests and power management
 functionality are not implemented.
 
+4. BSG Support
+------------------
+
+This transport driver supports exchanging UFS protocol information units
+(UPIUs) with a UFS device. Typically, user space will allocate
+struct ufs_bsg_request and struct ufs_bsg_reply (see ufs_bsg.h) as
+request_upiu and reply_upiu respectively. Filling those UPIUs should
+be done in accordance with JEDEC spec UFS2.1 paragraph 10.7.
+*Caveat emptor*: The driver makes no further input validations and sends the
+UPIU to the device as it is. Open the bsg device in /dev/ufs-bsg and
+send SG_IO with the applicable sg_io_v4:
+
+	io_hdr_v4.guard = 'Q';
+	io_hdr_v4.protocol = BSG_PROTOCOL_SCSI;
+	io_hdr_v4.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
+	io_hdr_v4.response = (__u64)reply_upiu;
+	io_hdr_v4.max_response_len = reply_len;
+	io_hdr_v4.request_len = request_len;
+	io_hdr_v4.request = (__u64)request_upiu;
+
 UFS Specifications can be found at,
 UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf
 UFSHCI - http://www.jedec.org/sites/default/files/docs/JESD223.pdf

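For context, the SG_IO sequence described above can be exercised from user
space roughly as follows. This is a sketch only: the /dev/ufs-bsg path and
the field assignments come from the documentation text, while the helper
name, the void * buffer types and the minimal error handling are
illustrative assumptions, not kernel API.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/bsg.h>	/* struct sg_io_v4, BSG_PROTOCOL_SCSI */
	#include <scsi/sg.h>	/* SG_IO */

	/* Send one request UPIU and receive the reply through the bsg node. */
	static int ufs_bsg_io(void *request_upiu, uint32_t request_len,
			      void *reply_upiu, uint32_t reply_len)
	{
		struct sg_io_v4 io_hdr_v4;
		int ret, fd = open("/dev/ufs-bsg", O_RDWR);

		if (fd < 0)
			return -1;

		memset(&io_hdr_v4, 0, sizeof(io_hdr_v4));
		io_hdr_v4.guard = 'Q';
		io_hdr_v4.protocol = BSG_PROTOCOL_SCSI;
		io_hdr_v4.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
		io_hdr_v4.response = (uint64_t)(uintptr_t)reply_upiu;
		io_hdr_v4.max_response_len = reply_len;
		io_hdr_v4.request_len = request_len;
		io_hdr_v4.request = (uint64_t)(uintptr_t)request_upiu;

		ret = ioctl(fd, SG_IO, &io_hdr_v4);
		if (ret < 0)
			perror("SG_IO");

		close(fd);
		return ret;
	}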
View File

@@ -4055,7 +4055,7 @@ M: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/cxlflash/
-F:	include/uapi/scsi/cxlflash_ioctls.h
+F:	include/uapi/scsi/cxlflash_ioctl.h
 F:	Documentation/powerpc/cxlflash.txt
 
 CYBERPRO FB DRIVER
@@ -10011,6 +10011,13 @@ S:	Supported
 F:	drivers/gpu/drm/mxsfb/
 F:	Documentation/devicetree/bindings/display/mxsfb.txt
 
+MYLEX DAC960 PCI RAID Controller
+M:	Hannes Reinecke <hare@kernel.org>
+L:	linux-scsi@vger.kernel.org
+S:	Supported
+F:	drivers/scsi/myrb.*
+F:	drivers/scsi/myrs.*
+
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
 M:	Chris Lee <christopher.lee@cspi.com>
 L:	netdev@vger.kernel.org

View File

@@ -2004,7 +2004,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_6
     U64                     LinkFailureCount;           /* 50h */
     U64                     LossOfSyncCount;            /* 58h */
     U64                     LossOfSignalCount;          /* 60h */
-    U64                     PrimativeSeqErrCount;       /* 68h */
+    U64                     PrimitiveSeqErrCount;       /* 68h */
     U64                     InvalidTxWordCount;         /* 70h */
     U64                     InvalidCrcCount;            /* 78h */
     U64                     FcpInitiatorIoCount;        /* 80h */

View File

@@ -335,11 +335,11 @@ static int mpt_remove_dead_ioc_func(void *arg)
 	MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
 	struct pci_dev *pdev;
 
-	if ((ioc == NULL))
+	if (!ioc)
 		return -1;
 
 	pdev = ioc->pcidev;
-	if ((pdev == NULL))
+	if (!pdev)
 		return -1;
 
 	pci_stop_and_remove_bus_device_locked(pdev);
@@ -7570,11 +7570,11 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
 		u8 phy_num = (u8)(evData0);
 		u8 port_num = (u8)(evData0 >> 8);
 		u8 port_width = (u8)(evData0 >> 16);
-		u8 primative = (u8)(evData0 >> 24);
+		u8 primitive = (u8)(evData0 >> 24);
 		snprintf(evStr, EVENT_DESCR_STR_SZ,
-		    "SAS Broadcase Primative: phy=%d port=%d "
-		    "width=%d primative=0x%02x",
-		    phy_num, port_num, port_width, primative);
+		    "SAS Broadcast Primitive: phy=%d port=%d "
+		    "width=%d primitive=0x%02x",
+		    phy_num, port_num, port_width, primitive);
 		break;
 	}

View File

@@ -129,7 +129,7 @@ static void mptsas_expander_delete(MPT_ADAPTER *ioc,
 static void mptsas_send_expander_event(struct fw_event_work *fw_event);
 static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
 static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
-static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
+static void mptsas_broadcast_primitive_work(struct fw_event_work *fw_event);
 static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
 static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
 void	mptsas_schedule_target_reset(void *ioc);
@@ -1665,7 +1665,7 @@ mptsas_firmware_event_work(struct work_struct *work)
 		mptsas_free_fw_event(ioc, fw_event);
 		break;
 	case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
-		mptsas_broadcast_primative_work(fw_event);
+		mptsas_broadcast_primitive_work(fw_event);
 		break;
 	case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
 		mptsas_send_expander_event(fw_event);
@@ -4826,13 +4826,13 @@ mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
 }
 
 /**
- * mptsas_broadcast_primative_work - Handle broadcast primitives
+ * mptsas_broadcast_primitive_work - Handle broadcast primitives
  * @work: work queue payload containing info describing the event
  *
  * this will be handled in workqueue context.
  */
 static void
-mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
+mptsas_broadcast_primitive_work(struct fw_event_work *fw_event)
 {
 	MPT_ADAPTER *ioc = fw_event->ioc;
 	MPT_FRAME_HDR	*mf;

View File

@@ -518,7 +518,8 @@ static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
 	unsigned long *cpu_addr;
 	int retval = 1;
 
-	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+			size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
 	if (!cpu_addr) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
 		goto out;
@@ -526,7 +527,8 @@ static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
 	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
-		pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
+				cpu_addr, dma_handle);
 		goto out;
 	}
@@ -1027,16 +1029,16 @@ out:
 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
 {
 	if (tw_dev->command_packet_virt[0])
-		pci_free_consistent(tw_dev->tw_pci_dev,
-			sizeof(TW_Command_Full)*TW_Q_LENGTH,
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+			sizeof(TW_Command_Full) * TW_Q_LENGTH,
 			tw_dev->command_packet_virt[0],
 			tw_dev->command_packet_phys[0]);
 
 	if (tw_dev->generic_buffer_virt[0])
-		pci_free_consistent(tw_dev->tw_pci_dev,
-			TW_SECTOR_SIZE*TW_Q_LENGTH,
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+			TW_SECTOR_SIZE * TW_Q_LENGTH,
 			tw_dev->generic_buffer_virt[0],
 			tw_dev->generic_buffer_phys[0]);
 
 	kfree(tw_dev->event_queue[0]);
@@ -2015,14 +2017,12 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-			TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
-			retval = -ENODEV;
-			goto out_disable_device;
-		}
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+		TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
+		retval = -ENODEV;
+		goto out_disable_device;
+	}
 
 	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
 	if (!host) {
@@ -2237,14 +2237,12 @@ static int twa_resume(struct pci_dev *pdev)
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-			TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
-			retval = -ENODEV;
-			goto out_disable_device;
-		}
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+		TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
+		retval = -ENODEV;
+		goto out_disable_device;
+	}
 
 	/* Initialize the card */
 	if (twa_reset_sequence(tw_dev, 0)) {

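The 3ware conversion above is representative of the whole DMA API series:
pci_alloc_consistent()/pci_free_consistent() become
dma_alloc_coherent()/dma_free_coherent() against &pdev->dev with an explicit
GFP flag, and the pci_set_dma_mask()/pci_set_consistent_dma_mask() cascade
collapses into dma_set_mask_and_coherent(). A compile-level sketch of the
resulting idiom on a hypothetical driver (the helper name and the sequential
64-then-32 fallback spelling are illustrative, not taken from any one driver
in the series):

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* Hypothetical probe helper: negotiate a DMA mask, then grab one
	 * coherent buffer through the generic DMA API.
	 */
	static void *example_dma_setup(struct pci_dev *pdev, size_t size,
				       dma_addr_t *handle)
	{
		/* One call now covers what pci_set_dma_mask() and
		 * pci_set_consistent_dma_mask() did separately; try the
		 * 64-bit mask first, then fall back to 32-bit.
		 */
		int ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

		if (ret)
			ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			return NULL;	/* no usable DMA mask */

		/* pci_alloc_consistent(pdev, size, handle) becomes: */
		return dma_alloc_coherent(&pdev->dev, size, handle, GFP_KERNEL);
	}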
View File

@@ -644,8 +644,8 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
 	unsigned long *cpu_addr;
 	int retval = 1;
 
-	cpu_addr = pci_zalloc_consistent(tw_dev->tw_pci_dev, size * TW_Q_LENGTH,
-					 &dma_handle);
+	cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev,
+			size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
 	if (!cpu_addr) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
 		goto out;
@@ -899,19 +899,19 @@ out:
 static void twl_free_device_extension(TW_Device_Extension *tw_dev)
 {
 	if (tw_dev->command_packet_virt[0])
-		pci_free_consistent(tw_dev->tw_pci_dev,
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
 			sizeof(TW_Command_Full)*TW_Q_LENGTH,
 			tw_dev->command_packet_virt[0],
 			tw_dev->command_packet_phys[0]);
 
 	if (tw_dev->generic_buffer_virt[0])
-		pci_free_consistent(tw_dev->tw_pci_dev,
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
 			TW_SECTOR_SIZE*TW_Q_LENGTH,
 			tw_dev->generic_buffer_virt[0],
 			tw_dev->generic_buffer_phys[0]);
 
 	if (tw_dev->sense_buffer_virt[0])
-		pci_free_consistent(tw_dev->tw_pci_dev,
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
 			sizeof(TW_Command_Apache_Header)*
 			TW_Q_LENGTH,
 			tw_dev->sense_buffer_virt[0],
@@ -1571,14 +1571,12 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-			TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
-			retval = -ENODEV;
-			goto out_disable_device;
-		}
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+		TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
+		retval = -ENODEV;
+		goto out_disable_device;
+	}
 
 	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
 	if (!host) {
@@ -1805,14 +1803,12 @@ static int twl_resume(struct pci_dev *pdev)
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-			TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
-			retval = -ENODEV;
-			goto out_disable_device;
-		}
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+		TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
+		retval = -ENODEV;
+		goto out_disable_device;
+	}
 
 	/* Initialize the card */
 	if (twl_reset_sequence(tw_dev, 0)) {

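Unlike 3w-9xxx, the twl path (and inia100 below) uses the zeroing variant
dma_zalloc_coherent(). At the time of this merge that was a thin wrapper over
dma_alloc_coherent(), roughly as sketched here (an approximation of the
include/linux/dma-mapping.h helper, not a verbatim copy):

	#include <linux/dma-mapping.h>

	/* Zeroing variant: dma_alloc_coherent() plus __GFP_ZERO, so callers
	 * converted from pci_zalloc_consistent() can skip an explicit
	 * memset() of the returned buffer.
	 */
	static inline void *zalloc_coherent_sketch(struct device *dev,
						   size_t size,
						   dma_addr_t *dma_handle,
						   gfp_t flag)
	{
		return dma_alloc_coherent(dev, size, dma_handle,
					  flag | __GFP_ZERO);
	}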
View File

@@ -834,15 +834,17 @@ static int tw_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
 	dprintk(KERN_NOTICE "3w-xxxx: tw_allocate_memory()\n");
 
-	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+			size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
 	if (cpu_addr == NULL) {
-		printk(KERN_WARNING "3w-xxxx: pci_alloc_consistent() failed.\n");
+		printk(KERN_WARNING "3w-xxxx: dma_alloc_coherent() failed.\n");
 		return 1;
 	}
 
 	if ((unsigned long)cpu_addr % (tw_dev->tw_pci_dev->device == TW_DEVICE_ID ? TW_ALIGNMENT_6000 : TW_ALIGNMENT_7000)) {
 		printk(KERN_WARNING "3w-xxxx: Couldn't allocate correctly aligned memory.\n");
-		pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
+				cpu_addr, dma_handle);
 		return 1;
 	}
@@ -1062,10 +1064,16 @@ static void tw_free_device_extension(TW_Device_Extension *tw_dev)
 	/* Free command packet and generic buffer memory */
 	if (tw_dev->command_packet_virtual_address[0])
-		pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Command)*TW_Q_LENGTH, tw_dev->command_packet_virtual_address[0], tw_dev->command_packet_physical_address[0]);
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+				sizeof(TW_Command) * TW_Q_LENGTH,
+				tw_dev->command_packet_virtual_address[0],
+				tw_dev->command_packet_physical_address[0]);
 
 	if (tw_dev->alignment_virtual_address[0])
-		pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Sector)*TW_Q_LENGTH, tw_dev->alignment_virtual_address[0], tw_dev->alignment_physical_address[0]);
+		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+				sizeof(TW_Sector) * TW_Q_LENGTH,
+				tw_dev->alignment_virtual_address[0],
+				tw_dev->alignment_physical_address[0]);
 } /* End tw_free_device_extension() */
 
 /* This function will send an initconnection command to controller */
@@ -2260,7 +2268,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 	pci_set_master(pdev);
 
-	retval = pci_set_dma_mask(pdev, TW_DMA_MASK);
+	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (retval) {
 		printk(KERN_WARNING "3w-xxxx: Failed to set dma mask.");
 		goto out_disable_device;

View File

@@ -230,7 +230,6 @@ static unsigned char tw_sense_table[][4] =
 #define TW_IOCTL_TIMEOUT                      25 /* 25 seconds */
 #define TW_IOCTL_CHRDEV_TIMEOUT               60 /* 60 seconds */
 #define TW_IOCTL_CHRDEV_FREE                  -1
-#define TW_DMA_MASK                           DMA_BIT_MASK(32)
 #define TW_MAX_CDB_LEN                        16
 
 /* Bitmask macros to eliminate bitfields */

View File

@@ -90,7 +90,7 @@ struct NCR_700_Device_Parameters {
 /* The SYNC negotiation sequence looks like:
  *
  * If DEV_NEGOTIATED_SYNC not set, tack and SDTR message on to the
- * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTATION
+ * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTIATION
  * If we get an SDTR reply, work out the SXFER parameters, squirrel
  * them away here, clear DEV_BEGIN_SYNC_NEGOTIATION and set
  * DEV_NEGOTIATED_SYNC. If we get a REJECT msg, squirrel

View File

@@ -201,8 +201,8 @@ static bool __init blogic_create_initccbs(struct blogic_adapter *adapter)
 	dma_addr_t blkp;
 
 	while (adapter->alloc_ccbs < adapter->initccbs) {
-		blk_pointer = pci_alloc_consistent(adapter->pci_device,
-				blk_size, &blkp);
+		blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
+				blk_size, &blkp, GFP_KERNEL);
 		if (blk_pointer == NULL) {
 			blogic_err("UNABLE TO ALLOCATE CCB GROUP - DETACHING\n",
 					adapter);
@@ -227,15 +227,16 @@ static void blogic_destroy_ccbs(struct blogic_adapter *adapter)
 		next_ccb = ccb->next_all;
 		if (ccb->allocgrp_head) {
 			if (lastccb)
-				pci_free_consistent(adapter->pci_device,
+				dma_free_coherent(&adapter->pci_device->dev,
 					lastccb->allocgrp_size, lastccb,
 					lastccb->allocgrp_head);
 			lastccb = ccb;
 		}
 	}
 	if (lastccb)
-		pci_free_consistent(adapter->pci_device, lastccb->allocgrp_size,
-			lastccb, lastccb->allocgrp_head);
+		dma_free_coherent(&adapter->pci_device->dev,
+			lastccb->allocgrp_size, lastccb,
+			lastccb->allocgrp_head);
 }
@@ -256,8 +257,8 @@ static void blogic_create_addlccbs(struct blogic_adapter *adapter,
 	if (addl_ccbs <= 0)
 		return;
 
 	while (adapter->alloc_ccbs - prev_alloc < addl_ccbs) {
-		blk_pointer = pci_alloc_consistent(adapter->pci_device,
-				blk_size, &blkp);
+		blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
+				blk_size, &blkp, GFP_KERNEL);
 		if (blk_pointer == NULL)
 			break;
 		blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp);
@@ -318,8 +319,8 @@ static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap)
 	if (ccb->command != NULL)
 		scsi_dma_unmap(ccb->command);
 	if (dma_unmap)
-		pci_unmap_single(adapter->pci_device, ccb->sensedata,
-			ccb->sense_datalen, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&adapter->pci_device->dev, ccb->sensedata,
+			ccb->sense_datalen, DMA_FROM_DEVICE);
 
 	ccb->command = NULL;
 	ccb->status = BLOGIC_CCB_FREE;
@@ -712,7 +713,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 		if (pci_enable_device(pci_device))
 			continue;
 
-		if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+		if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
 			continue;
 
 		bus = pci_device->bus->number;
@@ -895,7 +896,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 		if (pci_enable_device(pci_device))
 			continue;
 
-		if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+		if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
 			continue;
 
 		bus = pci_device->bus->number;
@@ -952,7 +953,7 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
 		if (pci_enable_device(pci_device))
 			continue;
 
-		if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+		if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
 			continue;
 
 		bus = pci_device->bus->number;
@@ -2040,7 +2041,7 @@ static void blogic_relres(struct blogic_adapter *adapter)
 	   Release any allocated memory structs not released elsewhere
 	 */
 	if (adapter->mbox_space)
-		pci_free_consistent(adapter->pci_device, adapter->mbox_sz,
+		dma_free_coherent(&adapter->pci_device->dev, adapter->mbox_sz,
 			adapter->mbox_space, adapter->mbox_space_handle);
 	pci_dev_put(adapter->pci_device);
 	adapter->mbox_space = NULL;
@@ -2092,8 +2093,9 @@ static bool blogic_initadapter(struct blogic_adapter *adapter)
 	   Initialize the Outgoing and Incoming Mailbox pointers.
 	 */
 	adapter->mbox_sz = adapter->mbox_count * (sizeof(struct blogic_outbox) + sizeof(struct blogic_inbox));
-	adapter->mbox_space = pci_alloc_consistent(adapter->pci_device,
-		adapter->mbox_sz, &adapter->mbox_space_handle);
+	adapter->mbox_space = dma_alloc_coherent(&adapter->pci_device->dev,
+		adapter->mbox_sz, &adapter->mbox_space_handle,
+		GFP_KERNEL);
 	if (adapter->mbox_space == NULL)
 		return blogic_failure(adapter, "MAILBOX ALLOCATION");
 	adapter->first_outbox = (struct blogic_outbox *) adapter->mbox_space;
@@ -3183,9 +3185,9 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
 	memcpy(ccb->cdb, cdb, cdblen);
 	ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE;
 	ccb->command = command;
-	sense_buf = pci_map_single(adapter->pci_device,
-			command->sense_buffer, ccb->sense_datalen,
-			PCI_DMA_FROMDEVICE);
+	sense_buf = dma_map_single(&adapter->pci_device->dev,
+			command->sense_buffer, ccb->sense_datalen,
+			DMA_FROM_DEVICE);
 	if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) {
 		blogic_err("DMA mapping for sense data buffer failed\n",
 				adapter);

View File

@@ -2944,7 +2944,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
 	}
 
 	if (currSCCB->Lun == 0x00) {
-		if ((currSCCB->Sccb_scsistat == SELECT_SN_ST)) {
+		if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
 
 			currTar_Info->TarStatus |=
 			    (unsigned char)SYNC_SUPPORTED;
@@ -2953,8 +2953,8 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
 				    ~EE_SYNC_MASK;
 			}
 
-			else if ((currSCCB->Sccb_scsistat ==
-				  SELECT_WN_ST)) {
+			else if (currSCCB->Sccb_scsistat ==
+				 SELECT_WN_ST) {
 
 				currTar_Info->TarStatus =
 				    (currTar_Info->

View File

@@ -42,6 +42,9 @@ config SCSI_DMA
 	bool
 	default n
 
+config SCSI_ESP_PIO
+	bool
+
 config SCSI_NETLINK
 	bool
 	default	n
@@ -557,6 +560,36 @@ config SCSI_FLASHPOINT
 	  substantial, so users of MultiMaster Host Adapters may not
 	  wish to include it.
 
+config SCSI_MYRB
+	tristate "Mylex DAC960/DAC1100 PCI RAID Controller (Block Interface)"
+	depends on PCI
+	select RAID_ATTRS
+	help
+	  This driver adds support for the Mylex DAC960, AcceleRAID, and
+	  eXtremeRAID PCI RAID controllers. This driver supports the
+	  older, block based interface.
+	  This driver is a reimplementation of the original DAC960
+	  driver. If you have used the DAC960 driver you should enable
+	  this module.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called myrb.
+
+config SCSI_MYRS
+	tristate "Mylex DAC960/DAC1100 PCI RAID Controller (SCSI Interface)"
+	depends on PCI
+	select RAID_ATTRS
+	help
+	  This driver adds support for the Mylex DAC960, AcceleRAID, and
+	  eXtremeRAID PCI RAID controllers. This driver supports the
+	  newer, SCSI-based interface only.
+	  This driver is a reimplementation of the original DAC960
+	  driver. If you have used the DAC960 driver you should enable
+	  this module.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called myrs.
+
 config VMWARE_PVSCSI
 	tristate "VMware PVSCSI driver support"
 	depends on PCI && SCSI && X86
@@ -1332,6 +1365,7 @@ config SCSI_ZORRO_ESP
 	tristate "Zorro ESP SCSI support"
 	depends on ZORRO && SCSI
 	select SCSI_SPI_ATTRS
+	select SCSI_ESP_PIO
 	help
 	  Support for various NCR53C9x (ESP) based SCSI controllers on Zorro
 	  expansion boards for the Amiga.
@@ -1374,6 +1408,7 @@ config SCSI_MAC_ESP
 	tristate "Macintosh NCR53c9[46] SCSI"
 	depends on MAC && SCSI
 	select SCSI_SPI_ATTRS
+	select SCSI_ESP_PIO
 	help
 	  This is the NCR 53c9x SCSI controller found on most of the 68040
 	  based Macintoshes.

View File

@@ -106,6 +106,8 @@ obj-$(CONFIG_SCSI_INIA100) += a100u2w.o
 obj-$(CONFIG_SCSI_QLOGICPTI)	+= qlogicpti.o
 obj-$(CONFIG_SCSI_MESH)		+= mesh.o
 obj-$(CONFIG_SCSI_MAC53C94)	+= mac53c94.o
+obj-$(CONFIG_SCSI_MYRB)		+= myrb.o
+obj-$(CONFIG_SCSI_MYRS)		+= myrs.o
 obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
 obj-$(CONFIG_SCSI_3W_9XXX)	+= 3w-9xxx.o
 obj-$(CONFIG_SCSI_3W_SAS)	+= 3w-sas.o

View File

@@ -131,6 +131,7 @@
 static int do_abort(struct Scsi_Host *);
 static void do_reset(struct Scsi_Host *);
+static void bus_reset_cleanup(struct Scsi_Host *);
 
 /**
  * initialize_SCp - init the scsi pointer field
@@ -513,16 +514,15 @@ static void complete_cmd(struct Scsi_Host *instance,
 	if (hostdata->sensing == cmd) {
 		/* Autosense processing ends here */
-		if ((cmd->result & 0xff) != SAM_STAT_GOOD) {
+		if (status_byte(cmd->result) != GOOD) {
 			scsi_eh_restore_cmnd(cmd, &hostdata->ses);
-			set_host_byte(cmd, DID_ERROR);
-		} else
+		} else {
 			scsi_eh_restore_cmnd(cmd, &hostdata->ses);
+			set_driver_byte(cmd, DRIVER_SENSE);
+		}
 		hostdata->sensing = NULL;
 	}
 
-	hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
-
 	cmd->scsi_done(cmd);
 }
@@ -884,7 +884,14 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
 			/* Probably Bus Reset */
 			NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 
-			dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
+			if (sr & SR_RST) {
+				/* Certainly Bus Reset */
+				shost_printk(KERN_WARNING, instance,
+					     "bus reset interrupt\n");
+				bus_reset_cleanup(instance);
+			} else {
+				dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
+			}
 #ifdef SUN3_SCSI_VME
 			dregs->csr |= CSR_DMA_ENABLE;
 #endif
@@ -902,20 +909,16 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
 	return IRQ_RETVAL(handled);
 }
 
-/*
- * Function : int NCR5380_select(struct Scsi_Host *instance,
- * struct scsi_cmnd *cmd)
+/**
+ * NCR5380_select - attempt arbitration and selection for a given command
+ * @instance: the Scsi_Host instance
+ * @cmd: the scsi_cmnd to execute
  *
- * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
- * including ARBITRATION, SELECTION, and initial message out for
- * IDENTIFY and queue messages.
+ * This routine establishes an I_T_L nexus for a SCSI command. This involves
+ * ARBITRATION, SELECTION and MESSAGE OUT phases and an IDENTIFY message.
  *
- * Inputs : instance - instantiation of the 5380 driver on which this
- * target lives, cmd - SCSI command to execute.
- *
- * Returns cmd if selection failed but should be retried,
- * NULL if selection failed and should not be retried, or
- * NULL if selection succeeded (hostdata->connected == cmd).
+ * Returns true if the operation should be retried.
+ * Returns false if it should not be retried.
  *
  * Side effects :
  * If bus busy, arbitration failed, etc, NCR5380_select() will exit
@@ -923,16 +926,15 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
  * SELECT_ENABLE will be set appropriately, the NCR5380
  * will cease to drive any SCSI bus signals.
  *
- * If successful : I_T_L or I_T_L_Q nexus will be established,
- * instance->connected will be set to cmd.
+ * If successful : the I_T_L nexus will be established, and
+ * hostdata->connected will be set to cmd.
  * SELECT interrupt will be disabled.
  *
  * If failed (no target) : cmd->scsi_done() will be called, and the
  * cmd->result host byte set to DID_BAD_TARGET.
  */
-static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
-                                        struct scsi_cmnd *cmd)
+static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
 __releases(&hostdata->lock) __acquires(&hostdata->lock)
 {
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
@@ -940,6 +942,9 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 	unsigned char *data;
 	int len;
 	int err;
+	bool ret = true;
+	bool can_disconnect = instance->irq != NO_IRQ &&
+			      cmd->cmnd[0] != REQUEST_SENSE;
 
 	NCR5380_dprint(NDEBUG_ARBITRATION, instance);
 	dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
@@ -948,7 +953,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 	/*
 	 * Arbitration and selection phases are slow and involve dropping the
 	 * lock, so we have to watch out for EH. An exception handler may
-	 * change 'selecting' to NULL. This function will then return NULL
+	 * change 'selecting' to NULL. This function will then return false
 	 * so that the caller will forget about 'cmd'. (During information
 	 * transfer phases, EH may change 'connected' to NULL.)
 	 */
@@ -984,7 +989,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 	if (!hostdata->selecting) {
 		/* Command was aborted */
 		NCR5380_write(MODE_REG, MR_BASE);
-		goto out;
+		return false;
 	}
 	if (err < 0) {
 		NCR5380_write(MODE_REG, MR_BASE);
@@ -1033,7 +1038,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 	if (!hostdata->selecting) {
 		NCR5380_write(MODE_REG, MR_BASE);
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-		goto out;
+		return false;
 	}
 
 	dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n");
@@ -1116,13 +1121,16 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 		spin_lock_irq(&hostdata->lock);
 
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
 
 		/* Can't touch cmd if it has been reclaimed by the scsi ML */
-		if (hostdata->selecting) {
-			cmd->result = DID_BAD_TARGET << 16;
-			complete_cmd(instance, cmd);
-			dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n");
-			cmd = NULL;
-		}
+		if (!hostdata->selecting)
+			return false;
+
+		cmd->result = DID_BAD_TARGET << 16;
+		complete_cmd(instance, cmd);
+		dsprintk(NDEBUG_SELECTION, instance,
+		         "target did not respond within 250ms\n");
+		ret = false;
 		goto out;
 	}
@@ -1155,12 +1163,12 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 	}
 
 	if (!hostdata->selecting) {
 		do_abort(instance);
-		goto out;
+		return false;
 	}
 
 	dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n",
 	         scmd_id(cmd));
-	tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun);
+	tmp[0] = IDENTIFY(can_disconnect, cmd->device->lun);
 
 	len = 1;
 	data = tmp;
@@ -1171,7 +1179,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 		cmd->result = DID_ERROR << 16;
 		complete_cmd(instance, cmd);
 		dsprintk(NDEBUG_SELECTION, instance, "IDENTIFY message transfer failed\n");
-		cmd = NULL;
+		ret = false;
 		goto out;
 	}
@@ -1186,13 +1194,13 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 	initialize_SCp(cmd);
 
-	cmd = NULL;
+	ret = false;
 
 out:
 	if (!hostdata->selecting)
-		return NULL;
+		return false;
 	hostdata->selecting = NULL;
-	return cmd;
+	return ret;
 }
 
 /*
@@ -1711,6 +1719,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 			cmd->result = DID_ERROR << 16;
 			complete_cmd(instance, cmd);
 			hostdata->connected = NULL;
+			hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
 			return;
 #endif
 		case PHASE_DATAIN:
@@ -1793,6 +1802,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 				         cmd, scmd_id(cmd), cmd->device->lun);
 
 				hostdata->connected = NULL;
+				hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
 
 				cmd->result &= ~0xffff;
 				cmd->result |= cmd->SCp.Status;
NCR5380_transfer_pio(instance, &phase, &len, &data); NCR5380_transfer_pio(instance, &phase, &len, &data);
if (msgout == ABORT) { if (msgout == ABORT) {
hostdata->connected = NULL; hostdata->connected = NULL;
hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
cmd->result = DID_ERROR << 16; cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd); complete_cmd(instance, cmd);
maybe_release_dma_irq(instance); maybe_release_dma_irq(instance);
@@ -2014,8 +2025,11 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 	NCR5380_write(MODE_REG, MR_BASE);
 
 	target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
-
-	dsprintk(NDEBUG_RESELECTION, instance, "reselect\n");
+	if (!target_mask || target_mask & (target_mask - 1)) {
+		shost_printk(KERN_WARNING, instance,
+		             "reselect: bad target_mask 0x%02x\n", target_mask);
+		return;
+	}
 
 	/*
 	 * At this point, we have detected that our SCSI ID is on the bus,
@@ -2029,6 +2043,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
 	if (NCR5380_poll_politely(hostdata,
 	                          STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
+		shost_printk(KERN_ERR, instance, "reselect: !SEL timeout\n");
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 		return;
 	}
@@ -2040,6 +2055,10 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 	if (NCR5380_poll_politely(hostdata,
 	                          STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
+		if ((NCR5380_read(STATUS_REG) & (SR_BSY | SR_SEL)) == 0)
+			/* BUS FREE phase */
+			return;
+		shost_printk(KERN_ERR, instance, "reselect: REQ timeout\n");
 		do_abort(instance);
 		return;
 	}
@@ -2101,13 +2120,16 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 		dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance,
 		         "reselect: removed %p from disconnected queue\n", tmp);
 	} else {
+		int target = ffs(target_mask) - 1;
+
 		shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n",
 		             target_mask, lun);
 		/*
 		 * Since we have an established nexus that we can't do anything
 		 * with, we must abort it.
 		 */
-		do_abort(instance);
+		if (do_abort(instance) == 0)
+			hostdata->busy[target] &= ~(1 << lun);
 		return;
 	}
@@ -2272,15 +2294,16 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
 	if (list_del_cmd(&hostdata->autosense, cmd)) {
 		dsprintk(NDEBUG_ABORT, instance,
 		         "abort: removed %p from sense queue\n", cmd);
+		set_host_byte(cmd, DID_ERROR);
 		complete_cmd(instance, cmd);
 	}
 
 out:
 	if (result == FAILED)
 		dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd);
-	else
+	else {
+		hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
 		dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd);
+	}
 
 	queue_work(hostdata->work_q, &hostdata->main_task);
 	maybe_release_dma_irq(instance);
@@ -2290,31 +2313,12 @@ out:
 }
 
-/**
- * NCR5380_host_reset - reset the SCSI host
- * @cmd: SCSI command undergoing EH
- *
- * Returns SUCCESS
- */
-static int NCR5380_host_reset(struct scsi_cmnd *cmd)
+static void bus_reset_cleanup(struct Scsi_Host *instance)
 {
-	struct Scsi_Host *instance = cmd->device->host;
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	int i;
-	unsigned long flags;
 	struct NCR5380_cmd *ncmd;
 
-	spin_lock_irqsave(&hostdata->lock, flags);
-
-#if (NDEBUG & NDEBUG_ANY)
-	scmd_printk(KERN_INFO, cmd, __func__);
-#endif
-	NCR5380_dprint(NDEBUG_ANY, instance);
-	NCR5380_dprint_phase(NDEBUG_ANY, instance);
-
-	do_reset(instance);
-
 	/* reset NCR registers */
 	NCR5380_write(MODE_REG, MR_BASE);
 	NCR5380_write(TARGET_COMMAND_REG, 0);
@@ -2326,11 +2330,6 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
 	 * commands!
 	 */
 
-	if (list_del_cmd(&hostdata->unissued, cmd)) {
-		cmd->result = DID_RESET << 16;
-		cmd->scsi_done(cmd);
-	}
-
 	if (hostdata->selecting) {
 		hostdata->selecting->result = DID_RESET << 16;
 		complete_cmd(instance, hostdata->selecting);
@@ -2348,7 +2347,6 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
 	list_for_each_entry(ncmd, &hostdata->autosense, list) {
 		struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
 
-		set_host_byte(cmd, DID_RESET);
 		cmd->scsi_done(cmd);
 	}
 	INIT_LIST_HEAD(&hostdata->autosense);
@@ -2365,6 +2363,41 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
 	queue_work(hostdata->work_q, &hostdata->main_task);
 	maybe_release_dma_irq(instance);
+}
+
+/**
+ * NCR5380_host_reset - reset the SCSI host
+ * @cmd: SCSI command undergoing EH
+ *
+ * Returns SUCCESS
+ */
+static int NCR5380_host_reset(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *instance = cmd->device->host;
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+	unsigned long flags;
+	struct NCR5380_cmd *ncmd;
+
+	spin_lock_irqsave(&hostdata->lock, flags);
+
+#if (NDEBUG & NDEBUG_ANY)
+	shost_printk(KERN_INFO, instance, __func__);
+#endif
+	NCR5380_dprint(NDEBUG_ANY, instance);
+	NCR5380_dprint_phase(NDEBUG_ANY, instance);
+
+	list_for_each_entry(ncmd, &hostdata->unissued, list) {
+		struct scsi_cmnd *scmd = NCR5380_to_scmd(ncmd);
+
+		scmd->result = DID_RESET << 16;
+		scmd->scsi_done(scmd);
+	}
+	INIT_LIST_HEAD(&hostdata->unissued);
+
+	do_reset(instance);
+	bus_reset_cleanup(instance);
 
 	spin_unlock_irqrestore(&hostdata->lock, flags);
 
 	return SUCCESS;

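One detail worth calling out from the NCR5380_reselect() hunk above: during
reselection exactly one target ID bit may be asserted on the data bus, and
the new sanity check uses the classic clear-lowest-set-bit test for that.
A standalone sketch of the idiom (the function and the sample values are
illustrative, not driver code):

	#include <stdbool.h>
	#include <stdio.h>

	/* A value has exactly one bit set iff it is non-zero and clearing
	 * its lowest set bit (x & (x - 1)) leaves zero.
	 */
	static bool exactly_one_bit_set(unsigned int x)
	{
		return x != 0 && (x & (x - 1)) == 0;
	}

	int main(void)
	{
		printf("%d\n", exactly_one_bit_set(0x00));	/* 0: no ID on the bus */
		printf("%d\n", exactly_one_bit_set(0x20));	/* 1: a single target ID */
		printf("%d\n", exactly_one_bit_set(0x22));	/* 0: two IDs, bus fault */
		return 0;
	}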
View File

@@ -275,7 +275,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id);
 static void NCR5380_main(struct work_struct *work);
 static const char *NCR5380_info(struct Scsi_Host *instance);
 static void NCR5380_reselect(struct Scsi_Host *instance);
-static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
+static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
 static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
 static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
 static int NCR5380_poll_politely2(struct NCR5380_hostdata *,

View File

@@ -1094,7 +1094,7 @@ static int inia100_probe_one(struct pci_dev *pdev,
 	if (pci_enable_device(pdev))
 		goto out;
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 		printk(KERN_WARNING "Unable to set 32bit DMA "
 				    "on inia100 adapter, ignoring.\n");
 		goto out_disable_device;
@@ -1124,7 +1124,8 @@
 	/* Get total memory needed for SCB */
 	sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
-	host->scb_virt = pci_zalloc_consistent(pdev, sz, &host->scb_phys);
+	host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys,
+					     GFP_KERNEL);
 	if (!host->scb_virt) {
 		printk("inia100: SCB memory allocation error\n");
 		goto out_host_put;
@@ -1132,7 +1133,8 @@
 	/* Get total memory needed for ESCB */
 	sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
-	host->escb_virt = pci_zalloc_consistent(pdev, sz, &host->escb_phys);
+	host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys,
+					      GFP_KERNEL);
 	if (!host->escb_virt) {
 		printk("inia100: ESCB memory allocation error\n");
 		goto out_free_scb_array;
@@ -1177,10 +1179,12 @@
 out_free_irq:
 	free_irq(shost->irq, shost);
 out_free_escb_array:
-	pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+	dma_free_coherent(&pdev->dev,
+			ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
 			host->escb_virt, host->escb_phys);
 out_free_scb_array:
-	pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+	dma_free_coherent(&pdev->dev,
+			ORC_MAXQUEUE * sizeof(struct orc_scb),
 			host->scb_virt, host->scb_phys);
 out_host_put:
 	scsi_host_put(shost);
@@ -1200,9 +1204,11 @@
 	scsi_remove_host(shost);
 	free_irq(shost->irq, shost);
-	pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+	dma_free_coherent(&pdev->dev,
+			ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
 			host->escb_virt, host->escb_phys);
-	pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+	dma_free_coherent(&pdev->dev,
+			ORC_MAXQUEUE * sizeof(struct orc_scb),
 			host->scb_virt, host->scb_phys);
 	release_region(shost->io_port, 256);

View File

@@ -3480,7 +3480,6 @@ int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
 static void aac_srb_callback(void *context, struct fib * fibptr)
 {
-	struct aac_dev *dev;
 	struct aac_srb_reply *srbreply;
 	struct scsi_cmnd *scsicmd;
@@ -3491,8 +3490,6 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
 
 	BUG_ON(fibptr == NULL);
 
-	dev = fibptr->dev;
-
 	srbreply = (struct aac_srb_reply *) fib_data(fibptr);
 
 	scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */
@@ -3921,13 +3918,11 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
 static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
 {
-	struct aac_dev *dev;
 	unsigned long byte_count = 0;
 	int nseg;
 	struct scatterlist *sg;
 	int i;
 
-	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 	// Get rid of old data
 	psg->count = 0;
 	psg->sg[0].addr = 0;
@@ -3963,14 +3958,12 @@ static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
 static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
 {
-	struct aac_dev *dev;
 	unsigned long byte_count = 0;
 	u64 addr;
 	int nseg;
 	struct scatterlist *sg;
 	int i;
 
-	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 	// Get rid of old data
 	psg->count = 0;
 	psg->sg[0].addr[0] = 0;

View File

@@ -2586,9 +2586,7 @@ int aac_acquire_irq(struct aac_dev *dev)
 void aac_free_irq(struct aac_dev *dev)
 {
 	int i;
-	int cpu;
 
-	cpu = cpumask_first(cpu_online_mask);
 	if (aac_is_src(dev)) {
 		if (dev->max_msix > 1) {
 			for (i = 0; i < dev->max_msix; i++)

View File

@@ -5949,7 +5949,6 @@ static void adv_async_callback(ADV_DVC_VAR *adv_dvc_varp, uchar code)
 static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
 {
 	struct asc_board *boardp = adv_dvc_varp->drv_ptr;
-	u32 srb_tag;
 	adv_req_t *reqp;
 	adv_sgblk_t *sgblkp;
 	struct scsi_cmnd *scp;
@@ -5965,7 +5964,6 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
 	 * completed. The adv_req_t structure actually contains the
 	 * completed ADV_SCSI_REQ_Q structure.
 	 */
-	srb_tag = le32_to_cpu(scsiqp->srb_tag);
 	scp = scsi_host_find_tag(boardp->shost, scsiqp->srb_tag);
 
 	ASC_DBG(1, "scp 0x%p\n", scp);
@@ -6448,7 +6446,7 @@ static void AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
 			sdtr_data =
 			    AscCalSDTRData(asc_dvc, ext_msg.xfer_period,
 					   ext_msg.req_ack_offset);
-			if ((sdtr_data == 0xFF)) {
+			if (sdtr_data == 0xFF) {
 
 				q_cntl |= QC_MSG_OUT;
 				asc_dvc->init_sdtr &= ~target_id;

View File

@@ -42,15 +42,9 @@
  * $FreeBSD$
  */
 
-#ifdef __linux__
 #include "aic7xxx_osm.h"
 #include "aic7xxx_inline.h"
 #include "aic7xxx_93cx6.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aic7xxx_93cx6.h>
-#endif
 
 #define ID_AIC7770	0x04907770
 #define ID_AHA_274x	0x04907771

View File

@@ -607,9 +607,6 @@ struct scb {
 	ahd_io_ctx_t		  io_ctx;
 	struct ahd_softc	 *ahd_softc;
 	scb_flag		  flags;
-#ifndef __linux__
-	bus_dmamap_t		  dmamap;
-#endif
 	struct scb_platform_data *platform_data;
 	struct map_node		 *hscb_map;
 	struct map_node		 *sg_map;
@@ -1056,9 +1053,6 @@ struct ahd_completion
 struct ahd_softc {
 	bus_space_tag_t           tags[2];
 	bus_space_handle_t        bshs[2];
-#ifndef __linux__
-	bus_dma_tag_t		  buffer_dmat;	 /* dmat for buffer I/O */
-#endif
 	struct scb_data		  scb_data;
 
 	struct hardware_scb	 *next_queued_hscb;

View File

@@ -40,16 +40,9 @@
  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $
  */
 
-#ifdef __linux__
 #include "aic79xx_osm.h"
 #include "aic79xx_inline.h"
 #include "aicasm/aicasm_insformat.h"
-#else
-#include <dev/aic7xxx/aic79xx_osm.h>
-#include <dev/aic7xxx/aic79xx_inline.h>
-#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
-#endif
 
 /***************************** Lookup Tables **********************************/
 static const char *const ahd_chip_names[] =
@@ -59,7 +52,6 @@ static const char *const ahd_chip_names[] =
 	"aic7902",
 	"aic7901A"
 };
-static const u_int num_chip_names = ARRAY_SIZE(ahd_chip_names);
 
 /*
  * Hardware error codes.
@@ -6172,17 +6164,11 @@ ahd_free(struct ahd_softc *ahd)
 	case 2:
 		ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat);
 	case 1:
-#ifndef __linux__
-		ahd_dma_tag_destroy(ahd, ahd->buffer_dmat);
-#endif
 		break;
 	case 0:
 		break;
 	}
 
-#ifndef __linux__
-	ahd_dma_tag_destroy(ahd, ahd->parent_dmat);
-#endif
 	ahd_platform_free(ahd);
 	ahd_fini_scbdata(ahd);
 	for (i = 0; i < AHD_NUM_TARGETS; i++) {
@@ -6934,9 +6920,6 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
 	for (i = 0; i < newcount; i++) {
 		struct scb_platform_data *pdata;
 		u_int col_tag;
-#ifndef __linux__
-		int error;
-#endif
 
 		next_scb = kmalloc(sizeof(*next_scb), GFP_ATOMIC);
 		if (next_scb == NULL)
@@ -6970,15 +6953,6 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
 		next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg);
 		next_scb->ahd_softc = ahd;
 		next_scb->flags = SCB_FLAG_NONE;
-#ifndef __linux__
-		error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0,
-					  &next_scb->dmamap);
-		if (error != 0) {
-			kfree(next_scb);
-			kfree(pdata);
-			break;
-		}
-#endif
 		next_scb->hscb->tag = ahd_htole16(scb_data->numscbs);
 		col_tag = scb_data->numscbs ^ 0x100;
 		next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag);
@@ -7091,24 +7065,6 @@ ahd_init(struct ahd_softc *ahd)
 	if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0)
 		ahd->features &= ~AHD_TARGETMODE;
 
-#ifndef __linux__
-	/* DMA tag for mapping buffers into device visible space. */
-	if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
-			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
-			       /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING
-					? (dma_addr_t)0x7FFFFFFFFFULL
-					: BUS_SPACE_MAXADDR_32BIT,
-			       /*highaddr*/BUS_SPACE_MAXADDR,
-			       /*filter*/NULL, /*filterarg*/NULL,
-			       /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE,
-			       /*nsegments*/AHD_NSEG,
-			       /*maxsegsz*/AHD_MAXTRANSFER_SIZE,
-			       /*flags*/BUS_DMA_ALLOCNOW,
-			       &ahd->buffer_dmat) != 0) {
-		return (ENOMEM);
-	}
-#endif
 
 	ahd->init_level++;
 
 /*


@@ -41,14 +41,8 @@
* $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#92 $ * $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#92 $
*/ */
#ifdef __linux__
#include "aic79xx_osm.h" #include "aic79xx_osm.h"
#include "aic79xx_inline.h" #include "aic79xx_inline.h"
#else
#include <dev/aic7xxx/aic79xx_osm.h>
#include <dev/aic7xxx/aic79xx_inline.h>
#endif
#include "aic79xx_pci.h" #include "aic79xx_pci.h"
static inline uint64_t static inline uint64_t
@@ -294,13 +288,11 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
int int
ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry) ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
{ {
struct scb_data *shared_scb_data;
u_int command; u_int command;
uint32_t devconfig; uint32_t devconfig;
uint16_t subvendor; uint16_t subvendor;
int error; int error;
shared_scb_data = NULL;
ahd->description = entry->name; ahd->description = entry->name;
/* /*
* Record if this is an HP board. * Record if this is an HP board.


@@ -568,9 +568,6 @@ struct scb {
ahc_io_ctx_t io_ctx; ahc_io_ctx_t io_ctx;
struct ahc_softc *ahc_softc; struct ahc_softc *ahc_softc;
scb_flag flags; scb_flag flags;
#ifndef __linux__
bus_dmamap_t dmamap;
#endif
struct scb_platform_data *platform_data; struct scb_platform_data *platform_data;
struct sg_map_node *sg_map; struct sg_map_node *sg_map;
struct ahc_dma_seg *sg_list; struct ahc_dma_seg *sg_list;
@@ -906,9 +903,6 @@ typedef void ahc_callback_t (void *);
struct ahc_softc { struct ahc_softc {
bus_space_tag_t tag; bus_space_tag_t tag;
bus_space_handle_t bsh; bus_space_handle_t bsh;
#ifndef __linux__
bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */
#endif
struct scb_data *scb_data; struct scb_data *scb_data;
struct scb *next_queued_scb; struct scb *next_queued_scb;


@@ -64,15 +64,9 @@
* bit to be sent from the chip. * bit to be sent from the chip.
*/ */
#ifdef __linux__
#include "aic7xxx_osm.h" #include "aic7xxx_osm.h"
#include "aic7xxx_inline.h" #include "aic7xxx_inline.h"
#include "aic7xxx_93cx6.h" #include "aic7xxx_93cx6.h"
#else
#include <dev/aic7xxx/aic7xxx_osm.h>
#include <dev/aic7xxx/aic7xxx_inline.h>
#include <dev/aic7xxx/aic7xxx_93cx6.h>
#endif
/* /*
* Right now, we only have to read the SEEPROM. But we make it easier to * Right now, we only have to read the SEEPROM. But we make it easier to


@@ -40,15 +40,9 @@
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $
*/ */
#ifdef __linux__
#include "aic7xxx_osm.h" #include "aic7xxx_osm.h"
#include "aic7xxx_inline.h" #include "aic7xxx_inline.h"
#include "aicasm/aicasm_insformat.h" #include "aicasm/aicasm_insformat.h"
#else
#include <dev/aic7xxx/aic7xxx_osm.h>
#include <dev/aic7xxx/aic7xxx_inline.h>
#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
#endif
/***************************** Lookup Tables **********************************/ /***************************** Lookup Tables **********************************/
static const char *const ahc_chip_names[] = { static const char *const ahc_chip_names[] = {
@@ -67,7 +61,6 @@ static const char *const ahc_chip_names[] = {
"aic7892", "aic7892",
"aic7899" "aic7899"
}; };
static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names);
/* /*
* Hardware error codes. * Hardware error codes.
@@ -4509,17 +4502,11 @@ ahc_free(struct ahc_softc *ahc)
case 2: case 2:
ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat); ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
case 1: case 1:
#ifndef __linux__
ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
#endif
break; break;
case 0: case 0:
break; break;
} }
#ifndef __linux__
ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
#endif
ahc_platform_free(ahc); ahc_platform_free(ahc);
ahc_fini_scbdata(ahc); ahc_fini_scbdata(ahc);
for (i = 0; i < AHC_NUM_TARGETS; i++) { for (i = 0; i < AHC_NUM_TARGETS; i++) {
@@ -5005,9 +4992,7 @@ ahc_alloc_scbs(struct ahc_softc *ahc)
newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs)); newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
for (i = 0; i < newcount; i++) { for (i = 0; i < newcount; i++) {
struct scb_platform_data *pdata; struct scb_platform_data *pdata;
#ifndef __linux__
int error;
#endif
pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC); pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC);
if (pdata == NULL) if (pdata == NULL)
break; break;
@@ -5021,12 +5006,6 @@ ahc_alloc_scbs(struct ahc_softc *ahc)
next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg); next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
next_scb->ahc_softc = ahc; next_scb->ahc_softc = ahc;
next_scb->flags = SCB_FREE; next_scb->flags = SCB_FREE;
#ifndef __linux__
error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
&next_scb->dmamap);
if (error != 0)
break;
#endif
next_scb->hscb = &scb_data->hscbs[scb_data->numscbs]; next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
next_scb->hscb->tag = ahc->scb_data->numscbs; next_scb->hscb->tag = ahc->scb_data->numscbs;
SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
@@ -5325,24 +5304,6 @@ ahc_init(struct ahc_softc *ahc)
if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0) if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
ahc->features &= ~AHC_TARGETMODE; ahc->features &= ~AHC_TARGETMODE;
#ifndef __linux__
/* DMA tag for mapping buffers into device visible space. */
if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
/*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
/*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING
? (dma_addr_t)0x7FFFFFFFFFULL
: BUS_SPACE_MAXADDR_32BIT,
/*highaddr*/BUS_SPACE_MAXADDR,
/*filter*/NULL, /*filterarg*/NULL,
/*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE,
/*nsegments*/AHC_NSEG,
/*maxsegsz*/AHC_MAXTRANSFER_SIZE,
/*flags*/BUS_DMA_ALLOCNOW,
&ahc->buffer_dmat) != 0) {
return (ENOMEM);
}
#endif
ahc->init_level++; ahc->init_level++;
/* /*


@@ -42,16 +42,9 @@
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#79 $ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#79 $
*/ */
#ifdef __linux__
#include "aic7xxx_osm.h" #include "aic7xxx_osm.h"
#include "aic7xxx_inline.h" #include "aic7xxx_inline.h"
#include "aic7xxx_93cx6.h" #include "aic7xxx_93cx6.h"
#else
#include <dev/aic7xxx/aic7xxx_osm.h>
#include <dev/aic7xxx/aic7xxx_inline.h>
#include <dev/aic7xxx/aic7xxx_93cx6.h>
#endif
#include "aic7xxx_pci.h" #include "aic7xxx_pci.h"
static inline uint64_t static inline uint64_t


@@ -42,11 +42,7 @@
* $FreeBSD$ * $FreeBSD$
*/ */
#ifdef __linux__
#include "../queue.h" #include "../queue.h"
#else
#include <sys/queue.h>
#endif
#ifndef TRUE #ifndef TRUE
#define TRUE 1 #define TRUE 1


@@ -52,11 +52,7 @@
#include <string.h> #include <string.h>
#include <sysexits.h> #include <sysexits.h>
#ifdef __linux__
#include "../queue.h" #include "../queue.h"
#else
#include <sys/queue.h>
#endif
#include "aicasm.h" #include "aicasm.h"
#include "aicasm_symbol.h" #include "aicasm_symbol.h"


@@ -52,11 +52,7 @@
#include <string.h> #include <string.h>
#include <sysexits.h> #include <sysexits.h>
#ifdef __linux__
#include "../queue.h" #include "../queue.h"
#else
#include <sys/queue.h>
#endif
#include "aicasm.h" #include "aicasm.h"
#include "aicasm_symbol.h" #include "aicasm_symbol.h"


@@ -51,11 +51,7 @@
#include <stdio.h> #include <stdio.h>
#include <string.h> #include <string.h>
#include <sysexits.h> #include <sysexits.h>
#ifdef __linux__
#include "../queue.h" #include "../queue.h"
#else
#include <sys/queue.h>
#endif
#include "aicasm.h" #include "aicasm.h"
#include "aicasm_symbol.h" #include "aicasm_symbol.h"


@@ -51,11 +51,7 @@
#include <stdio.h> #include <stdio.h>
#include <string.h> #include <string.h>
#include <sysexits.h> #include <sysexits.h>
#ifdef __linux__
#include "../queue.h" #include "../queue.h"
#else
#include <sys/queue.h>
#endif
#include "aicasm.h" #include "aicasm.h"
#include "aicasm_symbol.h" #include "aicasm_symbol.h"


@@ -44,11 +44,7 @@
#include <sys/types.h> #include <sys/types.h>
#ifdef __linux__
#include "aicdb.h" #include "aicdb.h"
#else
#include <db.h>
#endif
#include <fcntl.h> #include <fcntl.h>
#include <inttypes.h> #include <inttypes.h>
#include <regex.h> #include <regex.h>


@@ -42,11 +42,7 @@
* $FreeBSD$ * $FreeBSD$
*/ */
#ifdef __linux__
#include "../queue.h" #include "../queue.h"
#else
#include <sys/queue.h>
#endif
typedef enum { typedef enum {
UNINITIALIZED, UNINITIALIZED,


@@ -771,13 +771,8 @@ static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto Err_remove; goto Err_remove;
err = -ENODEV; err = -ENODEV;
if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64)) if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) ||
&& !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64))) dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
;
else if (!pci_set_dma_mask(dev, DMA_BIT_MASK(32))
&& !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)))
;
else {
asd_printk("no suitable DMA mask for %s\n", pci_name(dev)); asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
goto Err_remove; goto Err_remove;
} }
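
The hunk above is typical of this series: the old four-call pci_set_dma_mask()/pci_set_consistent_dma_mask() ladder collapses into dma_set_mask_and_coherent(), which sets the streaming and coherent masks together. A minimal sketch of the 64-then-32-bit fallback, written out explicitly (my_probe() is a hypothetical probe routine, not code from this series):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int my_probe(struct pci_dev *pdev)
{
        /* Prefer 64-bit DMA addressing; retry with 32-bit only if the
         * wider mask is rejected. Both calls return 0 on success. */
        int ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

        if (ret)
                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (ret)
                dev_err(&pdev->dev, "no suitable DMA mask\n");
        return ret;
}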


@@ -724,9 +724,11 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->max_sas_lrate) { switch (pd->max_sas_lrate) {
case SAS_LINK_RATE_6_0_GBPS: case SAS_LINK_RATE_6_0_GBPS:
*speed_mask &= ~SAS_SPEED_60_DIS; *speed_mask &= ~SAS_SPEED_60_DIS;
/* fall through*/
default: default:
case SAS_LINK_RATE_3_0_GBPS: case SAS_LINK_RATE_3_0_GBPS:
*speed_mask &= ~SAS_SPEED_30_DIS; *speed_mask &= ~SAS_SPEED_30_DIS;
/* fall through*/
case SAS_LINK_RATE_1_5_GBPS: case SAS_LINK_RATE_1_5_GBPS:
*speed_mask &= ~SAS_SPEED_15_DIS; *speed_mask &= ~SAS_SPEED_15_DIS;
} }
@@ -734,6 +736,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->min_sas_lrate) { switch (pd->min_sas_lrate) {
case SAS_LINK_RATE_6_0_GBPS: case SAS_LINK_RATE_6_0_GBPS:
*speed_mask |= SAS_SPEED_30_DIS; *speed_mask |= SAS_SPEED_30_DIS;
/* fall through*/
case SAS_LINK_RATE_3_0_GBPS: case SAS_LINK_RATE_3_0_GBPS:
*speed_mask |= SAS_SPEED_15_DIS; *speed_mask |= SAS_SPEED_15_DIS;
default: default:
@@ -745,6 +748,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->max_sata_lrate) { switch (pd->max_sata_lrate) {
case SAS_LINK_RATE_3_0_GBPS: case SAS_LINK_RATE_3_0_GBPS:
*speed_mask &= ~SATA_SPEED_30_DIS; *speed_mask &= ~SATA_SPEED_30_DIS;
/* fall through*/
default: default:
case SAS_LINK_RATE_1_5_GBPS: case SAS_LINK_RATE_1_5_GBPS:
*speed_mask &= ~SATA_SPEED_15_DIS; *speed_mask &= ~SATA_SPEED_15_DIS;
@@ -803,6 +807,7 @@ void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc)
/* link reset retries, this should be nominal */ /* link reset retries, this should be nominal */
control_phy->link_reset_retries = 10; control_phy->link_reset_retries = 10;
/* fall through */
case RELEASE_SPINUP_HOLD: /* 0x02 */ case RELEASE_SPINUP_HOLD: /* 0x02 */
/* decide the func_mask */ /* decide the func_mask */
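
The new /* fall through */ comments above only document behaviour that was already there: each higher link rate enables the lower ones as well, so the switch cases are meant to cascade, and the annotation keeps GCC's -Wimplicit-fallthrough quiet. A toy, self-contained illustration of the idiom (the rates and bit values are made up):

#include <stdio.h>

static unsigned int rate_mask(int max_rate_tenths)
{
        unsigned int mask = 0;

        switch (max_rate_tenths) {
        case 60:                /* 6.0 Gbps implies ... */
                mask |= 1u << 2;
                /* fall through */
        case 30:                /* ... 3.0 Gbps, which implies ... */
                mask |= 1u << 1;
                /* fall through */
        case 15:                /* ... 1.5 Gbps. */
                mask |= 1u << 0;
        }
        return mask;
}

int main(void)
{
        printf("0x%x\n", rate_mask(60));        /* prints 0x7 */
        return 0;
}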


@@ -42,13 +42,13 @@ static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
} }
/* PCI_DMA_... to our direction translation. /* DMA_... to our direction translation.
*/ */
static const u8 data_dir_flags[] = { static const u8 data_dir_flags[] = {
[PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */ [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
[PCI_DMA_TODEVICE] = DATA_DIR_OUT, /* OUTBOUND */ [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */
[PCI_DMA_FROMDEVICE] = DATA_DIR_IN, /* INBOUND */ [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */
[PCI_DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */ [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
}; };
static int asd_map_scatterlist(struct sas_task *task, static int asd_map_scatterlist(struct sas_task *task,
@@ -60,12 +60,12 @@ static int asd_map_scatterlist(struct sas_task *task,
struct scatterlist *sc; struct scatterlist *sc;
int num_sg, res; int num_sg, res;
if (task->data_dir == PCI_DMA_NONE) if (task->data_dir == DMA_NONE)
return 0; return 0;
if (task->num_scatter == 0) { if (task->num_scatter == 0) {
void *p = task->scatter; void *p = task->scatter;
dma_addr_t dma = pci_map_single(asd_ha->pcidev, p, dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
task->total_xfer_len, task->total_xfer_len,
task->data_dir); task->data_dir);
sg_arr[0].bus_addr = cpu_to_le64((u64)dma); sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
@@ -79,7 +79,7 @@ static int asd_map_scatterlist(struct sas_task *task,
if (sas_protocol_ata(task->task_proto)) if (sas_protocol_ata(task->task_proto))
num_sg = task->num_scatter; num_sg = task->num_scatter;
else else
num_sg = pci_map_sg(asd_ha->pcidev, task->scatter, num_sg = dma_map_sg(&asd_ha->pcidev->dev, task->scatter,
task->num_scatter, task->data_dir); task->num_scatter, task->data_dir);
if (num_sg == 0) if (num_sg == 0)
return -ENOMEM; return -ENOMEM;
@@ -126,8 +126,8 @@ static int asd_map_scatterlist(struct sas_task *task,
return 0; return 0;
err_unmap: err_unmap:
if (sas_protocol_ata(task->task_proto)) if (sas_protocol_ata(task->task_proto))
pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter, dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
task->data_dir); task->num_scatter, task->data_dir);
return res; return res;
} }
@@ -136,21 +136,21 @@ static void asd_unmap_scatterlist(struct asd_ascb *ascb)
struct asd_ha_struct *asd_ha = ascb->ha; struct asd_ha_struct *asd_ha = ascb->ha;
struct sas_task *task = ascb->uldd_task; struct sas_task *task = ascb->uldd_task;
if (task->data_dir == PCI_DMA_NONE) if (task->data_dir == DMA_NONE)
return; return;
if (task->num_scatter == 0) { if (task->num_scatter == 0) {
dma_addr_t dma = (dma_addr_t) dma_addr_t dma = (dma_addr_t)
le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr); le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);
pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len, dma_unmap_single(&ascb->ha->pcidev->dev, dma,
task->data_dir); task->total_xfer_len, task->data_dir);
return; return;
} }
asd_free_coherent(asd_ha, ascb->sg_arr); asd_free_coherent(asd_ha, ascb->sg_arr);
if (task->task_proto != SAS_PROTOCOL_STP) if (task->task_proto != SAS_PROTOCOL_STP)
pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter, dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
task->data_dir); task->num_scatter, task->data_dir);
} }
/* ---------- Task complete tasklet ---------- */ /* ---------- Task complete tasklet ---------- */
@@ -436,10 +436,10 @@ static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
struct domain_device *dev = task->dev; struct domain_device *dev = task->dev;
struct scb *scb; struct scb *scb;
pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1, dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_req, 1,
PCI_DMA_TODEVICE); DMA_TO_DEVICE);
pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1, dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_resp, 1,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
scb = ascb->scb; scb = ascb->scb;
@@ -471,10 +471,10 @@ static void asd_unbuild_smp_ascb(struct asd_ascb *a)
struct sas_task *task = a->uldd_task; struct sas_task *task = a->uldd_task;
BUG_ON(!task); BUG_ON(!task);
pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1, dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_req, 1,
PCI_DMA_TODEVICE); DMA_TO_DEVICE);
pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1, dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_resp, 1,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
} }
/* ---------- SSP ---------- */ /* ---------- SSP ---------- */
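
Beyond the rename, these hunks rely on enum dma_data_direction (DMA_TO_DEVICE and friends) carrying the same meanings as the old PCI_DMA_* constants, and on dma_map_sg()/dma_unmap_sg() taking the underlying struct device rather than the pci_dev. A hedged sketch of the scatterlist round trip (my_xmit() is illustrative only):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

static int my_xmit(struct device *dev, struct scatterlist *sgl, int nents)
{
        /* dma_map_sg() returns the number of mapped segments, 0 on error;
         * the same (dev, sgl, nents, dir) tuple must be passed to unmap. */
        int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

        if (!mapped)
                return -ENOMEM;

        /* ... program the hardware with the mapped segments ... */

        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
        return 0;
}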


@@ -96,9 +96,7 @@ static void pci_esp_dma_drain(struct esp *esp);
static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp) static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp)
{ {
struct pci_dev *pdev = esp->dev; return dev_get_drvdata(esp->dev);
return pci_get_drvdata(pdev);
} }
static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg) static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg)
@@ -116,30 +114,6 @@ static void pci_esp_write32(struct esp *esp, u32 val, unsigned long reg)
return iowrite32(val, esp->regs + (reg * 4UL)); return iowrite32(val, esp->regs + (reg * 4UL));
} }
static dma_addr_t pci_esp_map_single(struct esp *esp, void *buf,
size_t sz, int dir)
{
return pci_map_single(esp->dev, buf, sz, dir);
}
static int pci_esp_map_sg(struct esp *esp, struct scatterlist *sg,
int num_sg, int dir)
{
return pci_map_sg(esp->dev, sg, num_sg, dir);
}
static void pci_esp_unmap_single(struct esp *esp, dma_addr_t addr,
size_t sz, int dir)
{
pci_unmap_single(esp->dev, addr, sz, dir);
}
static void pci_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
int num_sg, int dir)
{
pci_unmap_sg(esp->dev, sg, num_sg, dir);
}
static int pci_esp_irq_pending(struct esp *esp) static int pci_esp_irq_pending(struct esp *esp)
{ {
struct pci_esp_priv *pep = pci_esp_get_priv(esp); struct pci_esp_priv *pep = pci_esp_get_priv(esp);
@@ -295,10 +269,6 @@ static u32 pci_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
static const struct esp_driver_ops pci_esp_ops = { static const struct esp_driver_ops pci_esp_ops = {
.esp_write8 = pci_esp_write8, .esp_write8 = pci_esp_write8,
.esp_read8 = pci_esp_read8, .esp_read8 = pci_esp_read8,
.map_single = pci_esp_map_single,
.map_sg = pci_esp_map_sg,
.unmap_single = pci_esp_unmap_single,
.unmap_sg = pci_esp_unmap_sg,
.irq_pending = pci_esp_irq_pending, .irq_pending = pci_esp_irq_pending,
.reset_dma = pci_esp_reset_dma, .reset_dma = pci_esp_reset_dma,
.dma_drain = pci_esp_dma_drain, .dma_drain = pci_esp_dma_drain,
@@ -375,18 +345,18 @@ static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
static void dc390_check_eeprom(struct esp *esp) static void dc390_check_eeprom(struct esp *esp)
{ {
struct pci_dev *pdev = to_pci_dev(esp->dev);
u8 EEbuf[128]; u8 EEbuf[128];
u16 *ptr = (u16 *)EEbuf, wval = 0; u16 *ptr = (u16 *)EEbuf, wval = 0;
int i; int i;
dc390_read_eeprom((struct pci_dev *)esp->dev, ptr); dc390_read_eeprom(pdev, ptr);
for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++) for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++)
wval += *ptr; wval += *ptr;
/* no Tekram EEprom found */ /* no Tekram EEprom found */
if (wval != 0x1234) { if (wval != 0x1234) {
struct pci_dev *pdev = esp->dev;
dev_printk(KERN_INFO, &pdev->dev, dev_printk(KERN_INFO, &pdev->dev,
"No valid Tekram EEprom found\n"); "No valid Tekram EEprom found\n");
return; return;
@@ -411,7 +381,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
return -ENODEV; return -ENODEV;
} }
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
dev_printk(KERN_INFO, &pdev->dev, dev_printk(KERN_INFO, &pdev->dev,
"failed to set 32bit DMA mask\n"); "failed to set 32bit DMA mask\n");
goto fail_disable_device; goto fail_disable_device;
@@ -435,7 +405,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
esp = shost_priv(shost); esp = shost_priv(shost);
esp->host = shost; esp->host = shost;
esp->dev = pdev; esp->dev = &pdev->dev;
esp->ops = &pci_esp_ops; esp->ops = &pci_esp_ops;
/* /*
* The am53c974 HBA has a design flaw of generating * The am53c974 HBA has a design flaw of generating
@@ -467,8 +437,8 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
pci_set_master(pdev); pci_set_master(pdev);
esp->command_block = pci_alloc_consistent(pdev, 16, esp->command_block = dma_alloc_coherent(&pdev->dev, 16,
&esp->command_block_dma); &esp->command_block_dma, GFP_KERNEL);
if (!esp->command_block) { if (!esp->command_block) {
dev_printk(KERN_ERR, &pdev->dev, dev_printk(KERN_ERR, &pdev->dev,
"failed to allocate command block\n"); "failed to allocate command block\n");
@@ -498,7 +468,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
/* Assume 40MHz clock */ /* Assume 40MHz clock */
esp->cfreq = 40000000; esp->cfreq = 40000000;
err = scsi_esp_register(esp, &pdev->dev); err = scsi_esp_register(esp);
if (err) if (err)
goto fail_free_irq; goto fail_free_irq;
@@ -508,8 +478,8 @@ fail_free_irq:
free_irq(pdev->irq, esp); free_irq(pdev->irq, esp);
fail_unmap_command_block: fail_unmap_command_block:
pci_set_drvdata(pdev, NULL); pci_set_drvdata(pdev, NULL);
pci_free_consistent(pdev, 16, esp->command_block, dma_free_coherent(&pdev->dev, 16, esp->command_block,
esp->command_block_dma); esp->command_block_dma);
fail_unmap_regs: fail_unmap_regs:
pci_iounmap(pdev, esp->regs); pci_iounmap(pdev, esp->regs);
fail_release_regions: fail_release_regions:
@@ -532,8 +502,8 @@ static void pci_esp_remove_one(struct pci_dev *pdev)
scsi_esp_unregister(esp); scsi_esp_unregister(esp);
free_irq(pdev->irq, esp); free_irq(pdev->irq, esp);
pci_set_drvdata(pdev, NULL); pci_set_drvdata(pdev, NULL);
pci_free_consistent(pdev, 16, esp->command_block, dma_free_coherent(&pdev->dev, 16, esp->command_block,
esp->command_block_dma); esp->command_block_dma);
pci_iounmap(pdev, esp->regs); pci_iounmap(pdev, esp->regs);
pci_release_regions(pdev); pci_release_regions(pdev);
pci_disable_device(pdev); pci_disable_device(pdev);
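
The esp conversion above pairs dma_alloc_coherent() in probe with dma_free_coherent() in both the error path and remove; unlike pci_alloc_consistent(), the generic call takes an explicit GFP flag. A small sketch of that pairing (the size and names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static void *cmd_block;
static dma_addr_t cmd_block_dma;

static int my_setup(struct device *dev)
{
        cmd_block = dma_alloc_coherent(dev, 16, &cmd_block_dma, GFP_KERNEL);
        if (!cmd_block)
                return -ENOMEM;
        return 0;
}

static void my_teardown(struct device *dev)
{
        dma_free_coherent(dev, 16, cmd_block, cmd_block_dma);
}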


@@ -1317,13 +1317,10 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error) static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
{ {
int id, lun;
if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) { if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
if (pCCB->startdone == ARCMSR_CCB_ABORTED) { if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
struct scsi_cmnd *abortcmd = pCCB->pcmd; struct scsi_cmnd *abortcmd = pCCB->pcmd;
if (abortcmd) { if (abortcmd) {
id = abortcmd->device->id;
lun = abortcmd->device->lun;
abortcmd->result |= DID_ABORT << 16; abortcmd->result |= DID_ABORT << 16;
arcmsr_ccb_complete(pCCB); arcmsr_ccb_complete(pCCB);
printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n", printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
@@ -1798,7 +1795,7 @@ static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0); writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
if (!arcmsr_hbaA_wait_msgint_ready(acb)) { if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
printk(KERN_NOTICE printk(KERN_NOTICE
"arcmsr%d: wait 'stop adapter background rebulid' timeout\n" "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
, acb->host->host_no); , acb->host->host_no);
} }
} }
@@ -1811,7 +1808,7 @@ static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
if (!arcmsr_hbaB_wait_msgint_ready(acb)) { if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE printk(KERN_NOTICE
"arcmsr%d: wait 'stop adapter background rebulid' timeout\n" "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
, acb->host->host_no); , acb->host->host_no);
} }
} }
@@ -1824,7 +1821,7 @@ static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE printk(KERN_NOTICE
"arcmsr%d: wait 'stop adapter background rebulid' timeout\n" "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
, pACB->host->host_no); , pACB->host->host_no);
} }
return; return;
@@ -1837,7 +1834,7 @@ static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0); writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
if (!arcmsr_hbaD_wait_msgint_ready(pACB)) if (!arcmsr_hbaD_wait_msgint_ready(pACB))
pr_notice("arcmsr%d: wait 'stop adapter background rebulid' " pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
"timeout\n", pACB->host->host_no); "timeout\n", pACB->host->host_no);
} }
@@ -1850,7 +1847,7 @@ static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
writel(pACB->out_doorbell, &reg->iobound_doorbell); writel(pACB->out_doorbell, &reg->iobound_doorbell);
if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
pr_notice("arcmsr%d: wait 'stop adapter background rebulid' " pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
"timeout\n", pACB->host->host_no); "timeout\n", pACB->host->host_no);
} }
} }
@@ -3927,7 +3924,7 @@ static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0); writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
if (!arcmsr_hbaA_wait_msgint_ready(acb)) { if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
rebulid' timeout \n", acb->host->host_no); rebuild' timeout \n", acb->host->host_no);
} }
} }
@@ -3938,7 +3935,7 @@ static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell); writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
if (!arcmsr_hbaB_wait_msgint_ready(acb)) { if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
rebulid' timeout \n",acb->host->host_no); rebuild' timeout \n",acb->host->host_no);
} }
} }
@@ -3950,7 +3947,7 @@ static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
if (!arcmsr_hbaC_wait_msgint_ready(pACB)) { if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
rebulid' timeout \n", pACB->host->host_no); rebuild' timeout \n", pACB->host->host_no);
} }
return; return;
} }
@@ -3963,7 +3960,7 @@ static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0); writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
if (!arcmsr_hbaD_wait_msgint_ready(pACB)) { if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
pr_notice("arcmsr%d: wait 'start adapter " pr_notice("arcmsr%d: wait 'start adapter "
"background rebulid' timeout\n", pACB->host->host_no); "background rebuild' timeout\n", pACB->host->host_no);
} }
} }
@@ -3977,7 +3974,7 @@ static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
writel(pACB->out_doorbell, &pmu->iobound_doorbell); writel(pACB->out_doorbell, &pmu->iobound_doorbell);
if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
pr_notice("arcmsr%d: wait 'start adapter " pr_notice("arcmsr%d: wait 'start adapter "
"background rebulid' timeout \n", pACB->host->host_no); "background rebuild' timeout \n", pACB->host->host_no);
} }
} }
@@ -4135,9 +4132,9 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
pci_read_config_byte(acb->pdev, i, &value[i]); pci_read_config_byte(acb->pdev, i, &value[i]);
} }
/* hardware reset signal */ /* hardware reset signal */
if ((acb->dev_id == 0x1680)) { if (acb->dev_id == 0x1680) {
writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]); writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
} else if ((acb->dev_id == 0x1880)) { } else if (acb->dev_id == 0x1880) {
do { do {
count++; count++;
writel(0xF, &pmuC->write_sequence); writel(0xF, &pmuC->write_sequence);
@@ -4161,7 +4158,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
} while (((readl(&pmuE->host_diagnostic_3xxx) & } while (((readl(&pmuE->host_diagnostic_3xxx) &
ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5)); ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx); writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
} else if ((acb->dev_id == 0x1214)) { } else if (acb->dev_id == 0x1214) {
writel(0x20, pmuD->reset_request); writel(0x20, pmuD->reset_request);
} else { } else {
pci_write_config_byte(acb->pdev, 0x84, 0x20); pci_write_config_byte(acb->pdev, 0x84, 0x20);


@@ -1193,7 +1193,7 @@ static void atp870u_free_tables(struct Scsi_Host *host)
for (k = 0; k < 16; k++) { for (k = 0; k < 16; k++) {
if (!atp_dev->id[j][k].prd_table) if (!atp_dev->id[j][k].prd_table)
continue; continue;
pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus); dma_free_coherent(&atp_dev->pdev->dev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
atp_dev->id[j][k].prd_table = NULL; atp_dev->id[j][k].prd_table = NULL;
} }
} }
@@ -1205,7 +1205,7 @@ static int atp870u_init_tables(struct Scsi_Host *host)
int c,k; int c,k;
for(c=0;c < 2;c++) { for(c=0;c < 2;c++) {
for(k=0;k<16;k++) { for(k=0;k<16;k++) {
atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prd_bus)); atp_dev->id[c][k].prd_table = dma_alloc_coherent(&atp_dev->pdev->dev, 1024, &(atp_dev->id[c][k].prd_bus), GFP_KERNEL);
if (!atp_dev->id[c][k].prd_table) { if (!atp_dev->id[c][k].prd_table) {
printk("atp870u_init_tables fail\n"); printk("atp870u_init_tables fail\n");
atp870u_free_tables(host); atp870u_free_tables(host);
@@ -1509,7 +1509,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) if (err)
goto fail; goto fail;
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_ERR "atp870u: DMA mask required but not available.\n"); printk(KERN_ERR "atp870u: DMA mask required but not available.\n");
err = -EIO; err = -EIO;
goto disable_device; goto disable_device;


@@ -520,7 +520,7 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
**/ **/
tag_mem = &ctrl->ptag_state[tag].tag_mem_state; tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
if (tag_mem->size) { if (tag_mem->size) {
pci_free_consistent(ctrl->pdev, tag_mem->size, dma_free_coherent(&ctrl->pdev->dev, tag_mem->size,
tag_mem->va, tag_mem->dma); tag_mem->va, tag_mem->dma);
tag_mem->size = 0; tag_mem->size = 0;
} }
@@ -1269,12 +1269,12 @@ int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
struct be_sge *sge = nonembedded_sgl(wrb); struct be_sge *sge = nonembedded_sgl(wrb);
int status = 0; int status = 0;
nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev, nonemb_cmd.va = dma_alloc_coherent(&ctrl->pdev->dev,
sizeof(struct be_mgmt_controller_attributes), sizeof(struct be_mgmt_controller_attributes),
&nonemb_cmd.dma); &nonemb_cmd.dma, GFP_KERNEL);
if (nonemb_cmd.va == NULL) { if (nonemb_cmd.va == NULL) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BG_%d : pci_alloc_consistent failed in %s\n", "BG_%d : dma_alloc_coherent failed in %s\n",
__func__); __func__);
return -ENOMEM; return -ENOMEM;
} }
@@ -1314,7 +1314,7 @@ int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
"BG_%d : Failed in beiscsi_check_supported_fw\n"); "BG_%d : Failed in beiscsi_check_supported_fw\n");
mutex_unlock(&ctrl->mbox_lock); mutex_unlock(&ctrl->mbox_lock);
if (nonemb_cmd.va) if (nonemb_cmd.va)
pci_free_consistent(ctrl->pdev, nonemb_cmd.size, dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma); nonemb_cmd.va, nonemb_cmd.dma);
return status; return status;


@@ -771,7 +771,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
status = beiscsi_get_initiator_name(phba, buf, false); status = beiscsi_get_initiator_name(phba, buf, false);
if (status < 0) { if (status < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Retreiving Initiator Name Failed\n"); "BS_%d : Retrieving Initiator Name Failed\n");
status = 0; status = 0;
} }
} }
@@ -1071,9 +1071,9 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
else else
req_memsize = sizeof(struct tcp_connect_and_offload_in_v1); req_memsize = sizeof(struct tcp_connect_and_offload_in_v1);
nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
req_memsize, req_memsize,
&nonemb_cmd.dma); &nonemb_cmd.dma, GFP_KERNEL);
if (nonemb_cmd.va == NULL) { if (nonemb_cmd.va == NULL) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -1091,7 +1091,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
"BS_%d : mgmt_open_connection Failed for cid=%d\n", "BS_%d : mgmt_open_connection Failed for cid=%d\n",
beiscsi_ep->ep_cid); beiscsi_ep->ep_cid);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma); nonemb_cmd.va, nonemb_cmd.dma);
beiscsi_free_ep(beiscsi_ep); beiscsi_free_ep(beiscsi_ep);
return -EAGAIN; return -EAGAIN;
@@ -1104,8 +1104,9 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
"BS_%d : mgmt_open_connection Failed"); "BS_%d : mgmt_open_connection Failed");
if (ret != -EBUSY) if (ret != -EBUSY)
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, dma_free_coherent(&phba->ctrl.pdev->dev,
nonemb_cmd.va, nonemb_cmd.dma); nonemb_cmd.size, nonemb_cmd.va,
nonemb_cmd.dma);
beiscsi_free_ep(beiscsi_ep); beiscsi_free_ep(beiscsi_ep);
return ret; return ret;
@@ -1118,7 +1119,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : mgmt_open_connection Success\n"); "BS_%d : mgmt_open_connection Success\n");
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma); nonemb_cmd.va, nonemb_cmd.dma);
return 0; return 0;
} }


@@ -511,18 +511,9 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
} }
pci_set_master(pcidev); pci_set_master(pcidev);
ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)); ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
if (ret) { if (ret) {
ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
goto pci_region_release;
} else {
ret = pci_set_consistent_dma_mask(pcidev,
DMA_BIT_MASK(32));
}
} else {
ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
if (ret) { if (ret) {
dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
goto pci_region_release; goto pci_region_release;
@@ -550,9 +541,8 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
if (status) if (status)
return status; return status;
mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
mbox_mem_alloc->va = pci_alloc_consistent(pdev, mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev,
mbox_mem_alloc->size, mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL);
&mbox_mem_alloc->dma);
if (!mbox_mem_alloc->va) { if (!mbox_mem_alloc->va) {
beiscsi_unmap_pci_function(phba); beiscsi_unmap_pci_function(phba);
return -ENOMEM; return -ENOMEM;
@@ -1866,7 +1856,6 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
{ {
struct be_queue_info *cq; struct be_queue_info *cq;
struct sol_cqe *sol; struct sol_cqe *sol;
struct dmsg_cqe *dmsg;
unsigned int total = 0; unsigned int total = 0;
unsigned int num_processed = 0; unsigned int num_processed = 0;
unsigned short code = 0, cid = 0; unsigned short code = 0, cid = 0;
@@ -1939,7 +1928,6 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
"BM_%d : Received %s[%d] on CID : %d\n", "BM_%d : Received %s[%d] on CID : %d\n",
cqe_desc[code], code, cid); cqe_desc[code], code, cid);
dmsg = (struct dmsg_cqe *)sol;
hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
break; break;
case UNSOL_HDR_NOTIFY: case UNSOL_HDR_NOTIFY:
@@ -2304,11 +2292,11 @@ static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
/* Map addr only if there is data_count */ /* Map addr only if there is data_count */
if (dsp_value) { if (dsp_value) {
io_task->mtask_addr = pci_map_single(phba->pcidev, io_task->mtask_addr = dma_map_single(&phba->pcidev->dev,
task->data, task->data,
task->data_count, task->data_count,
PCI_DMA_TODEVICE); DMA_TO_DEVICE);
if (pci_dma_mapping_error(phba->pcidev, if (dma_mapping_error(&phba->pcidev->dev,
io_task->mtask_addr)) io_task->mtask_addr))
return -ENOMEM; return -ENOMEM;
io_task->mtask_data_count = task->data_count; io_task->mtask_data_count = task->data_count;
@@ -2519,10 +2507,9 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
BEISCSI_MAX_FRAGS_INIT); BEISCSI_MAX_FRAGS_INIT);
curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
do { do {
mem_arr->virtual_address = pci_alloc_consistent( mem_arr->virtual_address =
phba->pcidev, dma_alloc_coherent(&phba->pcidev->dev,
curr_alloc_size, curr_alloc_size, &bus_add, GFP_KERNEL);
&bus_add);
if (!mem_arr->virtual_address) { if (!mem_arr->virtual_address) {
if (curr_alloc_size <= BE_MIN_MEM_SIZE) if (curr_alloc_size <= BE_MIN_MEM_SIZE)
goto free_mem; goto free_mem;
@@ -2560,7 +2547,7 @@ free_mem:
mem_descr->num_elements = j; mem_descr->num_elements = j;
while ((i) || (j)) { while ((i) || (j)) {
for (j = mem_descr->num_elements; j > 0; j--) { for (j = mem_descr->num_elements; j > 0; j--) {
pci_free_consistent(phba->pcidev, dma_free_coherent(&phba->pcidev->dev,
mem_descr->mem_array[j - 1].size, mem_descr->mem_array[j - 1].size,
mem_descr->mem_array[j - 1]. mem_descr->mem_array[j - 1].
virtual_address, virtual_address,
@@ -3031,9 +3018,9 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
eq = &phwi_context->be_eq[i].q; eq = &phwi_context->be_eq[i].q;
mem = &eq->dma_mem; mem = &eq->dma_mem;
phwi_context->be_eq[i].phba = phba; phwi_context->be_eq[i].phba = phba;
eq_vaddress = pci_alloc_consistent(phba->pcidev, eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
num_eq_pages * PAGE_SIZE, num_eq_pages * PAGE_SIZE,
&paddr); &paddr, GFP_KERNEL);
if (!eq_vaddress) { if (!eq_vaddress) {
ret = -ENOMEM; ret = -ENOMEM;
goto create_eq_error; goto create_eq_error;
@@ -3069,7 +3056,7 @@ create_eq_error:
eq = &phwi_context->be_eq[i].q; eq = &phwi_context->be_eq[i].q;
mem = &eq->dma_mem; mem = &eq->dma_mem;
if (mem->va) if (mem->va)
pci_free_consistent(phba->pcidev, num_eq_pages dma_free_coherent(&phba->pcidev->dev, num_eq_pages
* PAGE_SIZE, * PAGE_SIZE,
mem->va, mem->dma); mem->va, mem->dma);
} }
@@ -3097,9 +3084,9 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
pbe_eq->cq = cq; pbe_eq->cq = cq;
pbe_eq->phba = phba; pbe_eq->phba = phba;
mem = &cq->dma_mem; mem = &cq->dma_mem;
cq_vaddress = pci_alloc_consistent(phba->pcidev, cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
num_cq_pages * PAGE_SIZE, num_cq_pages * PAGE_SIZE,
&paddr); &paddr, GFP_KERNEL);
if (!cq_vaddress) { if (!cq_vaddress) {
ret = -ENOMEM; ret = -ENOMEM;
goto create_cq_error; goto create_cq_error;
@@ -3134,7 +3121,7 @@ create_cq_error:
cq = &phwi_context->be_cq[i]; cq = &phwi_context->be_cq[i];
mem = &cq->dma_mem; mem = &cq->dma_mem;
if (mem->va) if (mem->va)
pci_free_consistent(phba->pcidev, num_cq_pages dma_free_coherent(&phba->pcidev->dev, num_cq_pages
* PAGE_SIZE, * PAGE_SIZE,
mem->va, mem->dma); mem->va, mem->dma);
} }
@@ -3326,7 +3313,7 @@ static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
{ {
struct be_dma_mem *mem = &q->dma_mem; struct be_dma_mem *mem = &q->dma_mem;
if (mem->va) { if (mem->va) {
pci_free_consistent(phba->pcidev, mem->size, dma_free_coherent(&phba->pcidev->dev, mem->size,
mem->va, mem->dma); mem->va, mem->dma);
mem->va = NULL; mem->va = NULL;
} }
@@ -3341,7 +3328,8 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
q->len = len; q->len = len;
q->entry_size = entry_size; q->entry_size = entry_size;
mem->size = len * entry_size; mem->size = len * entry_size;
mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma); mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
GFP_KERNEL);
if (!mem->va) if (!mem->va)
return -ENOMEM; return -ENOMEM;
return 0; return 0;
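
be_queue_alloc() above uses dma_zalloc_coherent(), the zeroing variant of dma_alloc_coherent() that existed at the time of this merge. A sketch of the pattern (struct my_queue stands in for the driver's dma_mem bookkeeping):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct my_queue {
        void *va;
        dma_addr_t dma;
        size_t size;
};

static int my_queue_alloc(struct device *dev, struct my_queue *q,
                          u32 len, u32 entry_size)
{
        q->size = len * entry_size;
        q->va = dma_zalloc_coherent(dev, q->size, &q->dma, GFP_KERNEL);
        if (!q->va)
                return -ENOMEM;
        return 0;
}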
@@ -3479,7 +3467,7 @@ static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
&ctrl->ptag_state[tag].tag_state)) { &ctrl->ptag_state[tag].tag_state)) {
ptag_mem = &ctrl->ptag_state[tag].tag_mem_state; ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
if (ptag_mem->size) { if (ptag_mem->size) {
pci_free_consistent(ctrl->pdev, dma_free_coherent(&ctrl->pdev->dev,
ptag_mem->size, ptag_mem->size,
ptag_mem->va, ptag_mem->va,
ptag_mem->dma); ptag_mem->dma);
@@ -3880,7 +3868,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
j = 0; j = 0;
for (i = 0; i < SE_MEM_MAX; i++) { for (i = 0; i < SE_MEM_MAX; i++) {
for (j = mem_descr->num_elements; j > 0; j--) { for (j = mem_descr->num_elements; j > 0; j--) {
pci_free_consistent(phba->pcidev, dma_free_coherent(&phba->pcidev->dev,
mem_descr->mem_array[j - 1].size, mem_descr->mem_array[j - 1].size,
mem_descr->mem_array[j - 1].virtual_address, mem_descr->mem_array[j - 1].virtual_address,
(unsigned long)mem_descr->mem_array[j - 1]. (unsigned long)mem_descr->mem_array[j - 1].
@@ -4255,10 +4243,10 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
} }
if (io_task->mtask_addr) { if (io_task->mtask_addr) {
pci_unmap_single(phba->pcidev, dma_unmap_single(&phba->pcidev->dev,
io_task->mtask_addr, io_task->mtask_addr,
io_task->mtask_data_count, io_task->mtask_data_count,
PCI_DMA_TODEVICE); DMA_TO_DEVICE);
io_task->mtask_addr = 0; io_task->mtask_addr = 0;
} }
} }
@@ -4852,9 +4840,9 @@ static int beiscsi_bsg_request(struct bsg_job *job)
switch (bsg_req->msgcode) { switch (bsg_req->msgcode) {
case ISCSI_BSG_HST_VENDOR: case ISCSI_BSG_HST_VENDOR:
nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
job->request_payload.payload_len, job->request_payload.payload_len,
&nonemb_cmd.dma); &nonemb_cmd.dma, GFP_KERNEL);
if (nonemb_cmd.va == NULL) { if (nonemb_cmd.va == NULL) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BM_%d : Failed to allocate memory for " "BM_%d : Failed to allocate memory for "
@@ -4867,7 +4855,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BM_%d : MBX Tag Allocation Failed\n"); "BM_%d : MBX Tag Allocation Failed\n");
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma); nonemb_cmd.va, nonemb_cmd.dma);
return -EAGAIN; return -EAGAIN;
} }
@@ -4881,7 +4869,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
clear_bit(MCC_TAG_STATE_RUNNING, clear_bit(MCC_TAG_STATE_RUNNING,
&phba->ctrl.ptag_state[tag].tag_state); &phba->ctrl.ptag_state[tag].tag_state);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma); nonemb_cmd.va, nonemb_cmd.dma);
return -EIO; return -EIO;
} }
@@ -4898,7 +4886,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
bsg_reply->result = status; bsg_reply->result = status;
bsg_job_done(job, bsg_reply->result, bsg_job_done(job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len); bsg_reply->reply_payload_rcv_len);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma); nonemb_cmd.va, nonemb_cmd.dma);
if (status || extd_status) { if (status || extd_status) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -5754,7 +5742,7 @@ free_twq:
beiscsi_cleanup_port(phba); beiscsi_cleanup_port(phba);
beiscsi_free_mem(phba); beiscsi_free_mem(phba);
free_port: free_port:
pci_free_consistent(phba->pcidev, dma_free_coherent(&phba->pcidev->dev,
phba->ctrl.mbox_mem_alloced.size, phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va, phba->ctrl.mbox_mem_alloced.va,
phba->ctrl.mbox_mem_alloced.dma); phba->ctrl.mbox_mem_alloced.dma);
@@ -5798,7 +5786,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
/* ctrl uninit */ /* ctrl uninit */
beiscsi_unmap_pci_function(phba); beiscsi_unmap_pci_function(phba);
pci_free_consistent(phba->pcidev, dma_free_coherent(&phba->pcidev->dev,
phba->ctrl.mbox_mem_alloced.size, phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va, phba->ctrl.mbox_mem_alloced.va,
phba->ctrl.mbox_mem_alloced.dma); phba->ctrl.mbox_mem_alloced.dma);


@@ -284,7 +284,7 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
return rc; return rc;
free_cmd: free_cmd:
pci_free_consistent(ctrl->pdev, nonemb_cmd->size, dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd->size,
nonemb_cmd->va, nonemb_cmd->dma); nonemb_cmd->va, nonemb_cmd->dma);
return rc; return rc;
} }
@@ -293,7 +293,8 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
struct be_dma_mem *cmd, struct be_dma_mem *cmd,
u8 subsystem, u8 opcode, u32 size) u8 subsystem, u8 opcode, u32 size)
{ {
cmd->va = pci_zalloc_consistent(phba->ctrl.pdev, size, &cmd->dma); cmd->va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
GFP_KERNEL);
if (!cmd->va) { if (!cmd->va) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BG_%d : Failed to allocate memory for if info\n"); "BG_%d : Failed to allocate memory for if info\n");
@@ -315,7 +316,7 @@ static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag)
__beiscsi_mcc_compl_status(phba, tag, NULL, NULL); __beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state; tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
if (tag_mem->size) { if (tag_mem->size) {
pci_free_consistent(phba->pcidev, tag_mem->size, dma_free_coherent(&phba->pcidev->dev, tag_mem->size,
tag_mem->va, tag_mem->dma); tag_mem->va, tag_mem->dma);
tag_mem->size = 0; tag_mem->size = 0;
} }
@@ -761,7 +762,7 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
"BG_%d : Memory Allocation Failure\n"); "BG_%d : Memory Allocation Failure\n");
/* Free the DMA memory for the IOCTL issuing */ /* Free the DMA memory for the IOCTL issuing */
pci_free_consistent(phba->ctrl.pdev, dma_free_coherent(&phba->ctrl.pdev->dev,
nonemb_cmd.size, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.va,
nonemb_cmd.dma); nonemb_cmd.dma);
@@ -780,7 +781,7 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
ioctl_size += sizeof(struct be_cmd_req_hdr); ioctl_size += sizeof(struct be_cmd_req_hdr);
/* Free the previous allocated DMA memory */ /* Free the previous allocated DMA memory */
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.va,
nonemb_cmd.dma); nonemb_cmd.dma);
@@ -869,7 +870,7 @@ static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
status); status);
boot_work = 0; boot_work = 0;
} }
pci_free_consistent(phba->ctrl.pdev, bs->nonemb_cmd.size, dma_free_coherent(&phba->ctrl.pdev->dev, bs->nonemb_cmd.size,
bs->nonemb_cmd.va, bs->nonemb_cmd.dma); bs->nonemb_cmd.va, bs->nonemb_cmd.dma);
bs->nonemb_cmd.va = NULL; bs->nonemb_cmd.va = NULL;
break; break;
@@ -1012,9 +1013,10 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
nonemb_cmd = &phba->boot_struct.nonemb_cmd; nonemb_cmd = &phba->boot_struct.nonemb_cmd;
nonemb_cmd->size = sizeof(struct be_cmd_get_session_resp); nonemb_cmd->size = sizeof(struct be_cmd_get_session_resp);
nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev, nonemb_cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
nonemb_cmd->size, nonemb_cmd->size,
&nonemb_cmd->dma); &nonemb_cmd->dma,
GFP_KERNEL);
if (!nonemb_cmd->va) { if (!nonemb_cmd->va) {
mutex_unlock(&ctrl->mbox_lock); mutex_unlock(&ctrl->mbox_lock);
return 0; return 0;
@@ -1508,9 +1510,10 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
return -EINVAL; return -EINVAL;
nonemb_cmd.size = sizeof(union be_invldt_cmds_params); nonemb_cmd.size = sizeof(union be_invldt_cmds_params);
nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev, nonemb_cmd.va = dma_zalloc_coherent(&phba->ctrl.pdev->dev,
nonemb_cmd.size, nonemb_cmd.size,
&nonemb_cmd.dma); &nonemb_cmd.dma,
GFP_KERNEL);
if (!nonemb_cmd.va) { if (!nonemb_cmd.va) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
"BM_%d : invldt_cmds_params alloc failed\n"); "BM_%d : invldt_cmds_params alloc failed\n");
@@ -1521,7 +1524,7 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
wrb = alloc_mcc_wrb(phba, &tag); wrb = alloc_mcc_wrb(phba, &tag);
if (!wrb) { if (!wrb) {
mutex_unlock(&ctrl->mbox_lock); mutex_unlock(&ctrl->mbox_lock);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma); nonemb_cmd.va, nonemb_cmd.dma);
return -ENOMEM; return -ENOMEM;
} }
@@ -1548,7 +1551,7 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd); rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
if (rc != -EBUSY) if (rc != -EBUSY)
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma); nonemb_cmd.va, nonemb_cmd.dma);
return rc; return rc;
} }


@@ -1453,7 +1453,7 @@ union bfa_aen_data_u {
struct bfa_aen_entry_s { struct bfa_aen_entry_s {
struct list_head qe; struct list_head qe;
enum bfa_aen_category aen_category; enum bfa_aen_category aen_category;
u32 aen_type; int aen_type;
union bfa_aen_data_u aen_data; union bfa_aen_data_u aen_data;
u64 aen_tv_sec; u64 aen_tv_sec;
u64 aen_tv_usec; u64 aen_tv_usec;


@@ -190,27 +190,6 @@ fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
fchs->ox_id = ox_id; fchs->ox_id = ox_id;
} }
enum fc_parse_status
fc_els_rsp_parse(struct fchs_s *fchs, int len)
{
struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd;
len = len;
switch (els_cmd->els_code) {
case FC_ELS_LS_RJT:
if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
return FC_PARSE_BUSY;
else
return FC_PARSE_FAILURE;
case FC_ELS_ACC:
return FC_PARSE_OK;
}
return FC_PARSE_OK;
}
static void static void
fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
{ {
@@ -830,18 +809,6 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
return sizeof(struct fc_rpsc_acc_s); return sizeof(struct fc_rpsc_acc_s);
} }
u16
fc_logo_rsp_parse(struct fchs_s *fchs, int len)
{
struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
len = len;
if (els_cmd->els_code != FC_ELS_ACC)
return FC_PARSE_FAILURE;
return FC_PARSE_OK;
}
u16 u16
fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
wwn_t port_name, wwn_t node_name, u16 pdu_size) wwn_t port_name, wwn_t node_name, u16 pdu_size)
@@ -907,40 +874,6 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
return be16_to_cpu(prlo->payload_len); return be16_to_cpu(prlo->payload_len);
} }
u16
fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
{
struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1);
int num_pages = 0;
int page = 0;
len = len;
if (prlo->command != FC_ELS_ACC)
return FC_PARSE_FAILURE;
num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16;
for (page = 0; page < num_pages; page++) {
if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP)
return FC_PARSE_FAILURE;
if (prlo->prlo_acc_params[page].opa_valid != 0)
return FC_PARSE_FAILURE;
if (prlo->prlo_acc_params[page].rpa_valid != 0)
return FC_PARSE_FAILURE;
if (prlo->prlo_acc_params[page].orig_process_assc != 0)
return FC_PARSE_FAILURE;
if (prlo->prlo_acc_params[page].resp_process_assc != 0)
return FC_PARSE_FAILURE;
}
return FC_PARSE_OK;
}
u16 u16
fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id) int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id)
@@ -971,47 +904,6 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
return be16_to_cpu(tprlo->payload_len); return be16_to_cpu(tprlo->payload_len);
} }
u16
fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
{
struct fc_tprlo_acc_s *tprlo = (struct fc_tprlo_acc_s *) (fchs + 1);
int num_pages = 0;
int page = 0;
len = len;
if (tprlo->command != FC_ELS_ACC)
return FC_PARSE_ACC_INVAL;
num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
for (page = 0; page < num_pages; page++) {
if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
return FC_PARSE_NOT_FCP;
if (tprlo->tprlo_acc_params[page].opa_valid != 0)
return FC_PARSE_OPAFLAG_INVAL;
if (tprlo->tprlo_acc_params[page].rpa_valid != 0)
return FC_PARSE_RPAFLAG_INVAL;
if (tprlo->tprlo_acc_params[page].orig_process_assc != 0)
return FC_PARSE_OPA_INVAL;
if (tprlo->tprlo_acc_params[page].resp_process_assc != 0)
return FC_PARSE_RPA_INVAL;
}
return FC_PARSE_OK;
}
enum fc_parse_status
fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
{
struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
len = len;
if (els_cmd->els_code != FC_ELS_ACC)
return FC_PARSE_FAILURE;
return FC_PARSE_OK;
}
u16 u16
fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id, fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id,
u32 reason_code, u32 reason_expl) u32 reason_code, u32 reason_expl)


@@ -163,7 +163,6 @@ enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len);
u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id, u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id,
u32 s_id, u16 ox_id, u16 rrq_oxid); u32 s_id, u16 ox_id, u16 rrq_oxid);
enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id, u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
u16 ox_id, u8 *name); u16 ox_id, u8 *name);
@@ -276,8 +275,6 @@ void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
__be16 ox_id); __be16 ox_id);
enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len);
enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len, enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len,
wwn_t port_name); wwn_t port_name);
@@ -297,8 +294,6 @@ u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
u32 d_id, u32 s_id, __be16 ox_id, int num_pages); u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, wwn_t port_name, wwn_t node_name, u16 ox_id, wwn_t port_name, wwn_t node_name,
u16 pdu_size); u16 pdu_size);
@@ -308,14 +303,10 @@ u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name);
u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, int num_pages); u16 ox_id, int num_pages);
u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len);
u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type, u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type,
u32 tpr_id); u32 tpr_id);
u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
__be16 ox_id, u32 reason_code, u32 reason_expl); __be16 ox_id, u32 reason_code, u32 reason_expl);


@@ -143,7 +143,7 @@ struct bfad_im_s {
static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry, static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry,
struct bfad_s *drv, int cnt, struct bfad_s *drv, int cnt,
enum bfa_aen_category cat, enum bfa_aen_category cat,
enum bfa_ioc_aen_event evt) int evt)
{ {
struct timespec64 ts; struct timespec64 ts;


@@ -432,7 +432,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
struct fcoe_rcv_info *fr; struct fcoe_rcv_info *fr;
struct fcoe_percpu_s *bg; struct fcoe_percpu_s *bg;
struct sk_buff *tmp_skb; struct sk_buff *tmp_skb;
unsigned short oxid;
interface = container_of(ptype, struct bnx2fc_interface, interface = container_of(ptype, struct bnx2fc_interface,
fcoe_packet_type); fcoe_packet_type);
@@ -466,8 +465,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
fh = (struct fc_frame_header *) skb_transport_header(skb); fh = (struct fc_frame_header *) skb_transport_header(skb);
oxid = ntohs(fh->fh_ox_id);
fr = fcoe_dev_from_skb(skb); fr = fcoe_dev_from_skb(skb);
fr->fr_dev = lport; fr->fr_dev = lport;


@@ -210,11 +210,8 @@ csio_pci_init(struct pci_dev *pdev, int *bars)
pci_set_master(pdev); pci_set_master(pdev);
pci_try_set_mwi(pdev); pci_try_set_mwi(pdev);
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
} else {
dev_err(&pdev->dev, "No suitable DMA available.\n"); dev_err(&pdev->dev, "No suitable DMA available.\n");
goto err_release_regions; goto err_release_regions;
} }
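
Most of the DMA-API conversions in this series have exactly the shape of the csio hunk above: the old pci_set_dma_mask()/pci_set_consistent_dma_mask() pair collapses into a single dma_set_mask_and_coherent() call with a 32-bit fallback. A minimal sketch of the pattern (the function name is illustrative, not from this series):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch: prefer a 64-bit DMA mask and fall back to 32-bit.
 * dma_set_mask_and_coherent() sets the streaming and coherent masks
 * in one call, replacing the PCI-specific pair it supersedes.
 */
static int example_set_dma_mask(struct pci_dev *pdev)
{
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		return -ENODEV;
	}
	return 0;
}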


@@ -1845,8 +1845,8 @@ csio_ln_fdmi_init(struct csio_lnode *ln)
/* Allocate Dma buffers for FDMI response Payload */ /* Allocate Dma buffers for FDMI response Payload */
dma_buf = &ln->mgmt_req->dma_buf; dma_buf = &ln->mgmt_req->dma_buf;
dma_buf->len = 2048; dma_buf->len = 2048;
dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, dma_buf->len,
&dma_buf->paddr); &dma_buf->paddr, GFP_KERNEL);
if (!dma_buf->vaddr) { if (!dma_buf->vaddr) {
csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n"); csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
kfree(ln->mgmt_req); kfree(ln->mgmt_req);
@@ -1873,7 +1873,7 @@ csio_ln_fdmi_exit(struct csio_lnode *ln)
dma_buf = &ln->mgmt_req->dma_buf; dma_buf = &ln->mgmt_req->dma_buf;
if (dma_buf->vaddr) if (dma_buf->vaddr)
pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr, dma_free_coherent(&hw->pdev->dev, dma_buf->len, dma_buf->vaddr,
dma_buf->paddr); dma_buf->paddr);
kfree(ln->mgmt_req); kfree(ln->mgmt_req);


@@ -2349,8 +2349,8 @@ csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
} }
/* Allocate Dma buffers for DDP */ /* Allocate Dma buffers for DDP */
ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size, ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size,
&ddp_desc->paddr); &ddp_desc->paddr, GFP_KERNEL);
if (!ddp_desc->vaddr) { if (!ddp_desc->vaddr) {
csio_err(hw, csio_err(hw,
"SCSI response DMA buffer (ddp) allocation" "SCSI response DMA buffer (ddp) allocation"
@@ -2372,8 +2372,8 @@ no_mem:
list_for_each(tmp, &scm->ddp_freelist) { list_for_each(tmp, &scm->ddp_freelist) {
ddp_desc = (struct csio_dma_buf *) tmp; ddp_desc = (struct csio_dma_buf *) tmp;
tmp = csio_list_prev(tmp); tmp = csio_list_prev(tmp);
pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr, dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
ddp_desc->paddr); ddp_desc->vaddr, ddp_desc->paddr);
list_del_init(&ddp_desc->list); list_del_init(&ddp_desc->list);
kfree(ddp_desc); kfree(ddp_desc);
} }
@@ -2399,8 +2399,8 @@ csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
list_for_each(tmp, &scm->ddp_freelist) { list_for_each(tmp, &scm->ddp_freelist) {
ddp_desc = (struct csio_dma_buf *) tmp; ddp_desc = (struct csio_dma_buf *) tmp;
tmp = csio_list_prev(tmp); tmp = csio_list_prev(tmp);
pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr, dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
ddp_desc->paddr); ddp_desc->vaddr, ddp_desc->paddr);
list_del_init(&ddp_desc->list); list_del_init(&ddp_desc->list);
kfree(ddp_desc); kfree(ddp_desc);
} }


@@ -124,8 +124,8 @@ csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
while (n--) { while (n--) {
buf->len = sge->sge_fl_buf_size[sreg]; buf->len = sge->sge_fl_buf_size[sreg];
buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len, buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, buf->len,
&buf->paddr); &buf->paddr, GFP_KERNEL);
if (!buf->vaddr) { if (!buf->vaddr) {
csio_err(hw, "Could only fill %d buffers!\n", n + 1); csio_err(hw, "Could only fill %d buffers!\n", n + 1);
return -ENOMEM; return -ENOMEM;
@@ -233,7 +233,8 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
q = wrm->q_arr[free_idx]; q = wrm->q_arr[free_idx];
q->vstart = pci_zalloc_consistent(hw->pdev, qsz, &q->pstart); q->vstart = dma_zalloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
GFP_KERNEL);
if (!q->vstart) { if (!q->vstart) {
csio_err(hw, csio_err(hw,
"Failed to allocate DMA memory for " "Failed to allocate DMA memory for "
@@ -1703,14 +1704,14 @@ csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
buf = &q->un.fl.bufs[j]; buf = &q->un.fl.bufs[j];
if (!buf->vaddr) if (!buf->vaddr)
continue; continue;
pci_free_consistent(hw->pdev, buf->len, dma_free_coherent(&hw->pdev->dev,
buf->vaddr, buf->len, buf->vaddr,
buf->paddr); buf->paddr);
} }
kfree(q->un.fl.bufs); kfree(q->un.fl.bufs);
} }
pci_free_consistent(hw->pdev, q->size, dma_free_coherent(&hw->pdev->dev, q->size,
q->vstart, q->pstart); q->vstart, q->pstart);
} }
kfree(q); kfree(q);
} }
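
The allocation side of the conversion is equally mechanical: pci_alloc_consistent()/pci_free_consistent() become dma_alloc_coherent()/dma_free_coherent() against &pdev->dev, with the allocation context made explicit (the old PCI wrappers implied GFP_ATOMIC). A hedged sketch with illustrative names:

#include <linux/dma-mapping.h>

/* Sketch: coherent DMA buffer lifetime with the generic API. */
static void *example_ring_alloc(struct device *dev, size_t size,
				dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

static void example_ring_free(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t handle)
{
	dma_free_coherent(dev, size, vaddr, handle);
}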


@@ -35,6 +35,11 @@ static unsigned int dbg_level;
#include "../libcxgbi.h" #include "../libcxgbi.h"
#ifdef CONFIG_CHELSIO_T4_DCB
#include <net/dcbevent.h>
#include "cxgb4_dcb.h"
#endif
#define DRV_MODULE_NAME "cxgb4i" #define DRV_MODULE_NAME "cxgb4i"
#define DRV_MODULE_DESC "Chelsio T4-T6 iSCSI Driver" #define DRV_MODULE_DESC "Chelsio T4-T6 iSCSI Driver"
#define DRV_MODULE_VERSION "0.9.5-ko" #define DRV_MODULE_VERSION "0.9.5-ko"
@@ -155,6 +160,15 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.session_recovery_timedout = iscsi_session_recovery_timedout, .session_recovery_timedout = iscsi_session_recovery_timedout,
}; };
#ifdef CONFIG_CHELSIO_T4_DCB
static int
cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *);
static struct notifier_block cxgb4_dcb_change = {
.notifier_call = cxgb4_dcb_change_notify,
};
#endif
static struct scsi_transport_template *cxgb4i_stt; static struct scsi_transport_template *cxgb4i_stt;
/* /*
@@ -574,6 +588,9 @@ static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
int nparams, flowclen16, flowclen; int nparams, flowclen16, flowclen;
nparams = FLOWC_WR_NPARAMS_MIN; nparams = FLOWC_WR_NPARAMS_MIN;
#ifdef CONFIG_CHELSIO_T4_DCB
nparams++;
#endif
flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]); flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
flowclen16 = DIV_ROUND_UP(flowclen, 16); flowclen16 = DIV_ROUND_UP(flowclen, 16);
flowclen = flowclen16 * 16; flowclen = flowclen16 * 16;
@@ -595,6 +612,9 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
struct fw_flowc_wr *flowc; struct fw_flowc_wr *flowc;
int nparams, flowclen16, flowclen; int nparams, flowclen16, flowclen;
#ifdef CONFIG_CHELSIO_T4_DCB
u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif
flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen); flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
skb = alloc_wr(flowclen, 0, GFP_ATOMIC); skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
flowc = (struct fw_flowc_wr *)skb->head; flowc = (struct fw_flowc_wr *)skb->head;
@@ -622,6 +642,17 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
flowc->mnemval[8].val = 0; flowc->mnemval[8].val = 0;
flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX; flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
flowc->mnemval[8].val = 16384; flowc->mnemval[8].val = 16384;
#ifdef CONFIG_CHELSIO_T4_DCB
flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
if (vlan == CPL_L2T_VLAN_NONE) {
pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
csk->tid);
flowc->mnemval[9].val = cpu_to_be32(0);
} else {
flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >>
VLAN_PRIO_SHIFT);
}
#endif
set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
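
The new FW_FLOWC_MNEM_DCBPRIO mnemonic above carries the 802.1p priority, which lives in the top three bits of the VLAN tag, so extracting it is a mask and shift. A self-contained sketch (VLAN_PRIO_MASK is 0xe000 and VLAN_PRIO_SHIFT is 13 in <linux/if_vlan.h>; the function name is illustrative):

#include <linux/if_vlan.h>

/* Sketch: recover the 802.1p priority (PCP, TCI bits 15:13).
 * For a TCI of 0x6042 this returns 3.
 */
static inline u8 example_vlan_prio(u16 vlan_tci)
{
	return (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
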
@@ -1600,6 +1631,46 @@ static void release_offload_resources(struct cxgbi_sock *csk)
csk->dst = NULL; csk->dst = NULL;
} }
#ifdef CONFIG_CHELSIO_T4_DCB
static inline u8 get_iscsi_dcb_state(struct net_device *ndev)
{
return ndev->dcbnl_ops->getstate(ndev);
}
static int select_priority(int pri_mask)
{
if (!pri_mask)
return 0;
return (ffs(pri_mask) - 1);
}
static u8 get_iscsi_dcb_priority(struct net_device *ndev)
{
int rv;
u8 caps;
struct dcb_app iscsi_dcb_app = {
.protocol = 3260
};
rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
if (rv)
return 0;
if (caps & DCB_CAP_DCBX_VER_IEEE) {
iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
} else if (caps & DCB_CAP_DCBX_VER_CEE) {
iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
rv = dcb_getapp(ndev, &iscsi_dcb_app);
}
log_debug(1 << CXGBI_DBG_ISCSI,
"iSCSI priority is set to %u\n", select_priority(rv));
return select_priority(rv);
}
#endif
static int init_act_open(struct cxgbi_sock *csk) static int init_act_open(struct cxgbi_sock *csk)
{ {
struct cxgbi_device *cdev = csk->cdev; struct cxgbi_device *cdev = csk->cdev;
@@ -1613,7 +1684,9 @@ static int init_act_open(struct cxgbi_sock *csk)
unsigned int size, size6; unsigned int size, size6;
unsigned int linkspeed; unsigned int linkspeed;
unsigned int rcv_winf, snd_winf; unsigned int rcv_winf, snd_winf;
#ifdef CONFIG_CHELSIO_T4_DCB
u8 priority = 0;
#endif
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,0x%lx,%u.\n", "csk 0x%p,%u,0x%lx,%u.\n",
csk, csk->state, csk->flags, csk->tid); csk, csk->state, csk->flags, csk->tid);
@@ -1647,7 +1720,15 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk); cxgbi_sock_get(csk);
#ifdef CONFIG_CHELSIO_T4_DCB
if (get_iscsi_dcb_state(ndev))
priority = get_iscsi_dcb_priority(ndev);
csk->dcb_priority = priority;
csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
#else
csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0); csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
#endif
if (!csk->l2t) { if (!csk->l2t) {
pr_err("%s, cannot alloc l2t.\n", ndev->name); pr_err("%s, cannot alloc l2t.\n", ndev->name);
goto rel_resource_without_clip; goto rel_resource_without_clip;
@@ -2146,6 +2227,70 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
return 0; return 0;
} }
#ifdef CONFIG_CHELSIO_T4_DCB
static int
cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
void *data)
{
int i, port = 0xFF;
struct net_device *ndev;
struct cxgbi_device *cdev = NULL;
struct dcb_app_type *iscsi_app = data;
struct cxgbi_ports_map *pmap;
u8 priority;
if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
return NOTIFY_DONE;
priority = iscsi_app->app.priority;
} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
return NOTIFY_DONE;
if (!iscsi_app->app.priority)
return NOTIFY_DONE;
priority = ffs(iscsi_app->app.priority) - 1;
} else {
return NOTIFY_DONE;
}
if (iscsi_app->app.protocol != 3260)
return NOTIFY_DONE;
log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
iscsi_app->ifindex, priority);
ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
if (!ndev)
return NOTIFY_DONE;
cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);
dev_put(ndev);
if (!cdev)
return NOTIFY_DONE;
pmap = &cdev->pmap;
for (i = 0; i < pmap->used; i++) {
if (pmap->port_csk[i]) {
struct cxgbi_sock *csk = pmap->port_csk[i];
if (csk->dcb_priority != priority) {
iscsi_conn_failure(csk->user_data,
ISCSI_ERR_CONN_FAILED);
pr_info("Restarting iSCSI connection %p with "
"priority %u->%u.\n", csk,
csk->dcb_priority, priority);
}
}
}
return NOTIFY_OK;
}
#endif
static int __init cxgb4i_init_module(void) static int __init cxgb4i_init_module(void)
{ {
int rc; int rc;
@@ -2157,11 +2302,18 @@ static int __init cxgb4i_init_module(void)
return rc; return rc;
cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info); cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
#ifdef CONFIG_CHELSIO_T4_DCB
pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
register_dcbevent_notifier(&cxgb4_dcb_change);
#endif
return 0; return 0;
} }
static void __exit cxgb4i_exit_module(void) static void __exit cxgb4i_exit_module(void)
{ {
#ifdef CONFIG_CHELSIO_T4_DCB
unregister_dcbevent_notifier(&cxgb4_dcb_change);
#endif
cxgb4_unregister_uld(CXGB4_ULD_ISCSI); cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4); cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt); cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);

View File

@@ -120,6 +120,9 @@ struct cxgbi_sock {
int wr_max_cred; int wr_max_cred;
int wr_cred; int wr_cred;
int wr_una_cred; int wr_una_cred;
#ifdef CONFIG_CHELSIO_T4_DCB
u8 dcb_priority;
#endif
unsigned char hcrc_len; unsigned char hcrc_len;
unsigned char dcrc_len; unsigned char dcrc_len;


@@ -753,105 +753,6 @@ static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
return NULL; return NULL;
} }
static struct ScsiReqBlk *srb_get_free(struct AdapterCtlBlk *acb)
{
struct list_head *head = &acb->srb_free_list;
struct ScsiReqBlk *srb = NULL;
if (!list_empty(head)) {
srb = list_entry(head->next, struct ScsiReqBlk, list);
list_del(head->next);
dprintkdbg(DBG_0, "srb_get_free: srb=%p\n", srb);
}
return srb;
}
static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
dprintkdbg(DBG_0, "srb_free_insert: srb=%p\n", srb);
list_add_tail(&srb->list, &acb->srb_free_list);
}
static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
struct ScsiReqBlk *srb)
{
dprintkdbg(DBG_0, "srb_waiting_insert: (0x%p) <%02i-%i> srb=%p\n",
srb->cmd, dcb->target_id, dcb->target_lun, srb);
list_add(&srb->list, &dcb->srb_waiting_list);
}
static void srb_waiting_append(struct DeviceCtlBlk *dcb,
struct ScsiReqBlk *srb)
{
dprintkdbg(DBG_0, "srb_waiting_append: (0x%p) <%02i-%i> srb=%p\n",
srb->cmd, dcb->target_id, dcb->target_lun, srb);
list_add_tail(&srb->list, &dcb->srb_waiting_list);
}
static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
dprintkdbg(DBG_0, "srb_going_append: (0x%p) <%02i-%i> srb=%p\n",
srb->cmd, dcb->target_id, dcb->target_lun, srb);
list_add_tail(&srb->list, &dcb->srb_going_list);
}
static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
struct ScsiReqBlk *i;
struct ScsiReqBlk *tmp;
dprintkdbg(DBG_0, "srb_going_remove: (0x%p) <%02i-%i> srb=%p\n",
srb->cmd, dcb->target_id, dcb->target_lun, srb);
list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
if (i == srb) {
list_del(&srb->list);
break;
}
}
static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
struct ScsiReqBlk *srb)
{
struct ScsiReqBlk *i;
struct ScsiReqBlk *tmp;
dprintkdbg(DBG_0, "srb_waiting_remove: (0x%p) <%02i-%i> srb=%p\n",
srb->cmd, dcb->target_id, dcb->target_lun, srb);
list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
if (i == srb) {
list_del(&srb->list);
break;
}
}
static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
struct ScsiReqBlk *srb)
{
dprintkdbg(DBG_0,
"srb_going_to_waiting_move: (0x%p) <%02i-%i> srb=%p\n",
srb->cmd, dcb->target_id, dcb->target_lun, srb);
list_move(&srb->list, &dcb->srb_waiting_list);
}
static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
struct ScsiReqBlk *srb)
{
dprintkdbg(DBG_0,
"srb_waiting_to_going_move: (0x%p) <%02i-%i> srb=%p\n",
srb->cmd, dcb->target_id, dcb->target_lun, srb);
list_move(&srb->list, &dcb->srb_going_list);
}
/* Sets the timer to wake us up */ /* Sets the timer to wake us up */
static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to) static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
{ {
@@ -923,7 +824,7 @@ static void waiting_process_next(struct AdapterCtlBlk *acb)
/* Try to send to the bus */ /* Try to send to the bus */
if (!start_scsi(acb, pos, srb)) if (!start_scsi(acb, pos, srb))
srb_waiting_to_going_move(pos, srb); list_move(&srb->list, &pos->srb_going_list);
else else
waiting_set_timer(acb, HZ/50); waiting_set_timer(acb, HZ/50);
break; break;
@@ -960,15 +861,15 @@ static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
if (dcb->max_command <= list_size(&dcb->srb_going_list) || if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
acb->active_dcb || acb->active_dcb ||
(acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) { (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
srb_waiting_append(dcb, srb); list_add_tail(&srb->list, &dcb->srb_waiting_list);
waiting_process_next(acb); waiting_process_next(acb);
return; return;
} }
if (!start_scsi(acb, dcb, srb)) if (!start_scsi(acb, dcb, srb)) {
srb_going_append(dcb, srb); list_add_tail(&srb->list, &dcb->srb_going_list);
else { } else {
srb_waiting_insert(dcb, srb); list_add(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 50); waiting_set_timer(acb, HZ / 50);
} }
} }
@@ -1045,10 +946,8 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
sgp->length++; sgp->length++;
} }
srb->sg_bus_addr = pci_map_single(dcb->acb->dev, srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev,
srb->segment_x, srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE);
SEGMENTX_LEN,
PCI_DMA_TODEVICE);
dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n", dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN); srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
@@ -1116,9 +1015,9 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
cmd->scsi_done = done; cmd->scsi_done = done;
cmd->result = 0; cmd->result = 0;
srb = srb_get_free(acb); srb = list_first_entry_or_null(&acb->srb_free_list,
if (!srb) struct ScsiReqBlk, list);
{ if (!srb) {
/* /*
* Return 1 since we are unable to queue this command at this * Return 1 since we are unable to queue this command at this
* point in time. * point in time.
@@ -1126,12 +1025,13 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
dprintkdbg(DBG_0, "queue_command: No free srb's\n"); dprintkdbg(DBG_0, "queue_command: No free srb's\n");
return 1; return 1;
} }
list_del(&srb->list);
build_srb(cmd, dcb, srb); build_srb(cmd, dcb, srb);
if (!list_empty(&dcb->srb_waiting_list)) { if (!list_empty(&dcb->srb_waiting_list)) {
/* append to waiting queue */ /* append to waiting queue */
srb_waiting_append(dcb, srb); list_add_tail(&srb->list, &dcb->srb_waiting_list);
waiting_process_next(acb); waiting_process_next(acb);
} else { } else {
/* process immediately */ /* process immediately */
@@ -1376,11 +1276,11 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
srb = find_cmd(cmd, &dcb->srb_waiting_list); srb = find_cmd(cmd, &dcb->srb_waiting_list);
if (srb) { if (srb) {
srb_waiting_remove(dcb, srb); list_del(&srb->list);
pci_unmap_srb_sense(acb, srb); pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb); pci_unmap_srb(acb, srb);
free_tag(dcb, srb); free_tag(dcb, srb);
srb_free_insert(acb, srb); list_add_tail(&srb->list, &acb->srb_free_list);
dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n"); dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
cmd->result = DID_ABORT << 16; cmd->result = DID_ABORT << 16;
return SUCCESS; return SUCCESS;
@@ -1969,14 +1869,15 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
xferred -= psge->length; xferred -= psge->length;
} else { } else {
/* Partial SG entry done */ /* Partial SG entry done */
dma_sync_single_for_cpu(&srb->dcb->acb->dev->dev,
srb->sg_bus_addr, SEGMENTX_LEN,
DMA_TO_DEVICE);
psge->length -= xferred; psge->length -= xferred;
psge->address += xferred; psge->address += xferred;
srb->sg_index = idx; srb->sg_index = idx;
pci_dma_sync_single_for_device(srb->dcb-> dma_sync_single_for_device(&srb->dcb->acb->dev->dev,
acb->dev, srb->sg_bus_addr, SEGMENTX_LEN,
srb->sg_bus_addr, DMA_TO_DEVICE);
SEGMENTX_LEN,
PCI_DMA_TODEVICE);
break; break;
} }
psge++; psge++;
@@ -3083,7 +2984,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
goto disc1; goto disc1;
} }
free_tag(dcb, srb); free_tag(dcb, srb);
srb_going_to_waiting_move(dcb, srb); list_move(&srb->list, &dcb->srb_waiting_list);
dprintkdbg(DBG_KG, dprintkdbg(DBG_KG,
"disconnect: (0x%p) Retry\n", "disconnect: (0x%p) Retry\n",
srb->cmd); srb->cmd);
@@ -3148,7 +3049,7 @@ static void reselect(struct AdapterCtlBlk *acb)
srb->state = SRB_READY; srb->state = SRB_READY;
free_tag(dcb, srb); free_tag(dcb, srb);
srb_going_to_waiting_move(dcb, srb); list_move(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 20); waiting_set_timer(acb, HZ / 20);
/* return; */ /* return; */
@@ -3271,9 +3172,8 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
/* unmap DC395x SG list */ /* unmap DC395x SG list */
dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n", dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
srb->sg_bus_addr, SEGMENTX_LEN); srb->sg_bus_addr, SEGMENTX_LEN);
pci_unmap_single(acb->dev, srb->sg_bus_addr, dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN,
SEGMENTX_LEN, DMA_TO_DEVICE);
PCI_DMA_TODEVICE);
dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n", dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
scsi_sg_count(cmd), scsi_bufflen(cmd)); scsi_sg_count(cmd), scsi_bufflen(cmd));
/* unmap the sg segments */ /* unmap the sg segments */
@@ -3291,8 +3191,8 @@ static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
/* Unmap sense buffer */ /* Unmap sense buffer */
dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n", dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
srb->segment_x[0].address); srb->segment_x[0].address);
pci_unmap_single(acb->dev, srb->segment_x[0].address, dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address,
srb->segment_x[0].length, PCI_DMA_FROMDEVICE); srb->segment_x[0].length, DMA_FROM_DEVICE);
/* Restore SG stuff */ /* Restore SG stuff */
srb->total_xfer_length = srb->xferred; srb->total_xfer_length = srb->xferred;
srb->segment_x[0].address = srb->segment_x[0].address =
@@ -3411,7 +3311,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
tempcnt--; tempcnt--;
dcb->max_command = tempcnt; dcb->max_command = tempcnt;
free_tag(dcb, srb); free_tag(dcb, srb);
srb_going_to_waiting_move(dcb, srb); list_move(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 20); waiting_set_timer(acb, HZ / 20);
srb->adapter_status = 0; srb->adapter_status = 0;
srb->target_status = 0; srb->target_status = 0;
@@ -3447,14 +3347,12 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
} }
} }
if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
scsi_sg_count(cmd), dir);
ckc_only = 0; ckc_only = 0;
/* Check Error Conditions */ /* Check Error Conditions */
ckc_e: ckc_e:
pci_unmap_srb(acb, srb);
if (cmd->cmnd[0] == INQUIRY) { if (cmd->cmnd[0] == INQUIRY) {
unsigned char *base = NULL; unsigned char *base = NULL;
struct ScsiInqData *ptr; struct ScsiInqData *ptr;
@@ -3498,16 +3396,14 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
cmd->cmnd[0], srb->total_xfer_length); cmd->cmnd[0], srb->total_xfer_length);
} }
srb_going_remove(dcb, srb); if (srb != acb->tmp_srb) {
/* Add to free list */ /* Add to free list */
if (srb == acb->tmp_srb)
dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
else {
dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n", dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
cmd, cmd->result); cmd, cmd->result);
srb_free_insert(acb, srb); list_move_tail(&srb->list, &acb->srb_free_list);
} else {
dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
} }
pci_unmap_srb(acb, srb);
cmd->scsi_done(cmd); cmd->scsi_done(cmd);
waiting_process_next(acb); waiting_process_next(acb);
@@ -3535,9 +3431,9 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
result = MK_RES(0, did_flag, 0, 0); result = MK_RES(0, did_flag, 0, 0);
printk("G:%p(%02i-%i) ", p, printk("G:%p(%02i-%i) ", p,
p->device->id, (u8)p->device->lun); p->device->id, (u8)p->device->lun);
srb_going_remove(dcb, srb); list_del(&srb->list);
free_tag(dcb, srb); free_tag(dcb, srb);
srb_free_insert(acb, srb); list_add_tail(&srb->list, &acb->srb_free_list);
p->result = result; p->result = result;
pci_unmap_srb_sense(acb, srb); pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb); pci_unmap_srb(acb, srb);
@@ -3565,8 +3461,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
result = MK_RES(0, did_flag, 0, 0); result = MK_RES(0, did_flag, 0, 0);
printk("W:%p<%02i-%i>", p, p->device->id, printk("W:%p<%02i-%i>", p, p->device->id,
(u8)p->device->lun); (u8)p->device->lun);
srb_waiting_remove(dcb, srb); list_move_tail(&srb->list, &acb->srb_free_list);
srb_free_insert(acb, srb);
p->result = result; p->result = result;
pci_unmap_srb_sense(acb, srb); pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb); pci_unmap_srb(acb, srb);
@@ -3692,9 +3587,9 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE; srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE; srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
/* Map sense buffer */ /* Map sense buffer */
srb->segment_x[0].address = srb->segment_x[0].address = dma_map_single(&acb->dev->dev,
pci_map_single(acb->dev, cmd->sense_buffer, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n", dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
cmd->sense_buffer, srb->segment_x[0].address, cmd->sense_buffer, srb->segment_x[0].address,
SCSI_SENSE_BUFFERSIZE); SCSI_SENSE_BUFFERSIZE);
@@ -3705,7 +3600,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
dprintkl(KERN_DEBUG, dprintkl(KERN_DEBUG,
"request_sense: (0x%p) failed <%02i-%i>\n", "request_sense: (0x%p) failed <%02i-%i>\n",
srb->cmd, dcb->target_id, dcb->target_lun); srb->cmd, dcb->target_id, dcb->target_lun);
srb_going_to_waiting_move(dcb, srb); list_move(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 100); waiting_set_timer(acb, HZ / 100);
} }
} }
@@ -4392,7 +4287,7 @@ static void adapter_init_params(struct AdapterCtlBlk *acb)
/* link static array of srbs into the srb free list */ /* link static array of srbs into the srb free list */
for (i = 0; i < acb->srb_count - 1; i++) for (i = 0; i < acb->srb_count - 1; i++)
srb_free_insert(acb, &acb->srb_array[i]); list_add_tail(&acb->srb_array[i].list, &acb->srb_free_list);
} }
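
The dc395x change above deletes a dozen single-use wrappers in favor of the <linux/list.h> helpers they wrapped. The core pattern -- pop an entry off a free list, or move one between queues -- in a standalone sketch (the struct and function names are illustrative):

#include <linux/list.h>

struct example_req {
	struct list_head list;
	/* ... payload ... */
};

/* Sketch: take the first free request, or NULL when the free list is
 * empty; list_move_tail() unlinks the entry and appends it elsewhere.
 */
static struct example_req *example_req_get(struct list_head *free_list,
					   struct list_head *busy_list)
{
	struct example_req *req;

	req = list_first_entry_or_null(free_list, struct example_req, list);
	if (req)
		list_move_tail(&req->list, busy_list);
	return req;
}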


@@ -369,19 +369,28 @@ static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{ {
struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
struct scatterlist *sg = scsi_sglist(cmd); struct scatterlist *sg = scsi_sglist(cmd);
int dir = cmd->sc_data_direction; int total = 0, i;
int total, i;
if (dir == DMA_NONE) if (cmd->sc_data_direction == DMA_NONE)
return; return;
spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir); if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
/*
* For pseudo DMA and PIO we need the virtual address instead of
* a dma address, so perform an identity mapping.
*/
spriv->num_sg = scsi_sg_count(cmd);
for (i = 0; i < spriv->num_sg; i++) {
sg[i].dma_address = (uintptr_t)sg_virt(&sg[i]);
total += sg_dma_len(&sg[i]);
}
} else {
spriv->num_sg = scsi_dma_map(cmd);
for (i = 0; i < spriv->num_sg; i++)
total += sg_dma_len(&sg[i]);
}
spriv->cur_residue = sg_dma_len(sg); spriv->cur_residue = sg_dma_len(sg);
spriv->cur_sg = sg; spriv->cur_sg = sg;
total = 0;
for (i = 0; i < spriv->u.num_sg; i++)
total += sg_dma_len(&sg[i]);
spriv->tot_residue = total; spriv->tot_residue = total;
} }
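
With ESP_FLAG_NO_DMA_MAP the core keeps one scatterlist walk for both DMA and PIO front ends by storing each segment's kernel virtual address where a bus address would normally go. A minimal sketch of that identity mapping, outside any driver (names illustrative):

#include <linux/scatterlist.h>

/* Sketch: "map" a scatterlist for PIO by recording virtual addresses
 * in sg->dma_address, so consumers that only look at dma_address and
 * length work unchanged.
 */
static void example_identity_map(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		sg->dma_address = (uintptr_t)sg_virt(sg);
}
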
@@ -441,13 +450,8 @@ static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd) static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{ {
struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
int dir = cmd->sc_data_direction; scsi_dma_unmap(cmd);
if (dir == DMA_NONE)
return;
esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
} }
static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent) static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
@@ -478,17 +482,6 @@ static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
spriv->tot_residue = ent->saved_tot_residue; spriv->tot_residue = ent->saved_tot_residue;
} }
static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
{
if (cmd->cmd_len == 6 ||
cmd->cmd_len == 10 ||
cmd->cmd_len == 12) {
esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
} else {
esp->flags |= ESP_FLAG_DOING_SLOWCMD;
}
}
static void esp_write_tgt_config3(struct esp *esp, int tgt) static void esp_write_tgt_config3(struct esp *esp, int tgt)
{ {
if (esp->rev > ESP100A) { if (esp->rev > ESP100A) {
@@ -624,6 +617,26 @@ static void esp_free_lun_tag(struct esp_cmd_entry *ent,
} }
} }
static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
ent->sense_ptr = ent->cmd->sense_buffer;
if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
ent->sense_dma = (uintptr_t)ent->sense_ptr;
return;
}
ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
dma_unmap_single(esp->dev, ent->sense_dma,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
ent->sense_ptr = NULL;
}
/* When a contingent allegiance conditon is created, we force feed a /* When a contingent allegiance conditon is created, we force feed a
* REQUEST_SENSE command to the device to fetch the sense data. I * REQUEST_SENSE command to the device to fetch the sense data. I
* tried many other schemes, relying on the scsi error handling layer * tried many other schemes, relying on the scsi error handling layer
@@ -645,12 +658,7 @@ static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
if (!ent->sense_ptr) { if (!ent->sense_ptr) {
esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n", esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
tgt, lun); tgt, lun);
esp_map_sense(esp, ent);
ent->sense_ptr = cmd->sense_buffer;
ent->sense_dma = esp->ops->map_single(esp,
ent->sense_ptr,
SCSI_SENSE_BUFFERSIZE,
DMA_FROM_DEVICE);
} }
ent->saved_sense_ptr = ent->sense_ptr; ent->saved_sense_ptr = ent->sense_ptr;
@@ -717,10 +725,10 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
static void esp_maybe_execute_command(struct esp *esp) static void esp_maybe_execute_command(struct esp *esp)
{ {
struct esp_target_data *tp; struct esp_target_data *tp;
struct esp_lun_data *lp;
struct scsi_device *dev; struct scsi_device *dev;
struct scsi_cmnd *cmd; struct scsi_cmnd *cmd;
struct esp_cmd_entry *ent; struct esp_cmd_entry *ent;
bool select_and_stop = false;
int tgt, lun, i; int tgt, lun, i;
u32 val, start_cmd; u32 val, start_cmd;
u8 *p; u8 *p;
@@ -743,7 +751,6 @@ static void esp_maybe_execute_command(struct esp *esp)
tgt = dev->id; tgt = dev->id;
lun = dev->lun; lun = dev->lun;
tp = &esp->target[tgt]; tp = &esp->target[tgt];
lp = dev->hostdata;
list_move(&ent->list, &esp->active_cmds); list_move(&ent->list, &esp->active_cmds);
@@ -752,7 +759,8 @@ static void esp_maybe_execute_command(struct esp *esp)
esp_map_dma(esp, cmd); esp_map_dma(esp, cmd);
esp_save_pointers(esp, ent); esp_save_pointers(esp, ent);
esp_check_command_len(esp, cmd); if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
select_and_stop = true;
p = esp->command_block; p = esp->command_block;
@@ -793,42 +801,22 @@ static void esp_maybe_execute_command(struct esp *esp)
tp->flags &= ~ESP_TGT_CHECK_NEGO; tp->flags &= ~ESP_TGT_CHECK_NEGO;
} }
/* Process it like a slow command. */ /* If there are multiple message bytes, use Select and Stop */
if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC)) if (esp->msg_out_len)
esp->flags |= ESP_FLAG_DOING_SLOWCMD; select_and_stop = true;
} }
build_identify: build_identify:
/* If we don't have a lun-data struct yet, we're probing *p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);
* so do not disconnect. Also, do not disconnect unless
* we have a tag on this command.
*/
if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
*p++ = IDENTIFY(1, lun);
else
*p++ = IDENTIFY(0, lun);
if (ent->tag[0] && esp->rev == ESP100) { if (ent->tag[0] && esp->rev == ESP100) {
/* ESP100 lacks select w/atn3 command, use select /* ESP100 lacks select w/atn3 command, use select
* and stop instead. * and stop instead.
*/ */
esp->flags |= ESP_FLAG_DOING_SLOWCMD; select_and_stop = true;
} }
if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) { if (select_and_stop) {
start_cmd = ESP_CMD_SELA;
if (ent->tag[0]) {
*p++ = ent->tag[0];
*p++ = ent->tag[1];
start_cmd = ESP_CMD_SA3;
}
for (i = 0; i < cmd->cmd_len; i++)
*p++ = cmd->cmnd[i];
esp->select_state = ESP_SELECT_BASIC;
} else {
esp->cmd_bytes_left = cmd->cmd_len; esp->cmd_bytes_left = cmd->cmd_len;
esp->cmd_bytes_ptr = &cmd->cmnd[0]; esp->cmd_bytes_ptr = &cmd->cmnd[0];
@@ -843,6 +831,19 @@ build_identify:
start_cmd = ESP_CMD_SELAS; start_cmd = ESP_CMD_SELAS;
esp->select_state = ESP_SELECT_MSGOUT; esp->select_state = ESP_SELECT_MSGOUT;
} else {
start_cmd = ESP_CMD_SELA;
if (ent->tag[0]) {
*p++ = ent->tag[0];
*p++ = ent->tag[1];
start_cmd = ESP_CMD_SA3;
}
for (i = 0; i < cmd->cmd_len; i++)
*p++ = cmd->cmnd[i];
esp->select_state = ESP_SELECT_BASIC;
} }
val = tgt; val = tgt;
if (esp->rev == FASHME) if (esp->rev == FASHME)
@@ -902,9 +903,7 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
} }
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
esp->ops->unmap_single(esp, ent->sense_dma, esp_unmap_sense(esp, ent);
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
ent->sense_ptr = NULL;
/* Restore the message/status bytes to what we actually /* Restore the message/status bytes to what we actually
* saw originally. Also, report that we are providing * saw originally. Also, report that we are providing
@@ -965,7 +964,7 @@ static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_
cmd->scsi_done = done; cmd->scsi_done = done;
spriv = ESP_CMD_PRIV(cmd); spriv = ESP_CMD_PRIV(cmd);
spriv->u.dma_addr = ~(dma_addr_t)0x0; spriv->num_sg = 0;
list_add_tail(&ent->list, &esp->queued_cmds); list_add_tail(&ent->list, &esp->queued_cmds);
@@ -1252,14 +1251,10 @@ static int esp_finish_select(struct esp *esp)
esp_unmap_dma(esp, cmd); esp_unmap_dma(esp, cmd);
esp_free_lun_tag(ent, cmd->device->hostdata); esp_free_lun_tag(ent, cmd->device->hostdata);
tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE); tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
esp->cmd_bytes_ptr = NULL; esp->cmd_bytes_ptr = NULL;
esp->cmd_bytes_left = 0; esp->cmd_bytes_left = 0;
} else { } else {
esp->ops->unmap_single(esp, ent->sense_dma, esp_unmap_sense(esp, ent);
SCSI_SENSE_BUFFERSIZE,
DMA_FROM_DEVICE);
ent->sense_ptr = NULL;
} }
/* Now that the state is unwound properly, put back onto /* Now that the state is unwound properly, put back onto
@@ -1303,9 +1298,8 @@ static int esp_finish_select(struct esp *esp)
esp_flush_fifo(esp); esp_flush_fifo(esp);
} }
/* If we are doing a slow command, negotiation, etc. /* If we are doing a Select And Stop command, negotiation, etc.
* we'll do the right thing as we transition to the * we'll do the right thing as we transition to the next phase.
* next phase.
*/ */
esp_event(esp, ESP_EVENT_CHECK_PHASE); esp_event(esp, ESP_EVENT_CHECK_PHASE);
return 0; return 0;
@@ -1338,6 +1332,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
bytes_sent = esp->data_dma_len; bytes_sent = esp->data_dma_len;
bytes_sent -= ecount; bytes_sent -= ecount;
bytes_sent -= esp->send_cmd_residual;
/* /*
* The am53c974 has a DMA 'pecularity'. The doc states: * The am53c974 has a DMA 'pecularity'. The doc states:
@@ -1358,7 +1353,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
u8 *ptr; u8 *ptr;
ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg, ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
&offset, &count); &offset, &count);
if (likely(ptr)) { if (likely(ptr)) {
*(ptr + offset) = bval; *(ptr + offset) = bval;
@@ -2039,11 +2034,8 @@ static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
esp_free_lun_tag(ent, cmd->device->hostdata); esp_free_lun_tag(ent, cmd->device->hostdata);
cmd->result = DID_RESET << 16; cmd->result = DID_RESET << 16;
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
esp->ops->unmap_single(esp, ent->sense_dma, esp_unmap_sense(esp, ent);
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
ent->sense_ptr = NULL;
}
cmd->scsi_done(cmd); cmd->scsi_done(cmd);
list_del(&ent->list); list_del(&ent->list);
@@ -2382,7 +2374,7 @@ static const char *esp_chip_names[] = {
static struct scsi_transport_template *esp_transport_template; static struct scsi_transport_template *esp_transport_template;
int scsi_esp_register(struct esp *esp, struct device *dev) int scsi_esp_register(struct esp *esp)
{ {
static int instance; static int instance;
int err; int err;
@@ -2402,10 +2394,10 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
esp_bootup_reset(esp); esp_bootup_reset(esp);
dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n", dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
esp->host->unique_id, esp->regs, esp->dma_regs, esp->host->unique_id, esp->regs, esp->dma_regs,
esp->host->irq); esp->host->irq);
dev_printk(KERN_INFO, dev, dev_printk(KERN_INFO, esp->dev,
"esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n", "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
esp->host->unique_id, esp_chip_names[esp->rev], esp->host->unique_id, esp_chip_names[esp->rev],
esp->cfreq / 1000000, esp->cfact, esp->scsi_id); esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
@@ -2413,7 +2405,7 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
/* Let the SCSI bus reset settle. */ /* Let the SCSI bus reset settle. */
ssleep(esp_bus_reset_settle); ssleep(esp_bus_reset_settle);
err = scsi_add_host(esp->host, dev); err = scsi_add_host(esp->host, esp->dev);
if (err) if (err)
return err; return err;
@@ -2790,3 +2782,131 @@ MODULE_PARM_DESC(esp_debug,
module_init(esp_init); module_init(esp_init);
module_exit(esp_exit); module_exit(esp_exit);
#ifdef CONFIG_SCSI_ESP_PIO
static inline unsigned int esp_wait_for_fifo(struct esp *esp)
{
int i = 500000;
do {
unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
if (fbytes)
return fbytes;
udelay(1);
} while (--i);
shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
esp_read8(ESP_STATUS));
return 0;
}
static inline int esp_wait_for_intr(struct esp *esp)
{
int i = 500000;
do {
esp->sreg = esp_read8(ESP_STATUS);
if (esp->sreg & ESP_STAT_INTR)
return 0;
udelay(1);
} while (--i);
shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
esp->sreg);
return 1;
}
#define ESP_FIFO_SIZE 16
void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
u32 dma_count, int write, u8 cmd)
{
u8 phase = esp->sreg & ESP_STAT_PMASK;
cmd &= ~ESP_CMD_DMA;
esp->send_cmd_error = 0;
if (write) {
u8 *dst = (u8 *)addr;
u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
scsi_esp_cmd(esp, cmd);
while (1) {
if (!esp_wait_for_fifo(esp))
break;
*dst++ = readb(esp->fifo_reg);
--esp_count;
if (!esp_count)
break;
if (esp_wait_for_intr(esp)) {
esp->send_cmd_error = 1;
break;
}
if ((esp->sreg & ESP_STAT_PMASK) != phase)
break;
esp->ireg = esp_read8(ESP_INTRPT);
if (esp->ireg & mask) {
esp->send_cmd_error = 1;
break;
}
if (phase == ESP_MIP)
esp_write8(ESP_CMD_MOK, ESP_CMD);
esp_write8(ESP_CMD_TI, ESP_CMD);
}
} else {
unsigned int n = ESP_FIFO_SIZE;
u8 *src = (u8 *)addr;
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
if (n > esp_count)
n = esp_count;
writesb(esp->fifo_reg, src, n);
src += n;
esp_count -= n;
scsi_esp_cmd(esp, cmd);
while (esp_count) {
if (esp_wait_for_intr(esp)) {
esp->send_cmd_error = 1;
break;
}
if ((esp->sreg & ESP_STAT_PMASK) != phase)
break;
esp->ireg = esp_read8(ESP_INTRPT);
if (esp->ireg & ~ESP_INTR_BSERV) {
esp->send_cmd_error = 1;
break;
}
n = ESP_FIFO_SIZE -
(esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
if (n > esp_count)
n = esp_count;
writesb(esp->fifo_reg, src, n);
src += n;
esp_count -= n;
esp_write8(ESP_CMD_TI, ESP_CMD);
}
}
esp->send_cmd_residual = esp_count;
}
EXPORT_SYMBOL(esp_send_pio_cmd);
#endif
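
esp_wait_for_fifo() and esp_wait_for_intr() above both use the classic bounded-poll idiom: spin on a status register in 1us steps for a fixed number of iterations, then report a timeout. A generic sketch of the idiom (names illustrative):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Sketch: poll an MMIO status register until a bit is set, bounded so
 * a dead device cannot hang the caller forever.
 */
static int example_poll_bit(void __iomem *reg, u8 mask)
{
	int i = 500000;

	do {
		if (readb(reg) & mask)
			return 0;
		udelay(1);
	} while (--i);

	return -ETIMEDOUT;
}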


@@ -249,11 +249,7 @@
#define SYNC_DEFP_FAST 0x19 /* 10mb/s */ #define SYNC_DEFP_FAST 0x19 /* 10mb/s */
struct esp_cmd_priv { struct esp_cmd_priv {
union { int num_sg;
dma_addr_t dma_addr;
int num_sg;
} u;
int cur_residue; int cur_residue;
struct scatterlist *cur_sg; struct scatterlist *cur_sg;
int tot_residue; int tot_residue;
@@ -363,19 +359,6 @@ struct esp_driver_ops {
void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg); void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg);
u8 (*esp_read8)(struct esp *esp, unsigned long reg); u8 (*esp_read8)(struct esp *esp, unsigned long reg);
/* Map and unmap DMA memory. Eventually the driver will be
* converted to the generic DMA API as soon as SBUS is able to
* cope with that. At such time we can remove this.
*/
dma_addr_t (*map_single)(struct esp *esp, void *buf,
size_t sz, int dir);
int (*map_sg)(struct esp *esp, struct scatterlist *sg,
int num_sg, int dir);
void (*unmap_single)(struct esp *esp, dma_addr_t addr,
size_t sz, int dir);
void (*unmap_sg)(struct esp *esp, struct scatterlist *sg,
int num_sg, int dir);
/* Return non-zero if there is an IRQ pending. Usually this /* Return non-zero if there is an IRQ pending. Usually this
* status bit lives in the DMA controller sitting in front of * status bit lives in the DMA controller sitting in front of
* the ESP. This has to be accurate or else the ESP interrupt * the ESP. This has to be accurate or else the ESP interrupt
@@ -435,7 +418,7 @@ struct esp {
const struct esp_driver_ops *ops; const struct esp_driver_ops *ops;
struct Scsi_Host *host; struct Scsi_Host *host;
void *dev; struct device *dev;
struct esp_cmd_entry *active_cmd; struct esp_cmd_entry *active_cmd;
@@ -490,11 +473,11 @@ struct esp {
u32 flags; u32 flags;
#define ESP_FLAG_DIFFERENTIAL 0x00000001 #define ESP_FLAG_DIFFERENTIAL 0x00000001
#define ESP_FLAG_RESETTING 0x00000002 #define ESP_FLAG_RESETTING 0x00000002
#define ESP_FLAG_DOING_SLOWCMD 0x00000004
#define ESP_FLAG_WIDE_CAPABLE 0x00000008 #define ESP_FLAG_WIDE_CAPABLE 0x00000008
#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010 #define ESP_FLAG_QUICKIRQ_CHECK 0x00000010
#define ESP_FLAG_DISABLE_SYNC 0x00000020 #define ESP_FLAG_DISABLE_SYNC 0x00000020
#define ESP_FLAG_USE_FIFO 0x00000040 #define ESP_FLAG_USE_FIFO 0x00000040
#define ESP_FLAG_NO_DMA_MAP 0x00000080
u8 select_state; u8 select_state;
#define ESP_SELECT_NONE 0x00 /* Not selecting */ #define ESP_SELECT_NONE 0x00 /* Not selecting */
@@ -532,7 +515,7 @@ struct esp {
u32 min_period; u32 min_period;
u32 radelay; u32 radelay;
/* Slow command state. */ /* ESP_CMD_SELAS command state */
u8 *cmd_bytes_ptr; u8 *cmd_bytes_ptr;
int cmd_bytes_left; int cmd_bytes_left;
@@ -540,6 +523,11 @@ struct esp {
void *dma; void *dma;
int dmarev; int dmarev;
/* These are used by esp_send_pio_cmd() */
u8 __iomem *fifo_reg;
int send_cmd_error;
u32 send_cmd_residual;
}; };
/* A front-end driver for the ESP chip should do the following in /* A front-end driver for the ESP chip should do the following in
@@ -568,16 +556,18 @@ struct esp {
* example, the DMA engine has to be reset before ESP can * example, the DMA engine has to be reset before ESP can
* be programmed. * be programmed.
* 11) If necessary, call dev_set_drvdata() as needed. * 11) If necessary, call dev_set_drvdata() as needed.
* 12) Call scsi_esp_register() with prepared 'esp' structure * 12) Call scsi_esp_register() with prepared 'esp' structure.
* and a device pointer if possible.
* 13) Check scsi_esp_register() return value, release all resources * 13) Check scsi_esp_register() return value, release all resources
* if an error was returned. * if an error was returned.
*/ */
extern struct scsi_host_template scsi_esp_template; extern struct scsi_host_template scsi_esp_template;
extern int scsi_esp_register(struct esp *, struct device *); extern int scsi_esp_register(struct esp *);
extern void scsi_esp_unregister(struct esp *); extern void scsi_esp_unregister(struct esp *);
extern irqreturn_t scsi_esp_intr(int, void *); extern irqreturn_t scsi_esp_intr(int, void *);
extern void scsi_esp_cmd(struct esp *, u8); extern void scsi_esp_cmd(struct esp *, u8);
extern void esp_send_pio_cmd(struct esp *esp, u32 dma_addr, u32 esp_count,
u32 dma_count, int write, u8 cmd);
#endif /* !(_ESP_SCSI_H) */ #endif /* !(_ESP_SCSI_H) */
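
Since scsi_esp_register() no longer takes a device argument, a front end stashes its struct device in esp->dev during setup, and the core uses it for scsi_add_host() and logging. A hedged sketch of the new call sequence (the function name and elided setup are illustrative):

/* Sketch: front-end attach after the signature change. */
static int example_esp_attach(struct device *dev, struct esp *esp)
{
	esp->dev = dev;			/* previously a second argument */
	/* ... fill esp->ops, esp->regs, esp->command_block, etc. ... */
	return scsi_esp_register(esp);	/* was scsi_esp_register(esp, dev) */
}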


@@ -836,8 +836,8 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
u32 fcp_bytes_written = 0; u32 fcp_bytes_written = 0;
unsigned long flags; unsigned long flags;
pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len, dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
skb = buf->os_buf; skb = buf->os_buf;
fp = (struct fc_frame *)skb; fp = (struct fc_frame *)skb;
buf->os_buf = NULL; buf->os_buf = NULL;
@@ -977,9 +977,8 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
skb_reset_transport_header(skb); skb_reset_transport_header(skb);
skb_reset_network_header(skb); skb_reset_network_header(skb);
skb_put(skb, len); skb_put(skb, len);
pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE); pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
if (dma_mapping_error(&fnic->pdev->dev, pa)) {
if (pci_dma_mapping_error(fnic->pdev, pa)) {
r = -ENOMEM; r = -ENOMEM;
printk(KERN_ERR "PCI mapping failed with error %d\n", r); printk(KERN_ERR "PCI mapping failed with error %d\n", r);
goto free_skb; goto free_skb;
@@ -998,8 +997,8 @@ void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
struct fc_frame *fp = buf->os_buf; struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(rq->vdev); struct fnic *fnic = vnic_dev_priv(rq->vdev);
pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len, dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
dev_kfree_skb(fp_skb(fp)); dev_kfree_skb(fp_skb(fp));
buf->os_buf = NULL; buf->os_buf = NULL;
@@ -1018,7 +1017,6 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct ethhdr *eth_hdr; struct ethhdr *eth_hdr;
struct vlan_ethhdr *vlan_hdr; struct vlan_ethhdr *vlan_hdr;
unsigned long flags; unsigned long flags;
int r;
if (!fnic->vlan_hw_insert) { if (!fnic->vlan_hw_insert) {
eth_hdr = (struct ethhdr *)skb_mac_header(skb); eth_hdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1038,11 +1036,10 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
} }
} }
pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
r = pci_dma_mapping_error(fnic->pdev, pa); if (dma_mapping_error(&fnic->pdev->dev, pa)) {
if (r) { printk(KERN_ERR "DMA mapping failed\n");
printk(KERN_ERR "PCI mapping failed with error %d\n", r);
goto free_skb; goto free_skb;
} }
@@ -1058,7 +1055,7 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
irq_restore: irq_restore:
spin_unlock_irqrestore(&fnic->wq_lock[0], flags); spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE); dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
free_skb: free_skb:
kfree_skb(skb); kfree_skb(skb);
} }
@@ -1115,9 +1112,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
if (FC_FCOE_VER) if (FC_FCOE_VER)
FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER); FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE); pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
if (dma_mapping_error(&fnic->pdev->dev, pa)) {
if (pci_dma_mapping_error(fnic->pdev, pa)) {
ret = -ENOMEM; ret = -ENOMEM;
printk(KERN_ERR "DMA map failed with error %d\n", ret); printk(KERN_ERR "DMA map failed with error %d\n", ret);
goto free_skb_on_err; goto free_skb_on_err;
@@ -1131,8 +1127,7 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
spin_lock_irqsave(&fnic->wq_lock[0], flags); spin_lock_irqsave(&fnic->wq_lock[0], flags);
if (!vnic_wq_desc_avail(wq)) { if (!vnic_wq_desc_avail(wq)) {
pci_unmap_single(fnic->pdev, pa, dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
tot_len, PCI_DMA_TODEVICE);
ret = -1; ret = -1;
goto irq_restore; goto irq_restore;
} }
@@ -1247,8 +1242,8 @@ static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
struct fc_frame *fp = (struct fc_frame *)skb; struct fc_frame *fp = (struct fc_frame *)skb;
struct fnic *fnic = vnic_dev_priv(wq->vdev); struct fnic *fnic = vnic_dev_priv(wq->vdev);
pci_unmap_single(fnic->pdev, buf->dma_addr, dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
buf->len, PCI_DMA_TODEVICE); DMA_TO_DEVICE);
dev_kfree_skb_irq(fp_skb(fp)); dev_kfree_skb_irq(fp_skb(fp));
buf->os_buf = NULL; buf->os_buf = NULL;
} }
@@ -1290,8 +1285,8 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
struct fc_frame *fp = buf->os_buf; struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(wq->vdev); struct fnic *fnic = vnic_dev_priv(wq->vdev);
pci_unmap_single(fnic->pdev, buf->dma_addr, dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
buf->len, PCI_DMA_TODEVICE); DMA_TO_DEVICE);
dev_kfree_skb(fp_skb(fp)); dev_kfree_skb(fp_skb(fp));
buf->os_buf = NULL; buf->os_buf = NULL;


@@ -611,30 +611,15 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* limitation for the device. Try 64-bit first, and * limitation for the device. Try 64-bit first, and
* fail to 32-bit. * fail to 32-bit.
*/ */
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) { if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) { if (err) {
shost_printk(KERN_ERR, fnic->lport->host, shost_printk(KERN_ERR, fnic->lport->host,
"No usable DMA configuration " "No usable DMA configuration "
"aborting\n"); "aborting\n");
goto err_out_release_regions; goto err_out_release_regions;
} }
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Unable to obtain 32-bit DMA "
"for consistent allocations, aborting.\n");
goto err_out_release_regions;
}
} else {
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Unable to obtain 64-bit DMA "
"for consistent allocations, aborting.\n");
goto err_out_release_regions;
}
} }
/* Map vNIC resources from BAR0 */ /* Map vNIC resources from BAR0 */


@@ -126,17 +126,17 @@ static void fnic_release_ioreq_buf(struct fnic *fnic,
                    struct scsi_cmnd *sc)
 {
     if (io_req->sgl_list_pa)
-        pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+        dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
                  sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
-                 PCI_DMA_TODEVICE);
+                 DMA_TO_DEVICE);
     scsi_dma_unmap(sc);

     if (io_req->sgl_cnt)
         mempool_free(io_req->sgl_list_alloc,
                  fnic->io_sgl_pool[io_req->sgl_type]);
     if (io_req->sense_buf_pa)
-        pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
-                 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+        dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
+                 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 }

 /* Free up Copy Wq descriptors. Called with copy_wq lock held */

@@ -330,7 +330,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
     int flags;
     u8 exch_flags;
     struct scsi_lun fc_lun;
-    int r;

     if (sg_count) {
         /* For each SGE, create a device desc entry */
@@ -342,30 +341,25 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
             desc++;
         }

-        io_req->sgl_list_pa = pci_map_single
-            (fnic->pdev,
-             io_req->sgl_list,
-             sizeof(io_req->sgl_list[0]) * sg_count,
-             PCI_DMA_TODEVICE);
-
-        r = pci_dma_mapping_error(fnic->pdev, io_req->sgl_list_pa);
-        if (r) {
-            printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+        io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
+                io_req->sgl_list,
+                sizeof(io_req->sgl_list[0]) * sg_count,
+                DMA_TO_DEVICE);
+        if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
+            printk(KERN_ERR "DMA mapping failed\n");
             return SCSI_MLQUEUE_HOST_BUSY;
         }
     }

-    io_req->sense_buf_pa = pci_map_single(fnic->pdev,
-                          sc->sense_buffer,
-                          SCSI_SENSE_BUFFERSIZE,
-                          PCI_DMA_FROMDEVICE);
-
-    r = pci_dma_mapping_error(fnic->pdev, io_req->sense_buf_pa);
-    if (r) {
-        pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
-                 sizeof(io_req->sgl_list[0]) * sg_count,
-                 PCI_DMA_TODEVICE);
-        printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+    io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
+                          sc->sense_buffer,
+                          SCSI_SENSE_BUFFERSIZE,
+                          DMA_FROM_DEVICE);
+    if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
+        dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
+                 sizeof(io_req->sgl_list[0]) * sg_count,
+                 DMA_TO_DEVICE);
+        printk(KERN_ERR "DMA mapping failed\n");
         return SCSI_MLQUEUE_HOST_BUSY;
     }
@@ -2272,33 +2266,17 @@ clean_pending_aborts_end:
 static inline int
 fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
 {
-    struct blk_queue_tag *bqt = fnic->lport->host->bqt;
-    int tag, ret = SCSI_NO_TAG;
-
-    BUG_ON(!bqt);
-    if (!bqt) {
-        pr_err("Tags are not supported\n");
-        goto end;
-    }
-
-    do {
-        tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
-        if (tag >= bqt->max_depth) {
-            pr_err("Tag allocation failure\n");
-            goto end;
-        }
-    } while (test_and_set_bit(tag, bqt->tag_map));
+    struct request_queue *q = sc->request->q;
+    struct request *dummy;

-    bqt->tag_index[tag] = sc->request;
-    sc->request->tag = tag;
-    sc->tag = tag;
-    if (!sc->request->special)
-        sc->request->special = sc;
+    dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
+    if (IS_ERR(dummy))
+        return SCSI_NO_TAG;

-    ret = tag;
+    sc->tag = sc->request->tag = dummy->tag;
+    sc->request->special = sc;

-end:
-    return ret;
+    return dummy->tag;
 }
 /**
@@ -2308,20 +2286,9 @@ end:
 static inline void
 fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
 {
-    struct blk_queue_tag *bqt = fnic->lport->host->bqt;
-    int tag = sc->request->tag;
+    struct request *dummy = sc->request->special;

-    if (tag == SCSI_NO_TAG)
-        return;
-
-    BUG_ON(!bqt || !bqt->tag_index[tag]);
-    if (!bqt)
-        return;
-
-    bqt->tag_index[tag] = NULL;
-    clear_bit(tag, bqt->tag_map);
-
-    return;
+    blk_mq_free_request(dummy);
 }

 /*
@@ -2380,19 +2347,9 @@ int fnic_device_reset(struct scsi_cmnd *sc)
     tag = sc->request->tag;
     if (unlikely(tag < 0)) {
         /*
-         * XXX(hch): current the midlayer fakes up a struct
-         * request for the explicit reset ioctls, and those
-         * don't have a tag allocated to them.  The below
-         * code pokes into midlayer structures to paper over
-         * this design issue, but that won't work for blk-mq.
-         *
-         * Either someone who can actually test the hardware
-         * will have to come up with a similar hack for the
-         * blk-mq case, or we'll have to bite the bullet and
-         * fix the way the EH ioctls work for real, but until
-         * that happens we fail these explicit requests here.
+         * Really should fix the midlayer to pass in a proper
+         * request for ioctls...
          */
         tag = fnic_scsi_host_start_tag(fnic, sc);
         if (unlikely(tag == SCSI_NO_TAG))
             goto fnic_device_reset_end;

@@ -195,9 +195,9 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
 {
     vnic_dev_desc_ring_size(ring, desc_count, desc_size);

-    ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
+    ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
         ring->size_unaligned,
-        &ring->base_addr_unaligned);
+        &ring->base_addr_unaligned, GFP_KERNEL);

     if (!ring->descs_unaligned) {
         printk(KERN_ERR
@@ -221,7 +221,7 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
 void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
 {
     if (ring->descs) {
-        pci_free_consistent(vdev->pdev,
+        dma_free_coherent(&vdev->pdev->dev,
             ring->size_unaligned,
             ring->descs_unaligned,
             ring->base_addr_unaligned);
@@ -298,9 +298,9 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
     int err = 0;

     if (!vdev->fw_info) {
-        vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+        vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
             sizeof(struct vnic_devcmd_fw_info),
-            &vdev->fw_info_pa);
+            &vdev->fw_info_pa, GFP_KERNEL);
         if (!vdev->fw_info)
             return -ENOMEM;
@@ -361,8 +361,8 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
     int wait = 1000;

     if (!vdev->stats) {
-        vdev->stats = pci_alloc_consistent(vdev->pdev,
-            sizeof(struct vnic_stats), &vdev->stats_pa);
+        vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+            sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
         if (!vdev->stats)
             return -ENOMEM;
     }
@@ -523,9 +523,9 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
     int wait = 1000;

     if (!vdev->notify) {
-        vdev->notify = pci_alloc_consistent(vdev->pdev,
+        vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
             sizeof(struct vnic_devcmd_notify),
-            &vdev->notify_pa);
+            &vdev->notify_pa, GFP_KERNEL);
         if (!vdev->notify)
             return -ENOMEM;
     }
@@ -647,21 +647,21 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
 {
     if (vdev) {
         if (vdev->notify)
-            pci_free_consistent(vdev->pdev,
+            dma_free_coherent(&vdev->pdev->dev,
                 sizeof(struct vnic_devcmd_notify),
                 vdev->notify,
                 vdev->notify_pa);
         if (vdev->linkstatus)
-            pci_free_consistent(vdev->pdev,
+            dma_free_coherent(&vdev->pdev->dev,
                 sizeof(u32),
                 vdev->linkstatus,
                 vdev->linkstatus_pa);
         if (vdev->stats)
-            pci_free_consistent(vdev->pdev,
+            dma_free_coherent(&vdev->pdev->dev,
                 sizeof(struct vnic_stats),
                 vdev->stats, vdev->stats_pa);
         if (vdev->fw_info)
-            pci_free_consistent(vdev->pdev,
+            dma_free_coherent(&vdev->pdev->dev,
                 sizeof(struct vnic_devcmd_fw_info),
                 vdev->fw_info, vdev->fw_info_pa);
         kfree(vdev);
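Every vnic_dev buffer above follows the same mechanical rewrite: pci_alloc_consistent(pdev, len, &pa) becomes dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL), with dma_free_coherent() on the free side. A minimal sketch of the lifetime, with hypothetical names:

    void *buf;
    dma_addr_t buf_pa;
    size_t len = 4096;      /* hypothetical buffer size */

    buf = dma_alloc_coherent(&pdev->dev, len, &buf_pa, GFP_KERNEL);
    if (!buf)
        return -ENOMEM;
    /* ... program buf_pa into the device, touch buf from the CPU ... */
    dma_free_coherent(&pdev->dev, len, buf, buf_pa);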

@@ -34,6 +34,7 @@
 #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
 #define HISI_SAS_RESET_BIT 0
 #define HISI_SAS_REJECT_CMD_BIT 1
+#define HISI_SAS_RESERVED_IPTT_CNT 96

 #define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
 #define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
@@ -217,7 +218,7 @@ struct hisi_sas_hw {
     int (*hw_init)(struct hisi_hba *hisi_hba);
     void (*setup_itct)(struct hisi_hba *hisi_hba,
                struct hisi_sas_device *device);
-    int (*slot_index_alloc)(struct hisi_hba *hisi_hba, int *slot_idx,
+    int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
                 struct domain_device *device);
     struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
     void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);

@@ -183,7 +183,14 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)

 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
 {
-    hisi_sas_slot_index_clear(hisi_hba, slot_idx);
+    unsigned long flags;
+
+    if (hisi_hba->hw->slot_index_alloc || (slot_idx >=
+        hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) {
+        spin_lock_irqsave(&hisi_hba->lock, flags);
+        hisi_sas_slot_index_clear(hisi_hba, slot_idx);
+        spin_unlock_irqrestore(&hisi_hba->lock, flags);
+    }
 }

 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
@@ -193,24 +200,34 @@ static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
     set_bit(slot_idx, bitmap);
 }

-static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
+static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
+                     struct scsi_cmnd *scsi_cmnd)
 {
-    unsigned int index;
+    int index;
     void *bitmap = hisi_hba->slot_index_tags;
+    unsigned long flags;

+    if (scsi_cmnd)
+        return scsi_cmnd->request->tag;
+
+    spin_lock_irqsave(&hisi_hba->lock, flags);
     index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
                    hisi_hba->last_slot_index + 1);
     if (index >= hisi_hba->slot_index_count) {
-        index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
-                       0);
-        if (index >= hisi_hba->slot_index_count)
+        index = find_next_zero_bit(bitmap,
+                       hisi_hba->slot_index_count,
+                       hisi_hba->hw->max_command_entries -
+                       HISI_SAS_RESERVED_IPTT_CNT);
+        if (index >= hisi_hba->slot_index_count) {
+            spin_unlock_irqrestore(&hisi_hba->lock, flags);
             return -SAS_QUEUE_FULL;
+        }
     }
     hisi_sas_slot_index_set(hisi_hba, index);
-    *slot_idx = index;
     hisi_hba->last_slot_index = index;
+    spin_unlock_irqrestore(&hisi_hba->lock, flags);

-    return 0;
+    return index;
 }
 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
@@ -249,9 +266,7 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,

     memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

-    spin_lock_irqsave(&hisi_hba->lock, flags);
     hisi_sas_slot_index_free(hisi_hba, slot->idx);
-    spin_unlock_irqrestore(&hisi_hba->lock, flags);
 }
 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
@@ -287,13 +302,13 @@ static int hisi_sas_task_prep(struct sas_task *task,
                   int *pass)
 {
     struct domain_device *device = task->dev;
-    struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+    struct hisi_hba *hisi_hba;
     struct hisi_sas_device *sas_dev = device->lldd_dev;
     struct hisi_sas_port *port;
     struct hisi_sas_slot *slot;
     struct hisi_sas_cmd_hdr *cmd_hdr_base;
     struct asd_sas_port *sas_port = device->port;
-    struct device *dev = hisi_hba->dev;
+    struct device *dev;
     int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
     int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
     struct hisi_sas_dq *dq;
@@ -314,6 +329,9 @@ static int hisi_sas_task_prep(struct sas_task *task,
         return -ECOMM;
     }

+    hisi_hba = dev_to_hisi_hba(device);
+    dev = hisi_hba->dev;
+
     if (DEV_IS_GONE(sas_dev)) {
         if (sas_dev)
             dev_info(dev, "task prep: device %d not ready\n",
@@ -381,16 +399,27 @@ static int hisi_sas_task_prep(struct sas_task *task,
         goto err_out_dma_unmap;
     }

-    spin_lock_irqsave(&hisi_hba->lock, flags);
     if (hisi_hba->hw->slot_index_alloc)
-        rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
-                            device);
-    else
-        rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
-    spin_unlock_irqrestore(&hisi_hba->lock, flags);
-    if (rc)
+        rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
+    else {
+        struct scsi_cmnd *scsi_cmnd = NULL;
+
+        if (task->uldd_task) {
+            struct ata_queued_cmd *qc;
+
+            if (dev_is_sata(device)) {
+                qc = task->uldd_task;
+                scsi_cmnd = qc->scsicmd;
+            } else {
+                scsi_cmnd = task->uldd_task;
+            }
+        }
+        rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
+    }
+    if (rc < 0)
         goto err_out_dma_unmap;

+    slot_idx = rc;
     slot = &hisi_hba->slot_info[slot_idx];

     spin_lock_irqsave(&dq->lock, flags);
@@ -451,9 +480,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
     return 0;

 err_out_tag:
-    spin_lock_irqsave(&hisi_hba->lock, flags);
     hisi_sas_slot_index_free(hisi_hba, slot_idx);
-    spin_unlock_irqrestore(&hisi_hba->lock, flags);
 err_out_dma_unmap:
     if (!sas_protocol_ata(task->task_proto)) {
         if (task->num_scatter) {
@@ -904,6 +931,9 @@ static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
     _r.maximum_linkrate = max;
     _r.minimum_linkrate = min;

+    sas_phy->phy->maximum_linkrate = max;
+    sas_phy->phy->minimum_linkrate = min;
+
     hisi_hba->hw->phy_disable(hisi_hba, phy_no);
     msleep(100);
     hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
@@ -950,8 +980,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,

 static void hisi_sas_task_done(struct sas_task *task)
 {
-    if (!del_timer(&task->slow_task->timer))
-        return;
+    del_timer(&task->slow_task->timer);
     complete(&task->slow_task->completion);
 }

@@ -960,13 +989,17 @@ static void hisi_sas_tmf_timedout(struct timer_list *t)
     struct sas_task_slow *slow = from_timer(slow, t, timer);
     struct sas_task *task = slow->task;
     unsigned long flags;
+    bool is_completed = true;

     spin_lock_irqsave(&task->task_state_lock, flags);
-    if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+    if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
         task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+        is_completed = false;
+    }
     spin_unlock_irqrestore(&task->task_state_lock, flags);

-    complete(&task->slow_task->completion);
+    if (!is_completed)
+        complete(&task->slow_task->completion);
 }

 #define TASK_TIMEOUT 20
@@ -1019,8 +1052,16 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
             struct hisi_sas_slot *slot = task->lldd_task;

             dev_err(dev, "abort tmf: TMF task timeout and not done\n");
-            if (slot)
+            if (slot) {
+                struct hisi_sas_cq *cq =
+                    &hisi_hba->cq[slot->dlvry_queue];
+                /*
+                 * flush tasklet to avoid free'ing task
+                 * before using task in IO completion
+                 */
+                tasklet_kill(&cq->tasklet);
                 slot->task = NULL;
+            }

             goto ex_err;
         } else
@@ -1396,6 +1437,17 @@ static int hisi_sas_abort_task(struct sas_task *task)

     spin_lock_irqsave(&task->task_state_lock, flags);
     if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+        struct hisi_sas_slot *slot = task->lldd_task;
+        struct hisi_sas_cq *cq;
+
+        if (slot) {
+            /*
+             * flush tasklet to avoid free'ing task
+             * before using task in IO completion
+             */
+            cq = &hisi_hba->cq[slot->dlvry_queue];
+            tasklet_kill(&cq->tasklet);
+        }
         spin_unlock_irqrestore(&task->task_state_lock, flags);
         rc = TMF_RESP_FUNC_COMPLETE;
         goto out;
@@ -1451,12 +1503,19 @@ static int hisi_sas_abort_task(struct sas_task *task)
         /* SMP */
         struct hisi_sas_slot *slot = task->lldd_task;
         u32 tag = slot->idx;
+        struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

         rc = hisi_sas_internal_task_abort(hisi_hba, device,
                           HISI_SAS_INT_ABT_CMD, tag);
         if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
-            task->lldd_task)
-            hisi_sas_do_release_task(hisi_hba, task, slot);
+            task->lldd_task) {
+            /*
+             * flush tasklet to avoid free'ing task
+             * before using task in IO completion
+             */
+            tasklet_kill(&cq->tasklet);
+            slot->task = NULL;
+        }
     }

 out:
@@ -1705,14 +1764,11 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
     port = to_hisi_sas_port(sas_port);

     /* simply get a slot and send abort command */
-    spin_lock_irqsave(&hisi_hba->lock, flags);
-    rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
-    if (rc) {
-        spin_unlock_irqrestore(&hisi_hba->lock, flags);
+    rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
+    if (rc < 0)
         goto err_out;
-    }
-    spin_unlock_irqrestore(&hisi_hba->lock, flags);

+    slot_idx = rc;
     slot = &hisi_hba->slot_info[slot_idx];

     spin_lock_irqsave(&dq->lock, flags_dq);
@@ -1748,7 +1804,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
     spin_lock_irqsave(&task->task_state_lock, flags);
     task->task_state_flags |= SAS_TASK_AT_INITIATOR;
     spin_unlock_irqrestore(&task->task_state_lock, flags);
-
     WRITE_ONCE(slot->ready, 1);
     /* send abort command to the chip */
     spin_lock_irqsave(&dq->lock, flags);
@@ -1759,9 +1814,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
     return 0;

 err_out_tag:
-    spin_lock_irqsave(&hisi_hba->lock, flags);
     hisi_sas_slot_index_free(hisi_hba, slot_idx);
-    spin_unlock_irqrestore(&hisi_hba->lock, flags);
 err_out:
     dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
@@ -1823,8 +1876,16 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
         if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
             struct hisi_sas_slot *slot = task->lldd_task;

-            if (slot)
+            if (slot) {
+                struct hisi_sas_cq *cq =
+                    &hisi_hba->cq[slot->dlvry_queue];
+                /*
+                 * flush tasklet to avoid free'ing task
+                 * before using task in IO completion
+                 */
+                tasklet_kill(&cq->tasklet);
                 slot->task = NULL;
+            }
             dev_err(dev, "internal task abort: timeout and not done.\n");
             res = -EIO;
             goto exit;
@@ -1861,10 +1922,6 @@ static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
     hisi_sas_port_notify_formed(sas_phy);
 }

-static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
-{
-}
-
 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
                 u8 reg_index, u8 reg_count, u8 *write_data)
 {
@@ -1954,10 +2011,9 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
     .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
     .lldd_lu_reset = hisi_sas_lu_reset,
     .lldd_query_task = hisi_sas_query_task,
     .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
     .lldd_port_formed = hisi_sas_port_formed,
-    .lldd_port_deformed = hisi_sas_port_deformed,
     .lldd_write_gpio = hisi_sas_write_gpio,
 };
 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
@@ -2120,6 +2176,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
     hisi_sas_init_mem(hisi_hba);

     hisi_sas_slot_index_init(hisi_hba);
+    hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries -
+        HISI_SAS_RESERVED_IPTT_CNT;

     hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
     if (!hisi_hba->wq) {
@@ -2323,8 +2381,15 @@ int hisi_sas_probe(struct platform_device *pdev,
     shost->max_channel = 1;
     shost->max_cmd_len = 16;
     shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
-    shost->can_queue = hisi_hba->hw->max_command_entries;
-    shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+    if (hisi_hba->hw->slot_index_alloc) {
+        shost->can_queue = hisi_hba->hw->max_command_entries;
+        shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+    } else {
+        shost->can_queue = hisi_hba->hw->max_command_entries -
+            HISI_SAS_RESERVED_IPTT_CNT;
+        shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
+            HISI_SAS_RESERVED_IPTT_CNT;
+    }

     sha->sas_ha_name = DRV_NAME;
     sha->dev = hisi_hba->dev;

@@ -1809,7 +1809,6 @@ static struct scsi_host_template sht_v1_hw = {
     .scan_start = hisi_sas_scan_start,
     .change_queue_depth = sas_change_queue_depth,
     .bios_param = sas_bios_param,
-    .can_queue = 1,
     .this_id = -1,
     .sg_tablesize = SG_ALL,
     .max_sectors = SCSI_DEFAULT_MAX_SECTORS,

@@ -770,7 +770,7 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,

 /* This function needs to be protected from pre-emption. */
 static int
-slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
+slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
                  struct domain_device *device)
 {
     int sata_dev = dev_is_sata(device);
@@ -778,6 +778,7 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
     struct hisi_sas_device *sas_dev = device->lldd_dev;
     int sata_idx = sas_dev->sata_idx;
     int start, end;
+    unsigned long flags;

     if (!sata_dev) {
         /*
@@ -801,11 +802,14 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
         end = 64 * (sata_idx + 2);
     }

+    spin_lock_irqsave(&hisi_hba->lock, flags);
     while (1) {
         start = find_next_zero_bit(bitmap,
                     hisi_hba->slot_index_count, start);
-        if (start >= end)
+        if (start >= end) {
+            spin_unlock_irqrestore(&hisi_hba->lock, flags);
             return -SAS_QUEUE_FULL;
+        }
         /*
          * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
          */
@@ -815,8 +819,8 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
     }

     set_bit(start, bitmap);
-    *slot_idx = start;
-    return 0;
+    spin_unlock_irqrestore(&hisi_hba->lock, flags);
+    return start;
 }

 static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx)
@@ -2483,7 +2487,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
     }

 out:
-    hisi_sas_slot_task_free(hisi_hba, task, slot);
     sts = ts->stat;

     spin_lock_irqsave(&task->task_state_lock, flags);
     if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
@@ -2493,6 +2496,7 @@ out:
     }
     task->task_state_flags |= SAS_TASK_STATE_DONE;
     spin_unlock_irqrestore(&task->task_state_lock, flags);
+    hisi_sas_slot_task_free(hisi_hba, task, slot);

     if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
         spin_lock_irqsave(&device->done_lock, flags);
@@ -3560,7 +3564,6 @@ static struct scsi_host_template sht_v2_hw = {
     .scan_start = hisi_sas_scan_start,
     .change_queue_depth = sas_change_queue_depth,
     .bios_param = sas_bios_param,
-    .can_queue = 1,
     .this_id = -1,
     .sg_tablesize = SG_ALL,
     .max_sectors = SCSI_DEFAULT_MAX_SECTORS,

@@ -127,6 +127,7 @@
 #define PHY_CTRL_RESET_OFF 0
 #define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
 #define SL_CFG (PORT_BASE + 0x84)
+#define AIP_LIMIT (PORT_BASE + 0x90)
 #define SL_CONTROL (PORT_BASE + 0x94)
 #define SL_CONTROL_NOTIFY_EN_OFF 0
 #define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
@@ -431,6 +432,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
              (u32)((1ULL << hisi_hba->queue_count) - 1));
     hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
     hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
+    hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
     hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
     hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
     hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
@@ -441,7 +443,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
     hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
     hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
     if (pdev->revision >= 0x21)
-        hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7fff);
+        hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7aff);
     else
         hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
     hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
@@ -495,6 +497,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)

         /* used for 12G negotiate */
         hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
+        hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
     }

     for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -1751,7 +1754,6 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
     }

 out:
-    hisi_sas_slot_task_free(hisi_hba, task, slot);
     sts = ts->stat;

     spin_lock_irqsave(&task->task_state_lock, flags);
     if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
@@ -1761,6 +1763,7 @@ out:
     }
     task->task_state_flags |= SAS_TASK_STATE_DONE;
     spin_unlock_irqrestore(&task->task_state_lock, flags);
+    hisi_sas_slot_task_free(hisi_hba, task, slot);

     if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
         spin_lock_irqsave(&device->done_lock, flags);
@@ -2098,7 +2101,6 @@ static struct scsi_host_template sht_v3_hw = {
     .scan_start = hisi_sas_scan_start,
     .change_queue_depth = sas_change_queue_depth,
     .bios_param = sas_bios_param,
-    .can_queue = 1,
     .this_id = -1,
     .sg_tablesize = SG_ALL,
     .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
@@ -2108,6 +2110,7 @@ static struct scsi_host_template sht_v3_hw = {
     .target_destroy = sas_target_destroy,
     .ioctl = sas_ioctl,
     .shost_attrs = host_attrs,
+    .tag_alloc_policy = BLK_TAG_ALLOC_RR,
 };

 static const struct hisi_sas_hw hisi_sas_v3_hw = {
@@ -2245,8 +2248,10 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
     shost->max_channel = 1;
     shost->max_cmd_len = 16;
     shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
-    shost->can_queue = hisi_hba->hw->max_command_entries;
-    shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+    shost->can_queue = hisi_hba->hw->max_command_entries -
+        HISI_SAS_RESERVED_IPTT_CNT;
+    shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
+        HISI_SAS_RESERVED_IPTT_CNT;

     sha->sas_ha_name = DRV_NAME;
     sha->dev = dev;

@@ -2240,8 +2240,8 @@ static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
     chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
     chain_size = le32_to_cpu(cp->sg[0].length);
-    temp64 = pci_map_single(h->pdev, chain_block, chain_size,
-                PCI_DMA_TODEVICE);
+    temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
+                DMA_TO_DEVICE);
     if (dma_mapping_error(&h->pdev->dev, temp64)) {
         /* prevent subsequent unmapping */
         cp->sg->address = 0;
@@ -2261,7 +2261,7 @@ static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
     chain_sg = cp->sg;
     temp64 = le64_to_cpu(chain_sg->address);
     chain_size = le32_to_cpu(cp->sg[0].length);
-    pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
+    dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
 }

 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
@@ -2277,8 +2277,8 @@ static int hpsa_map_sg_chain_block(struct ctlr_info *h,
     chain_len = sizeof(*chain_sg) *
         (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
     chain_sg->Len = cpu_to_le32(chain_len);
-    temp64 = pci_map_single(h->pdev, chain_block, chain_len,
-                PCI_DMA_TODEVICE);
+    temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
+                DMA_TO_DEVICE);
     if (dma_mapping_error(&h->pdev->dev, temp64)) {
         /* prevent subsequent unmapping */
         chain_sg->Addr = cpu_to_le64(0);
@@ -2297,8 +2297,8 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
         return;

     chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
-    pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
-             le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
+    dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
+             le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
 }
@@ -2759,13 +2759,13 @@ static void complete_scsi_command(struct CommandList *cp)
         return hpsa_cmd_free_and_done(h, cp, cmd);
 }

-static void hpsa_pci_unmap(struct pci_dev *pdev,
-    struct CommandList *c, int sg_used, int data_direction)
+static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
+    int sg_used, enum dma_data_direction data_direction)
 {
     int i;

     for (i = 0; i < sg_used; i++)
-        pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
+        dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
                 le32_to_cpu(c->SG[i].Len),
                 data_direction);
 }

@@ -2774,17 +2774,17 @@ static int hpsa_map_one(struct pci_dev *pdev,
         struct CommandList *cp,
         unsigned char *buf,
         size_t buflen,
-        int data_direction)
+        enum dma_data_direction data_direction)
 {
     u64 addr64;

-    if (buflen == 0 || data_direction == PCI_DMA_NONE) {
+    if (buflen == 0 || data_direction == DMA_NONE) {
         cp->Header.SGList = 0;
         cp->Header.SGTotal = cpu_to_le16(0);
         return 0;
     }

-    addr64 = pci_map_single(pdev, buf, buflen, data_direction);
+    addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
     if (dma_mapping_error(&pdev->dev, addr64)) {
         /* Prevent subsequent unmap of something never mapped */
         cp->Header.SGList = 0;
@@ -2845,7 +2845,8 @@ static u32 lockup_detected(struct ctlr_info *h)
 #define MAX_DRIVER_CMD_RETRIES 25
 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
-    struct CommandList *c, int data_direction, unsigned long timeout_msecs)
+    struct CommandList *c, enum dma_data_direction data_direction,
+    unsigned long timeout_msecs)
 {
     int backoff_time = 10, retry_count = 0;
     int rc;
@@ -2969,8 +2970,8 @@ static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
         rc = -1;
         goto out;
     }
-    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-            PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+            NO_TIMEOUT);
     if (rc)
         goto out;
     ei = c->err_info;
@@ -3022,8 +3023,8 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
         rc = -1;
         goto out;
     }
-    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-            PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+            NO_TIMEOUT);
     if (rc)
         goto out;
     ei = c->err_info;
@@ -3306,8 +3307,8 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
         cmd_free(h, c);
         return -1;
     }
-    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-            PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+            NO_TIMEOUT);
     if (rc)
         goto out;
     ei = c->err_info;
@@ -3349,8 +3350,8 @@ static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
     c->Request.CDB[2] = bmic_device_index & 0xff;
     c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

-    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-            PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+            NO_TIMEOUT);
     if (rc)
         goto out;
     ei = c->err_info;
@@ -3377,8 +3378,8 @@ static int hpsa_bmic_id_controller(struct ctlr_info *h,
     if (rc)
         goto out;

-    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-            PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+            NO_TIMEOUT);
     if (rc)
         goto out;
     ei = c->err_info;
@@ -3408,7 +3409,7 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
     c->Request.CDB[2] = bmic_device_index & 0xff;
     c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

-    hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
+    hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
                         NO_TIMEOUT);
     ei = c->err_info;
     if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
@@ -3484,7 +3485,7 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
     else
         c->Request.CDB[5] = 0;

-    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
+    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
                         NO_TIMEOUT);
     if (rc)
         goto out;
@@ -3731,8 +3732,8 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
     }
     if (extended_response)
         c->Request.CDB[1] = extended_response;
-    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-            PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+            NO_TIMEOUT);
     if (rc)
         goto out;
     ei = c->err_info;
@@ -6320,8 +6321,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)

     /* Fill in the scatter gather information */
     if (iocommand.buf_size > 0) {
-        temp64 = pci_map_single(h->pdev, buff,
-            iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+        temp64 = dma_map_single(&h->pdev->dev, buff,
+            iocommand.buf_size, DMA_BIDIRECTIONAL);
         if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
             c->SG[0].Addr = cpu_to_le64(0);
             c->SG[0].Len = cpu_to_le32(0);
@@ -6335,7 +6336,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
     rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
                     NO_TIMEOUT);
     if (iocommand.buf_size > 0)
-        hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+        hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
     check_ioctl_unit_attention(h, c);
     if (rc) {
         rc = -EIO;
@@ -6381,13 +6382,9 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
         return -EINVAL;
     if (!capable(CAP_SYS_RAWIO))
         return -EPERM;
-    ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
-    if (!ioc) {
-        status = -ENOMEM;
-        goto cleanup1;
-    }
-    if (copy_from_user(ioc, argp, sizeof(*ioc))) {
-        status = -EFAULT;
+    ioc = vmemdup_user(argp, sizeof(*ioc));
+    if (IS_ERR(ioc)) {
+        status = PTR_ERR(ioc);
         goto cleanup1;
     }
     if ((ioc->buf_size < 1) &&
@@ -6447,14 +6444,14 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
     if (ioc->buf_size > 0) {
         int i;
         for (i = 0; i < sg_used; i++) {
-            temp64 = pci_map_single(h->pdev, buff[i],
-                    buff_size[i], PCI_DMA_BIDIRECTIONAL);
+            temp64 = dma_map_single(&h->pdev->dev, buff[i],
+                    buff_size[i], DMA_BIDIRECTIONAL);
             if (dma_mapping_error(&h->pdev->dev,
                             (dma_addr_t) temp64)) {
                 c->SG[i].Addr = cpu_to_le64(0);
                 c->SG[i].Len = cpu_to_le32(0);
                 hpsa_pci_unmap(h->pdev, c, i,
-                    PCI_DMA_BIDIRECTIONAL);
+                    DMA_BIDIRECTIONAL);
                 status = -ENOMEM;
                 goto cleanup0;
             }
@@ -6467,7 +6464,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
     status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
                         NO_TIMEOUT);
     if (sg_used)
-        hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
+        hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
     check_ioctl_unit_attention(h, c);
     if (status) {
         status = -EIO;
@@ -6505,7 +6502,7 @@ cleanup1:
         kfree(buff);
     }
     kfree(buff_size);
-    kfree(ioc);
+    kvfree(ioc);
     return status;
 }
@@ -6579,7 +6576,7 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
     void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
     int cmd_type)
 {
-    int pci_dir = XFER_NONE;
+    enum dma_data_direction dir = DMA_NONE;

     c->cmd_type = CMD_IOCTL_PEND;
     c->scsi_cmd = SCSI_CMD_BUSY;
@@ -6785,18 +6782,18 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,

     switch (GET_DIR(c->Request.type_attr_dir)) {
     case XFER_READ:
-        pci_dir = PCI_DMA_FROMDEVICE;
+        dir = DMA_FROM_DEVICE;
         break;
     case XFER_WRITE:
-        pci_dir = PCI_DMA_TODEVICE;
+        dir = DMA_TO_DEVICE;
         break;
     case XFER_NONE:
-        pci_dir = PCI_DMA_NONE;
+        dir = DMA_NONE;
         break;
     default:
-        pci_dir = PCI_DMA_BIDIRECTIONAL;
+        dir = DMA_BIDIRECTIONAL;
     }
-    if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
+    if (hpsa_map_one(h->pdev, c, buff, size, dir))
         return -1;
     return 0;
 }
@@ -6992,13 +6989,13 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
      * CCISS commands, so they must be allocated from the lower 4GiB of
      * memory.
      */
-    err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+    err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
     if (err) {
         iounmap(vaddr);
         return err;
     }

-    cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
+    cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
     if (cmd == NULL) {
         iounmap(vaddr);
         return -ENOMEM;
@@ -7047,7 +7044,7 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
         return -ETIMEDOUT;
     }

-    pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
+    dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);

     if (tag & HPSA_ERROR_BIT) {
         dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
@@ -7914,7 +7911,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
     kfree(h->cmd_pool_bits);
     h->cmd_pool_bits = NULL;
     if (h->cmd_pool) {
-        pci_free_consistent(h->pdev,
+        dma_free_coherent(&h->pdev->dev,
                 h->nr_cmds * sizeof(struct CommandList),
                 h->cmd_pool,
                 h->cmd_pool_dhandle);
@@ -7922,7 +7919,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
         h->cmd_pool_dhandle = 0;
     }
     if (h->errinfo_pool) {
-        pci_free_consistent(h->pdev,
+        dma_free_coherent(&h->pdev->dev,
                 h->nr_cmds * sizeof(struct ErrorInfo),
                 h->errinfo_pool,
                 h->errinfo_pool_dhandle);
@@ -7936,12 +7933,12 @@ static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
     h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
                    sizeof(unsigned long),
                    GFP_KERNEL);
-    h->cmd_pool = pci_alloc_consistent(h->pdev,
+    h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
             h->nr_cmds * sizeof(*h->cmd_pool),
-            &(h->cmd_pool_dhandle));
-    h->errinfo_pool = pci_alloc_consistent(h->pdev,
+            &h->cmd_pool_dhandle, GFP_KERNEL);
+    h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
             h->nr_cmds * sizeof(*h->errinfo_pool),
-            &(h->errinfo_pool_dhandle));
+            &h->errinfo_pool_dhandle, GFP_KERNEL);
     if ((h->cmd_pool_bits == NULL)
         || (h->cmd_pool == NULL)
         || (h->errinfo_pool == NULL)) {
@@ -8068,7 +8065,7 @@ static void hpsa_free_reply_queues(struct ctlr_info *h)
     for (i = 0; i < h->nreply_queues; i++) {
         if (!h->reply_queue[i].head)
             continue;
-        pci_free_consistent(h->pdev,
+        dma_free_coherent(&h->pdev->dev,
                     h->reply_queue_size,
                     h->reply_queue[i].head,
                     h->reply_queue[i].busaddr);
@@ -8594,11 +8591,11 @@ reinit_after_soft_reset:
     number_of_controllers++;

     /* configure PCI DMA stuff */
-    rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+    rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
     if (rc == 0) {
         dac = 1;
     } else {
-        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+        rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
         if (rc == 0) {
             dac = 0;
         } else {
@@ -8797,8 +8794,8 @@ static void hpsa_flush_cache(struct ctlr_info *h)
         RAID_CTLR_LUNID, TYPE_CMD)) {
         goto out;
     }
-    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-            PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
+    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
+            DEFAULT_TIMEOUT);
     if (rc)
         goto out;
     if (c->err_info->CommandStatus != 0)
@@ -8833,8 +8830,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
         RAID_CTLR_LUNID, TYPE_CMD))
         goto errout;

-    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-        PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+        NO_TIMEOUT);
     if ((rc != 0) || (c->err_info->CommandStatus != 0))
         goto errout;
@@ -8845,8 +8842,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
         RAID_CTLR_LUNID, TYPE_CMD))
         goto errout;

-    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-        PCI_DMA_TODEVICE, NO_TIMEOUT);
+    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
+        NO_TIMEOUT);
     if ((rc != 0) || (c->err_info->CommandStatus != 0))
         goto errout;
@@ -8855,8 +8852,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
         RAID_CTLR_LUNID, TYPE_CMD))
         goto errout;

-    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-        PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+    rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+        NO_TIMEOUT);
     if ((rc != 0) || (c->err_info->CommandStatus != 0))
         goto errout;
@@ -9228,9 +9225,9 @@ static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
     BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
             IOACCEL1_COMMANDLIST_ALIGNMENT);
     h->ioaccel_cmd_pool =
-        pci_alloc_consistent(h->pdev,
+        dma_alloc_coherent(&h->pdev->dev,
             h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
-            &(h->ioaccel_cmd_pool_dhandle));
+            &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);

     h->ioaccel1_blockFetchTable =
         kmalloc(((h->ioaccel_maxsg + 1) *
@@ -9281,9 +9278,9 @@ static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
     BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
             IOACCEL2_COMMANDLIST_ALIGNMENT);
     h->ioaccel2_cmd_pool =
-        pci_alloc_consistent(h->pdev,
+        dma_alloc_coherent(&h->pdev->dev,
             h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
-            &(h->ioaccel2_cmd_pool_dhandle));
+            &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);

     h->ioaccel2_blockFetchTable =
         kmalloc(((h->ioaccel_maxsg + 1) *
@@ -9356,9 +9353,10 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
     h->reply_queue_size = h->max_commands * sizeof(u64);

     for (i = 0; i < h->nreply_queues; i++) {
-        h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
+        h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
                         h->reply_queue_size,
-                        &(h->reply_queue[i].busaddr));
+                        &h->reply_queue[i].busaddr,
+                        GFP_KERNEL);
         if (!h->reply_queue[i].head) {
             rc = -ENOMEM;
             goto clean1;    /* rq, ioaccel */

@@ -2266,7 +2266,6 @@ static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
     /*
      * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
      */
-    target_wait_for_sess_cmds(se_sess);
     target_remove_session(se_sess);

     tport->ibmv_nexus = NULL;
     kfree(nexus);

@@ -208,7 +208,7 @@ module_param(ips, charp, 0);
 #define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
              DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
-             PCI_DMA_BIDIRECTIONAL : \
+             DMA_BIDIRECTIONAL : \
              scb->scsi_cmd->sc_data_direction)

 #ifdef IPS_DEBUG
@@ -1529,11 +1529,12 @@ ips_alloc_passthru_buffer(ips_ha_t * ha, int length)
     if (ha->ioctl_data && length <= ha->ioctl_len)
         return 0;
     /* there is no buffer or it's not big enough, allocate a new one */
-    bigger_buf = pci_alloc_consistent(ha->pcidev, length, &dma_busaddr);
+    bigger_buf = dma_alloc_coherent(&ha->pcidev->dev, length, &dma_busaddr,
+            GFP_KERNEL);
     if (bigger_buf) {
         /* free the old memory */
-        pci_free_consistent(ha->pcidev, ha->ioctl_len, ha->ioctl_data,
-                    ha->ioctl_busaddr);
+        dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
+                  ha->ioctl_data, ha->ioctl_busaddr);
         /* use the new memory */
         ha->ioctl_data = (char *) bigger_buf;
         ha->ioctl_len = length;
@@ -1678,9 +1679,8 @@ ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
     } else if (!ha->flash_data) {
         datasize = pt->CoppCP.cmd.flashfw.total_packets *
             pt->CoppCP.cmd.flashfw.count;
-        ha->flash_data = pci_alloc_consistent(ha->pcidev,
-                              datasize,
-                              &ha->flash_busaddr);
+        ha->flash_data = dma_alloc_coherent(&ha->pcidev->dev,
+                datasize, &ha->flash_busaddr, GFP_KERNEL);
         if (!ha->flash_data){
             printk(KERN_WARNING "Unable to allocate a flash buffer\n");
             return IPS_FAILURE;
@@ -1858,7 +1858,7 @@ ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
     scb->data_len = ha->flash_datasize;
     scb->data_busaddr =
-        pci_map_single(ha->pcidev, ha->flash_data, scb->data_len,
+        dma_map_single(&ha->pcidev->dev, ha->flash_data, scb->data_len,
                IPS_DMA_DIR(scb));
     scb->flags |= IPS_SCB_MAP_SINGLE;
     scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
@@ -1880,8 +1880,8 @@ ips_free_flash_copperhead(ips_ha_t * ha)
     if (ha->flash_data == ips_FlashData)
         test_and_clear_bit(0, &ips_FlashDataInUse);
     else if (ha->flash_data)
-        pci_free_consistent(ha->pcidev, ha->flash_len, ha->flash_data,
-                    ha->flash_busaddr);
+        dma_free_coherent(&ha->pcidev->dev, ha->flash_len,
+                  ha->flash_data, ha->flash_busaddr);
     ha->flash_data = NULL;
 }
@@ -3485,6 +3485,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)

         case START_STOP:
             scb->scsi_cmd->result = DID_OK << 16;
+            break;

         case TEST_UNIT_READY:
         case INQUIRY:
@@ -4212,7 +4213,7 @@ ips_free(ips_ha_t * ha)
     if (ha) {
         if (ha->enq) {
-            pci_free_consistent(ha->pcidev, sizeof(IPS_ENQ),
+            dma_free_coherent(&ha->pcidev->dev, sizeof(IPS_ENQ),
                         ha->enq, ha->enq_busaddr);
             ha->enq = NULL;
         }
@@ -4221,7 +4222,7 @@ ips_free(ips_ha_t * ha)
         ha->conf = NULL;

         if (ha->adapt) {
-            pci_free_consistent(ha->pcidev,
+            dma_free_coherent(&ha->pcidev->dev,
                         sizeof (IPS_ADAPTER) +
                         sizeof (IPS_IO_CMD), ha->adapt,
                         ha->adapt->hw_status_start);
@@ -4229,7 +4230,7 @@ ips_free(ips_ha_t * ha)
         }

         if (ha->logical_drive_info) {
-            pci_free_consistent(ha->pcidev,
+            dma_free_coherent(&ha->pcidev->dev,
                         sizeof (IPS_LD_INFO),
                         ha->logical_drive_info,
                         ha->logical_drive_info_dma_addr);
@@ -4243,7 +4244,7 @@ ips_free(ips_ha_t * ha)
         ha->subsys = NULL;

         if (ha->ioctl_data) {
-            pci_free_consistent(ha->pcidev, ha->ioctl_len,
+            dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
                         ha->ioctl_data, ha->ioctl_busaddr);
             ha->ioctl_data = NULL;
             ha->ioctl_datasize = 0;
@@ -4276,11 +4277,11 @@ static int
 ips_deallocatescbs(ips_ha_t * ha, int cmds)
 {
     if (ha->scbs) {
-        pci_free_consistent(ha->pcidev,
+        dma_free_coherent(&ha->pcidev->dev,
                     IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds,
                     ha->scbs->sg_list.list,
                     ha->scbs->sg_busaddr);
-        pci_free_consistent(ha->pcidev, sizeof (ips_scb_t) * cmds,
+        dma_free_coherent(&ha->pcidev->dev, sizeof (ips_scb_t) * cmds,
                     ha->scbs, ha->scbs->scb_busaddr);
         ha->scbs = NULL;
     }           /* end if */
@@ -4307,17 +4308,16 @@ ips_allocatescbs(ips_ha_t * ha)
     METHOD_TRACE("ips_allocatescbs", 1);

     /* Allocate memory for the SCBs */
-    ha->scbs =
-        pci_alloc_consistent(ha->pcidev, ha->max_cmds * sizeof (ips_scb_t),
-                 &command_dma);
+    ha->scbs = dma_alloc_coherent(&ha->pcidev->dev,
+            ha->max_cmds * sizeof (ips_scb_t),
+            &command_dma, GFP_KERNEL);
     if (ha->scbs == NULL)
         return 0;
-    ips_sg.list =
-        pci_alloc_consistent(ha->pcidev,
-                 IPS_SGLIST_SIZE(ha) * IPS_MAX_SG *
-                 ha->max_cmds, &sg_dma);
+    ips_sg.list = dma_alloc_coherent(&ha->pcidev->dev,
+            IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * ha->max_cmds,
+            &sg_dma, GFP_KERNEL);
     if (ips_sg.list == NULL) {
-        pci_free_consistent(ha->pcidev,
+        dma_free_coherent(&ha->pcidev->dev,
                     ha->max_cmds * sizeof (ips_scb_t), ha->scbs,
                     command_dma);
         return 0;
@@ -4446,8 +4446,8 @@ ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
     if (scb->flags & IPS_SCB_MAP_SG)
         scsi_dma_unmap(scb->scsi_cmd);
     else if (scb->flags & IPS_SCB_MAP_SINGLE)
-        pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
-                 IPS_DMA_DIR(scb));
+        dma_unmap_single(&ha->pcidev->dev, scb->data_busaddr,
+                 scb->data_len, IPS_DMA_DIR(scb));

     /* check to make sure this is not our "special" scb */
     if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) {
@@ -4559,7 +4559,8 @@ ips_flush_and_reset(ips_ha_t *ha)
     dma_addr_t command_dma;

     /* Create a usuable SCB */
-    scb = pci_alloc_consistent(ha->pcidev, sizeof(ips_scb_t), &command_dma);
+    scb = dma_alloc_coherent(&ha->pcidev->dev, sizeof(ips_scb_t),
+            &command_dma, GFP_KERNEL);
     if (scb) {
         memset(scb, 0, sizeof(ips_scb_t));
         ips_init_scb(ha, scb);
@@ -4594,7 +4595,7 @@ ips_flush_and_reset(ips_ha_t *ha)
     /* Now RESET and INIT the adapter */
     (*ha->func.reset) (ha);

-    pci_free_consistent(ha->pcidev, sizeof(ips_scb_t), scb, command_dma);
+    dma_free_coherent(&ha->pcidev->dev, sizeof(ips_scb_t), scb, command_dma);
     return;
 }
@@ -6926,29 +6927,30 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
      * are guaranteed to be < 4G.
      */
     if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) &&
-        !pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(64))) {
+        !dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(64))) {
         (ha)->flags |= IPS_HA_ENH_SG;
     } else {
-        if (pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(32)) != 0) {
+        if (dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(32)) != 0) {
             printk(KERN_WARNING "Unable to set DMA Mask\n");
             return ips_abort_init(ha, index);
         }
     }
     if(ips_cd_boot && !ips_FlashData){
-        ips_FlashData = pci_alloc_consistent(pci_dev, PAGE_SIZE << 7,
-                             &ips_flashbusaddr);
+        ips_FlashData = dma_alloc_coherent(&pci_dev->dev,
+                PAGE_SIZE << 7, &ips_flashbusaddr, GFP_KERNEL);
     }

-    ha->enq = pci_alloc_consistent(pci_dev, sizeof (IPS_ENQ),
-                       &ha->enq_busaddr);
+    ha->enq = dma_alloc_coherent(&pci_dev->dev, sizeof (IPS_ENQ),
+            &ha->enq_busaddr, GFP_KERNEL);
     if (!ha->enq) {
         IPS_PRINTK(KERN_WARNING, pci_dev,
                "Unable to allocate host inquiry structure\n");
         return ips_abort_init(ha, index);
     }

-    ha->adapt = pci_alloc_consistent(pci_dev, sizeof (IPS_ADAPTER) +
-                     sizeof (IPS_IO_CMD), &dma_address);
+    ha->adapt = dma_alloc_coherent(&pci_dev->dev,
+            sizeof (IPS_ADAPTER) + sizeof (IPS_IO_CMD),
+            &dma_address, GFP_KERNEL);
     if (!ha->adapt) {
         IPS_PRINTK(KERN_WARNING, pci_dev,
                "Unable to allocate host adapt & dummy structures\n");
@@ -6959,7 +6961,8 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
-    ha->logical_drive_info = pci_alloc_consistent(pci_dev, sizeof (IPS_LD_INFO), &dma_address);
+    ha->logical_drive_info = dma_alloc_coherent(&pci_dev->dev,
+            sizeof (IPS_LD_INFO), &dma_address, GFP_KERNEL);
     if (!ha->logical_drive_info) {
         IPS_PRINTK(KERN_WARNING, pci_dev,
                "Unable to allocate logical drive info structure\n");
@@ -6997,8 +7000,8 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
if (ips_ioctlsize < PAGE_SIZE) if (ips_ioctlsize < PAGE_SIZE)
ips_ioctlsize = PAGE_SIZE; ips_ioctlsize = PAGE_SIZE;
ha->ioctl_data = pci_alloc_consistent(pci_dev, ips_ioctlsize, ha->ioctl_data = dma_alloc_coherent(&pci_dev->dev, ips_ioctlsize,
&ha->ioctl_busaddr); &ha->ioctl_busaddr, GFP_KERNEL);
ha->ioctl_len = ips_ioctlsize; ha->ioctl_len = ips_ioctlsize;
if (!ha->ioctl_data) { if (!ha->ioctl_data) {
IPS_PRINTK(KERN_WARNING, pci_dev, IPS_PRINTK(KERN_WARNING, pci_dev,
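Note on the conversion pattern above (and in the other "switch to generic DMA API" patches in this series): pci_alloc_consistent()/pci_free_consistent() are thin compatibility wrappers around the generic DMA API, so the mechanical rewrite is to operate on &pdev->dev and pass an explicit GFP flag. A minimal sketch, assuming a sleepable probe/init context (the legacy wrapper hard-coded GFP_ATOMIC, which is why these conversions can also relax the allocation to GFP_KERNEL):

    /* legacy PCI DMA API */
    buf = pci_alloc_consistent(pdev, size, &dma_handle);
    ...
    pci_free_consistent(pdev, size, buf, dma_handle);

    /* generic DMA API equivalent */
    buf = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
    ...
    dma_free_coherent(&pdev->dev, size, buf, dma_handle);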

View File

@@ -2717,9 +2717,9 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
  *    the task management request.
  * @task_request: the handle to the task request object to start.
  */
-enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
-                                               struct isci_remote_device *idev,
-                                               struct isci_request *ireq)
+enum sci_status sci_controller_start_task(struct isci_host *ihost,
+                                          struct isci_remote_device *idev,
+                                          struct isci_request *ireq)
 {
     enum sci_status status;
@@ -2728,7 +2728,7 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
              "%s: SCIC Controller starting task from invalid "
              "state\n",
              __func__);
-        return SCI_TASK_FAILURE_INVALID_STATE;
+        return SCI_FAILURE_INVALID_STATE;
     }
     status = sci_remote_device_start_task(ihost, idev, ireq);

View File

@@ -489,7 +489,7 @@ enum sci_status sci_controller_start_io(
     struct isci_remote_device *idev,
     struct isci_request *ireq);
-enum sci_task_status sci_controller_start_task(
+enum sci_status sci_controller_start_task(
     struct isci_host *ihost,
     struct isci_remote_device *idev,
     struct isci_request *ireq);

View File

@@ -1626,9 +1626,9 @@ static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
     if (status == SCI_SUCCESS) {
         if (ireq->stp.rsp.status & ATA_ERR)
-            status = SCI_IO_FAILURE_RESPONSE_VALID;
+            status = SCI_FAILURE_IO_RESPONSE_VALID;
     } else {
-        status = SCI_IO_FAILURE_RESPONSE_VALID;
+        status = SCI_FAILURE_IO_RESPONSE_VALID;
     }
     if (status != SCI_SUCCESS) {

View File

@@ -258,7 +258,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
                  struct isci_tmf *tmf, unsigned long timeout_ms)
 {
     DECLARE_COMPLETION_ONSTACK(completion);
-    enum sci_task_status status = SCI_TASK_FAILURE;
+    enum sci_status status = SCI_FAILURE;
     struct isci_request *ireq;
     int ret = TMF_RESP_FUNC_FAILED;
     unsigned long flags;
@@ -301,7 +301,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
     /* start the TMF io. */
     status = sci_controller_start_task(ihost, idev, ireq);
-    if (status != SCI_TASK_SUCCESS) {
+    if (status != SCI_SUCCESS) {
         dev_dbg(&ihost->pdev->dev,
             "%s: start_io failed - status = 0x%x, request = %p\n",
             __func__,

View File

@@ -800,7 +800,8 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
             return rc;
         return iscsi_conn_get_addr_param((struct sockaddr_storage *)
-                         &addr, param, buf);
+                         &addr,
+                         (enum iscsi_param)param, buf);
     default:
         return iscsi_host_get_param(shost, param, buf);
     }

View File

@@ -38,30 +38,6 @@ static u8 jazz_esp_read8(struct esp *esp, unsigned long reg)
     return *(volatile u8 *)(esp->regs + reg);
 }
-static dma_addr_t jazz_esp_map_single(struct esp *esp, void *buf,
-                      size_t sz, int dir)
-{
-    return dma_map_single(esp->dev, buf, sz, dir);
-}
-static int jazz_esp_map_sg(struct esp *esp, struct scatterlist *sg,
-               int num_sg, int dir)
-{
-    return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-static void jazz_esp_unmap_single(struct esp *esp, dma_addr_t addr,
-                  size_t sz, int dir)
-{
-    dma_unmap_single(esp->dev, addr, sz, dir);
-}
-static void jazz_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
-                  int num_sg, int dir)
-{
-    dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
 static int jazz_esp_irq_pending(struct esp *esp)
 {
     if (jazz_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
@@ -117,10 +93,6 @@ static int jazz_esp_dma_error(struct esp *esp)
 static const struct esp_driver_ops jazz_esp_ops = {
     .esp_write8 = jazz_esp_write8,
     .esp_read8 = jazz_esp_read8,
-    .map_single = jazz_esp_map_single,
-    .map_sg = jazz_esp_map_sg,
-    .unmap_single = jazz_esp_unmap_single,
-    .unmap_sg = jazz_esp_unmap_sg,
     .irq_pending = jazz_esp_irq_pending,
     .reset_dma = jazz_esp_reset_dma,
     .dma_drain = jazz_esp_dma_drain,
@@ -182,7 +154,7 @@ static int esp_jazz_probe(struct platform_device *dev)
     dev_set_drvdata(&dev->dev, esp);
-    err = scsi_esp_register(esp, &dev->dev);
+    err = scsi_esp_register(esp);
     if (err)
         goto fail_free_irq;
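The four jazz_esp wrappers removed above did nothing but forward to the generic DMA API, which is why the esp_driver_ops mapping hooks (and, apparently, the extra device argument to scsi_esp_register()) could go away: the shared esp core can map buffers itself through esp->dev. A rough sketch of what the core-side call reduces to (illustrative only, not the literal esp_scsi code):

    /* in the shared esp core, with esp->dev set by the bus driver */
    dma_addr_t addr = dma_map_single(esp->dev, buf, sz, dir);
    ...
    dma_unmap_single(esp->dev, addr, sz, dir);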

View File

@@ -1872,7 +1872,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
     struct fc_lport *lport = shost_priv(shost);
     struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
     struct fc_fcp_pkt *fsp;
-    struct fc_rport_libfc_priv *rpriv;
     int rval;
     int rc = 0;
     struct fc_stats *stats;
@@ -1894,8 +1893,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
         goto out;
     }
-    rpriv = rport->dd_data;
     if (!fc_fcp_lport_queue_ready(lport)) {
         if (lport->qfull) {
             if (fc_fcp_can_queue_ramp_down(lport))
@@ -2295,8 +2292,7 @@ int fc_setup_fcp(void)
 void fc_destroy_fcp(void)
 {
-    if (scsi_pkt_cachep)
-        kmem_cache_destroy(scsi_pkt_cachep);
+    kmem_cache_destroy(scsi_pkt_cachep);
 }
 /**

View File

@@ -1038,8 +1038,11 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
         struct fc_els_ls_rjt *rjt;
         rjt = fc_frame_payload_get(fp, sizeof(*rjt));
-        FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
-                 rjt->er_reason, rjt->er_explan);
+        if (!rjt)
+            FC_RPORT_DBG(rdata, "PLOGI bad response\n");
+        else
+            FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
+                     rjt->er_reason, rjt->er_explan);
         fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
     }
 out:
@@ -1158,8 +1161,10 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
     op = fc_frame_payload_op(fp);
     if (op == ELS_LS_ACC) {
         pp = fc_frame_payload_get(fp, sizeof(*pp));
-        if (!pp)
+        if (!pp) {
+            fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
             goto out;
+        }
         resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
         FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
@@ -1172,8 +1177,10 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
             fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
             goto out;
         }
-        if (pp->prli.prli_spp_len < sizeof(pp->spp))
+        if (pp->prli.prli_spp_len < sizeof(pp->spp)) {
+            fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
             goto out;
+        }
         fcp_parm = ntohl(pp->spp.spp_params);
         if (fcp_parm & FCP_SPPF_RETRY)
@@ -1211,8 +1218,11 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
     } else {
         rjt = fc_frame_payload_get(fp, sizeof(*rjt));
-        FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
-                 rjt->er_reason, rjt->er_explan);
+        if (!rjt)
+            FC_RPORT_DBG(rdata, "PRLI bad response\n");
+        else
+            FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
+                     rjt->er_reason, rjt->er_explan);
         fc_rport_error_retry(rdata, FC_EX_ELS_RJT);
     }

View File

@@ -654,7 +654,7 @@ void sas_probe_sata(struct asd_sas_port *port)
         /* if libata could not bring the link up, don't surface
          * the device
          */
-        if (ata_dev_disabled(sas_to_ata_dev(dev)))
+        if (!ata_dev_enabled(sas_to_ata_dev(dev)))
             sas_fail_probe(dev, __func__, -ENODEV);
     }

View File

@@ -260,7 +260,7 @@ static void sas_suspend_devices(struct work_struct *work)
      * phy_list is not being mutated
      */
     list_for_each_entry(phy, &port->phy_list, port_phy_el) {
-        if (si->dft->lldd_port_formed)
+        if (si->dft->lldd_port_deformed)
             si->dft->lldd_port_deformed(phy);
         phy->suspended = 1;
         port->suspended = 1;

View File

@@ -48,17 +48,16 @@ static void smp_task_timedout(struct timer_list *t)
     unsigned long flags;
     spin_lock_irqsave(&task->task_state_lock, flags);
-    if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+    if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
         task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+        complete(&task->slow_task->completion);
+    }
     spin_unlock_irqrestore(&task->task_state_lock, flags);
-    complete(&task->slow_task->completion);
 }
 static void smp_task_done(struct sas_task *task)
 {
-    if (!del_timer(&task->slow_task->timer))
-        return;
+    del_timer(&task->slow_task->timer);
     complete(&task->slow_task->completion);
 }
@@ -2054,14 +2053,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
             return res;
     }
-    /* delete the old link */
-    if (SAS_ADDR(phy->attached_sas_addr) &&
-        SAS_ADDR(sas_addr) != SAS_ADDR(phy->attached_sas_addr)) {
-        SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
-                SAS_ADDR(dev->sas_addr), phy_id,
-                SAS_ADDR(phy->attached_sas_addr));
-        sas_unregister_devs_sas_addr(dev, phy_id, last);
-    }
+    /* we always have to delete the old device when we went here */
+    SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
+            SAS_ADDR(dev->sas_addr), phy_id,
+            SAS_ADDR(phy->attached_sas_addr));
+    sas_unregister_devs_sas_addr(dev, phy_id, last);
     return sas_discover_new(dev, phy_id);
 }
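The smp_task_timedout()/smp_task_done() rework above closes a completion race: the old code completed only from the timeout handler, and smp_task_done() returned without completing when it lost the del_timer() race, which could leave the waiter hanging. The fixed pattern signals the completion from whichever side runs, with the DONE flag (checked under task_state_lock) keeping the timeout path from also marking an already-finished task aborted. A condensed sketch of the resulting pattern (names shortened; assumes the issuer blocks in wait_for_completion() on slow_task->completion):

    static void demo_timedout(struct timer_list *t)
    {
        struct sas_task_slow *slow = from_timer(slow, t, timer);
        struct sas_task *task = slow->task;
        unsigned long flags;

        spin_lock_irqsave(&task->task_state_lock, flags);
        if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
            task->task_state_flags |= SAS_TASK_STATE_ABORTED;
            complete(&slow->completion);    /* timeout path wins */
        }
        spin_unlock_irqrestore(&task->task_state_lock, flags);
    }

    static void demo_done(struct sas_task *task)
    {
        del_timer(&task->slow_task->timer);     /* best effort; DONE guards the race */
        complete(&task->slow_task->completion); /* always signal the waiter */
    }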

View File

@@ -52,7 +52,7 @@ struct lpfc_sli2_slim;
         downloads using bsg */
 #define LPFC_MIN_SG_SLI4_BUF_SZ 0x800   /* based on LPFC_DEFAULT_SG_SEG_CNT */
-#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
+#define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */
 #define LPFC_MAX_SG_SEG_CNT_DIF 512     /* sg element count per scsi cmnd */
 #define LPFC_MAX_SG_SEG_CNT 4096        /* sg element count per scsi cmnd */
 #define LPFC_MIN_SG_SEG_CNT 32          /* sg element count per scsi cmnd */
@@ -583,6 +583,25 @@ struct lpfc_mbox_ext_buf_ctx {
     struct list_head ext_dmabuf_list;
 };
+struct lpfc_ras_fwlog {
+    uint8_t *fwlog_buff;
+    uint32_t fw_buffcount; /* Buffer size posted to FW */
+#define LPFC_RAS_BUFF_ENTERIES  16      /* Each entry can hold max of 64k */
+#define LPFC_RAS_MAX_ENTRY_SIZE (64 * 1024)
+#define LPFC_RAS_MIN_BUFF_POST_SIZE (256 * 1024)
+#define LPFC_RAS_MAX_BUFF_POST_SIZE (1024 * 1024)
+    uint32_t fw_loglevel; /* Log level set */
+    struct lpfc_dmabuf lwpd;
+    struct list_head fwlog_buff_list;
+
+    /* RAS support status on adapter */
+    bool ras_hwsupport; /* RAS Support available on HW or not */
+    bool ras_enabled; /* Ras Enabled for the function */
+#define LPFC_RAS_DISABLE_LOGGING 0x00
+#define LPFC_RAS_ENABLE_LOGGING 0x01
+    bool ras_active; /* RAS logging running state */
+};
+
 struct lpfc_hba {
     /* SCSI interface function jump table entries */
     int (*lpfc_new_scsi_buf)
@@ -790,6 +809,7 @@ struct lpfc_hba {
     uint32_t cfg_total_seg_cnt;
     uint32_t cfg_sg_seg_cnt;
     uint32_t cfg_nvme_seg_cnt;
+    uint32_t cfg_scsi_seg_cnt;
     uint32_t cfg_sg_dma_buf_size;
     uint64_t cfg_soft_wwnn;
     uint64_t cfg_soft_wwpn;
@@ -833,6 +853,9 @@ struct lpfc_hba {
 #define LPFC_FDMI_SUPPORT 1     /* FDMI supported? */
     uint32_t cfg_enable_SmartSAN;
     uint32_t cfg_enable_mds_diags;
+    uint32_t cfg_ras_fwlog_level;
+    uint32_t cfg_ras_fwlog_buffsize;
+    uint32_t cfg_ras_fwlog_func;
     uint32_t cfg_enable_fc4_type;
     uint32_t cfg_enable_bbcr;       /* Enable BB Credit Recovery */
     uint32_t cfg_enable_dpp;        /* Enable Direct Packet Push */
@@ -963,6 +986,7 @@ struct lpfc_hba {
     uint32_t intr_mode;
 #define LPFC_INTR_ERROR 0xFFFFFFFF
     struct list_head port_list;
+    spinlock_t port_list_lock;      /* lock for port_list mutations */
     struct lpfc_vport *pport;       /* physical lpfc_vport pointer */
     uint16_t max_vpi;               /* Maximum virtual nports */
 #define LPFC_MAX_VPI 0xFFFF         /* Max number of VPI supported */
@@ -1092,6 +1116,9 @@ struct lpfc_hba {
     struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
     uint32_t ctx_idx;
+    /* RAS Support */
+    struct lpfc_ras_fwlog ras_fwlog;
+
     uint8_t menlo_flag;     /* menlo generic flags */
 #define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */
     uint32_t iocb_cnt;
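As a rough guide to the constants in lpfc_ras_fwlog above: the host log buffer is sized in quarters of a megabyte (LPFC_RAS_MIN_BUFF_POST_SIZE = 256 KB, capped at LPFC_RAS_MAX_BUFF_POST_SIZE = 1 MB) and carved into LPFC_RAS_MAX_ENTRY_SIZE (64 KB) DMA chunks. A hypothetical sizing helper, not taken from the driver, mapping the lpfc_ras_fwlog_buffsize module parameter (1..4) onto a chunk count:

    static u32 demo_ras_chunk_count(u32 fwlog_buffsize)
    {
        u32 bytes = fwlog_buffsize * LPFC_RAS_MIN_BUFF_POST_SIZE; /* N * 256 KB */

        return bytes / LPFC_RAS_MAX_ENTRY_SIZE; /* e.g. 2 -> 512 KB -> 8 chunks */
    }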

View File

@@ -5358,15 +5358,74 @@ LPFC_ATTR(delay_discovery, 0, 0, 1,
 /*
  * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
- * This value can be set to values between 64 and 4096. The default value is
- * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
- * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * This value can be set to values between 64 and 4096. The default value
+ * is 64, but may be increased to allow for larger Max I/O sizes. The scsi
+ * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE).
  * Because of the additional overhead involved in setting up T10-DIF,
  * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
  * and will be limited to 512 if BlockGuard is enabled under SLI3.
  */
-LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
-        LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
+static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
+module_param(lpfc_sg_seg_cnt, uint, 0444);
+MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
+
+/**
+ * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes
+ *    configured for the adapter
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains a string with the list sizes
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
+             char *buf)
+{
+    struct Scsi_Host *shost = class_to_shost(dev);
+    struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+    struct lpfc_hba *phba = vport->phba;
+    int len;
+
+    len = snprintf(buf, PAGE_SIZE, "SGL sz: %d  total SGEs: %d\n",
+               phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
+
+    len += snprintf(buf + len, PAGE_SIZE, "Cfg: %d  SCSI: %d  NVME: %d\n",
+            phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
+            phba->cfg_nvme_seg_cnt);
+    return len;
+}
+
+static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
+
+/**
+ * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value
+ * @phba: lpfc_hba pointer.
+ * @val: contains the initial value
+ *
+ * Description:
+ * Validates the initial value is within range and assigns it to the
+ * adapter. If not in range, an error message is posted and the
+ * default value is assigned.
+ *
+ * Returns:
+ * zero if value is in range and is set
+ * -EINVAL if value was out of range
+ **/
+static int
+lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
+{
+    if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
+        phba->cfg_sg_seg_cnt = val;
+        return 0;
+    }
+    lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+            "0409 "LPFC_DRIVER_NAME"_sg_seg_cnt attribute cannot "
+            "be set to %d, allowed range is [%d, %d]\n",
+            val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
+    phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
+    return -EINVAL;
+}
 /*
  * lpfc_enable_mds_diags: Enable MDS Diagnostics
@@ -5376,6 +5435,31 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
  */
 LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
+/*
+ * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
+ *  0 = Disable firmware logging (default)
+ *  [1-4] = Multiple of 1/4th Mb of host memory for FW logging
+ * Value range [0..4]. Default value is 0
+ */
+LPFC_ATTR_RW(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+
+/*
+ * lpfc_ras_fwlog_level: Firmware logging verbosity level
+ * Valid only if firmware logging is enabled
+ * 0(Least Verbosity) 4 (most verbosity)
+ * Value range is [0..4]. Default value is 0
+ */
+LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
+
+/*
+ * lpfc_ras_fwlog_func: Firmware logging enabled on function number
+ * Default function which has RAS support : 0
+ * Value Range is [0..7].
+ * FW logging is a global action and enablement is via a specific
+ * port.
+ */
+LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
 /*
  * lpfc_enable_bbcr: Enable BB Credit Recovery
  *       0 = BB Credit Recovery disabled
@@ -5501,6 +5585,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
     &dev_attr_protocol,
     &dev_attr_lpfc_xlane_supported,
     &dev_attr_lpfc_enable_mds_diags,
+    &dev_attr_lpfc_ras_fwlog_buffsize,
+    &dev_attr_lpfc_ras_fwlog_level,
+    &dev_attr_lpfc_ras_fwlog_func,
     &dev_attr_lpfc_enable_bbcr,
     &dev_attr_lpfc_enable_dpp,
     NULL,
@@ -6587,6 +6674,20 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
     lpfc_sli_mode_init(phba, lpfc_sli_mode);
     phba->cfg_enable_dss = 1;
     lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
+    lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
+    lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
+    lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
+
+    /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
+     * accommodate 512K and 1M IOs in a single nvme buf and supply
+     * enough NVME LS iocb buffers for larger connectivity counts.
+     */
+    if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+        phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+        phba->cfg_iocb_cnt = 5;
+    }
+
     return;
 }
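For context on the lpfc_sg_seg_cnt rewrite above: the driver's LPFC_ATTR_R() macro bundles the module parameter, a generic sysfs show routine, and a range-checking *_init routine into one definition; writing them out by hand lets the show routine report the derived SCSI/NVME split rather than just the raw parameter. Roughly (paraphrasing the macro from memory, not quoting it exactly), LPFC_ATTR_R(name, defval, minval, maxval, desc) expands to:

    static uint lpfc_name = defval;
    module_param(lpfc_name, uint, 0444);
    MODULE_PARM_DESC(lpfc_name, desc);
    /* ...plus generated lpfc_name_show() / lpfc_name_init() helpers, and */
    static DEVICE_ATTR(lpfc_name, 0444, lpfc_name_show, NULL);

which is why the open-coded version still supplies an lpfc_sg_seg_cnt_init() with the same style of range check (log message 0409) that the macro would have generated.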

View File

@@ -27,6 +27,7 @@
 #include <linux/delay.h>
 #include <linux/list.h>
 #include <linux/bsg-lib.h>
+#include <linux/vmalloc.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -2843,9 +2844,6 @@ diag_cmd_data_alloc(struct lpfc_hba *phba,
         if (nocopydata) {
             bpl->tus.f.bdeFlags = 0;
-            pci_dma_sync_single_for_device(phba->pcidev,
-                dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
         } else {
             memset((uint8_t *)dmp->dma.virt, 0, cnt);
             bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
@@ -5308,6 +5306,330 @@ job_error:
     return rc;
 }
+
+/**
+ * lpfc_check_fwlog_support: Check FW log support on the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Check if FW Logging support by the adapter
+ **/
+int
+lpfc_check_fwlog_support(struct lpfc_hba *phba)
+{
+    struct lpfc_ras_fwlog *ras_fwlog = NULL;
+
+    ras_fwlog = &phba->ras_fwlog;
+
+    if (ras_fwlog->ras_hwsupport == false)
+        return -EACCES;
+    else if (ras_fwlog->ras_enabled == false)
+        return -EPERM;
+    else
+        return 0;
+}
+
+/**
+ * lpfc_bsg_get_ras_config: Get RAS configuration settings
+ * @job: fc_bsg_job to handle
+ *
+ * Get RAS configuration values set.
+ **/
+static int
+lpfc_bsg_get_ras_config(struct bsg_job *job)
+{
+    struct Scsi_Host *shost = fc_bsg_to_shost(job);
+    struct lpfc_vport *vport = shost_priv(shost);
+    struct fc_bsg_reply *bsg_reply = job->reply;
+    struct lpfc_hba *phba = vport->phba;
+    struct lpfc_bsg_get_ras_config_reply *ras_reply;
+    struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+    int rc = 0;
+
+    if (job->request_len <
+        sizeof(struct fc_bsg_request) +
+        sizeof(struct lpfc_bsg_ras_req)) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                "6181 Received RAS_LOG request "
+                "below minimum size\n");
+        rc = -EINVAL;
+        goto ras_job_error;
+    }
+
+    /* Check FW log status */
+    rc = lpfc_check_fwlog_support(phba);
+    if (rc == -EACCES || rc == -EPERM)
+        goto ras_job_error;
+
+    ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
+        bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+    /* Current logging state */
+    if (ras_fwlog->ras_active == true)
+        ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
+    else
+        ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
+
+    ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
+    ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
+
+ras_job_error:
+    /* make error code available to userspace */
+    bsg_reply->result = rc;
+
+    /* complete the job back to userspace */
+    bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+    return rc;
+}
+
+/**
+ * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Disable FW logging into host memory on the adapter. To
+ * be done before reading logs from the host memory.
+ **/
+static void
+lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
+{
+    struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+
+    ras_fwlog->ras_active = false;
+
+    /* Disable FW logging to host memory */
+    writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
+           phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+}
+
+/**
+ * lpfc_bsg_set_ras_config: Set FW logging parameters
+ * @job: fc_bsg_job to handle
+ *
+ * Set log-level parameters for FW-logging in host memory
+ **/
+static int
+lpfc_bsg_set_ras_config(struct bsg_job *job)
+{
+    struct Scsi_Host *shost = fc_bsg_to_shost(job);
+    struct lpfc_vport *vport = shost_priv(shost);
+    struct lpfc_hba *phba = vport->phba;
+    struct lpfc_bsg_set_ras_config_req *ras_req;
+    struct fc_bsg_request *bsg_request = job->request;
+    struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+    struct fc_bsg_reply *bsg_reply = job->reply;
+    uint8_t action = 0, log_level = 0;
+    int rc = 0;
+
+    if (job->request_len <
+        sizeof(struct fc_bsg_request) +
+        sizeof(struct lpfc_bsg_set_ras_config_req)) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                "6182 Received RAS_LOG request "
+                "below minimum size\n");
+        rc = -EINVAL;
+        goto ras_job_error;
+    }
+
+    /* Check FW log status */
+    rc = lpfc_check_fwlog_support(phba);
+    if (rc == -EACCES || rc == -EPERM)
+        goto ras_job_error;
+
+    ras_req = (struct lpfc_bsg_set_ras_config_req *)
+        bsg_request->rqst_data.h_vendor.vendor_cmd;
+    action = ras_req->action;
+    log_level = ras_req->log_level;
+
+    if (action == LPFC_RASACTION_STOP_LOGGING) {
+        /* Check if already disabled */
+        if (ras_fwlog->ras_active == false) {
+            rc = -ESRCH;
+            goto ras_job_error;
+        }
+
+        /* Disable logging */
+        lpfc_ras_stop_fwlog(phba);
+    } else {
+        /*action = LPFC_RASACTION_START_LOGGING*/
+        if (ras_fwlog->ras_active == true) {
+            rc = -EINPROGRESS;
+            goto ras_job_error;
+        }
+
+        /* Enable logging */
+        rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
+                          LPFC_RAS_ENABLE_LOGGING);
+        if (rc)
+            rc = -EINVAL;
+    }
+
+ras_job_error:
+    /* make error code available to userspace */
+    bsg_reply->result = rc;
+
+    /* complete the job back to userspace */
+    bsg_job_done(job, bsg_reply->result,
+             bsg_reply->reply_payload_rcv_len);
+    return rc;
+}
+
+/**
+ * lpfc_bsg_get_ras_lwpd: Get log write position data
+ * @job: fc_bsg_job to handle
+ *
+ * Get Offset/Wrap count of the log message written
+ * in host memory
+ **/
+static int
+lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
+{
+    struct Scsi_Host *shost = fc_bsg_to_shost(job);
+    struct lpfc_vport *vport = shost_priv(shost);
+    struct lpfc_bsg_get_ras_lwpd *ras_reply;
+    struct lpfc_hba *phba = vport->phba;
+    struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+    struct fc_bsg_reply *bsg_reply = job->reply;
+    uint32_t lwpd_offset = 0;
+    uint64_t wrap_value = 0;
+    int rc = 0;
+
+    rc = lpfc_check_fwlog_support(phba);
+    if (rc == -EACCES || rc == -EPERM)
+        goto ras_job_error;
+
+    if (job->request_len <
+        sizeof(struct fc_bsg_request) +
+        sizeof(struct lpfc_bsg_ras_req)) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                "6183 Received RAS_LOG request "
+                "below minimum size\n");
+        rc = -EINVAL;
+        goto ras_job_error;
+    }
+
+    ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
+        bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+    lwpd_offset = *((uint32_t *)ras_fwlog->lwpd.virt) & 0xffffffff;
+    ras_reply->offset = be32_to_cpu(lwpd_offset);
+
+    wrap_value = *((uint64_t *)ras_fwlog->lwpd.virt);
+    ras_reply->wrap_count = be32_to_cpu((wrap_value >> 32) & 0xffffffff);
+
+ras_job_error:
+    /* make error code available to userspace */
+    bsg_reply->result = rc;
+
+    /* complete the job back to userspace */
+    bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+    return rc;
+}
+
+/**
+ * lpfc_bsg_get_ras_fwlog: Read FW log
+ * @job: fc_bsg_job to handle
+ *
+ * Copy the FW log into the passed buffer.
+ **/
+static int
+lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
+{
+    struct Scsi_Host *shost = fc_bsg_to_shost(job);
+    struct lpfc_vport *vport = shost_priv(shost);
+    struct lpfc_hba *phba = vport->phba;
+    struct fc_bsg_request *bsg_request = job->request;
+    struct fc_bsg_reply *bsg_reply = job->reply;
+    struct lpfc_bsg_get_fwlog_req *ras_req;
+    uint32_t rd_offset, rd_index, offset, pending_wlen;
+    uint32_t boundary = 0, align_len = 0, write_len = 0;
+    void *dest, *src, *fwlog_buff;
+    struct lpfc_ras_fwlog *ras_fwlog = NULL;
+    struct lpfc_dmabuf *dmabuf, *next;
+    int rc = 0;
+
+    ras_fwlog = &phba->ras_fwlog;
+
+    rc = lpfc_check_fwlog_support(phba);
+    if (rc == -EACCES || rc == -EPERM)
+        goto ras_job_error;
+
+    /* Logging to be stopped before reading */
+    if (ras_fwlog->ras_active == true) {
+        rc = -EINPROGRESS;
+        goto ras_job_error;
+    }
+
+    if (job->request_len <
+        sizeof(struct fc_bsg_request) +
+        sizeof(struct lpfc_bsg_get_fwlog_req)) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                "6184 Received RAS_LOG request "
+                "below minimum size\n");
+        rc = -EINVAL;
+        goto ras_job_error;
+    }
+
+    ras_req = (struct lpfc_bsg_get_fwlog_req *)
+        bsg_request->rqst_data.h_vendor.vendor_cmd;
+    rd_offset = ras_req->read_offset;
+
+    /* Allocate memory to read fw log*/
+    fwlog_buff = vmalloc(ras_req->read_size);
+    if (!fwlog_buff) {
+        rc = -ENOMEM;
+        goto ras_job_error;
+    }
+
+    rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
+    offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
+    pending_wlen = ras_req->read_size;
+    dest = fwlog_buff;
+
+    list_for_each_entry_safe(dmabuf, next,
+                 &ras_fwlog->fwlog_buff_list, list) {
+
+        if (dmabuf->buffer_tag < rd_index)
+            continue;
+
+        /* Align read to buffer size */
+        if (offset) {
+            boundary = ((dmabuf->buffer_tag + 1) *
+                    LPFC_RAS_MAX_ENTRY_SIZE);
+
+            align_len = (boundary - offset);
+            write_len = min_t(u32, align_len,
+                      LPFC_RAS_MAX_ENTRY_SIZE);
+        } else {
+            write_len = min_t(u32, pending_wlen,
+                      LPFC_RAS_MAX_ENTRY_SIZE);
+            align_len = 0;
+            boundary = 0;
+        }
+        src = dmabuf->virt + offset;
+        memcpy(dest, src, write_len);
+
+        pending_wlen -= write_len;
+        if (!pending_wlen)
+            break;
+
+        dest += write_len;
+        offset = (offset + write_len) % LPFC_RAS_MAX_ENTRY_SIZE;
+    }
+
+    bsg_reply->reply_payload_rcv_len =
+        sg_copy_from_buffer(job->reply_payload.sg_list,
+                    job->reply_payload.sg_cnt,
+                    fwlog_buff, ras_req->read_size);
+
+    vfree(fwlog_buff);
+
+ras_job_error:
+    bsg_reply->result = rc;
+    bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+
+    return rc;
+}
+
 /**
  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
  * @job: fc_bsg_job to handle
@@ -5355,6 +5677,18 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
     case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
         rc = lpfc_forced_link_speed(job);
         break;
+    case LPFC_BSG_VENDOR_RAS_GET_LWPD:
+        rc = lpfc_bsg_get_ras_lwpd(job);
+        break;
+    case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
+        rc = lpfc_bsg_get_ras_fwlog(job);
+        break;
+    case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
+        rc = lpfc_bsg_get_ras_config(job);
+        break;
+    case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
+        rc = lpfc_bsg_set_ras_config(job);
+        break;
     default:
         rc = -EINVAL;
         bsg_reply->reply_payload_rcv_len = 0;
@@ -5368,7 +5702,7 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
 /**
  * lpfc_bsg_request - handle a bsg request from the FC transport
- * @job: fc_bsg_job to handle
+ * @job: bsg_job to handle
  **/
 int
 lpfc_bsg_request(struct bsg_job *job)
@@ -5402,7 +5736,7 @@ lpfc_bsg_request(struct bsg_job *job)
 /**
  * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
- * @job: fc_bsg_job that has timed out
+ * @job: bsg_job that has timed out
  *
  * This function just aborts the job's IOCB. The aborted IOCB will return to
  * the waiting function which will handle passing the error back to userspace
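To make the chunked-read arithmetic in lpfc_bsg_get_ras_fwlog() above concrete, a worked example with hypothetical values (the log is posted as 64 KB chunks, LPFC_RAS_MAX_ENTRY_SIZE):

    /* Hypothetical read at read_offset = 72 KB (73728 bytes):
     *   rd_index = 73728 / LPFC_RAS_MAX_ENTRY_SIZE = 1    -> skip the DMA
     *              buffers whose buffer_tag is below 1
     *   offset   = 73728 % LPFC_RAS_MAX_ENTRY_SIZE = 8192 -> begin copying
     *              8 KB into the buffer tagged 1
     * Copying then proceeds buffer by buffer (at most 64 KB per memcpy)
     * until read_size bytes have been gathered into the vmalloc'd staging
     * buffer and handed back through sg_copy_from_buffer().
     */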

View File

@@ -38,6 +38,10 @@
 #define LPFC_BSG_VENDOR_DIAG_MODE_END       10
 #define LPFC_BSG_VENDOR_LINK_DIAG_TEST      11
 #define LPFC_BSG_VENDOR_FORCED_LINK_SPEED   14
+#define LPFC_BSG_VENDOR_RAS_GET_LWPD        16
+#define LPFC_BSG_VENDOR_RAS_GET_FWLOG       17
+#define LPFC_BSG_VENDOR_RAS_GET_CONFIG      18
+#define LPFC_BSG_VENDOR_RAS_SET_CONFIG      19
 struct set_ct_event {
     uint32_t command;
@@ -296,6 +300,38 @@ struct forced_link_speed_support_reply {
     uint8_t supported;
 };
+struct lpfc_bsg_ras_req {
+    uint32_t command;
+};
+
+struct lpfc_bsg_get_fwlog_req {
+    uint32_t command;
+    uint32_t read_size;
+    uint32_t read_offset;
+};
+
+struct lpfc_bsg_get_ras_lwpd {
+    uint32_t offset;
+    uint32_t wrap_count;
+};
+
+struct lpfc_bsg_set_ras_config_req {
+    uint32_t command;
+    uint8_t action;
+#define LPFC_RASACTION_STOP_LOGGING     0x00
+#define LPFC_RASACTION_START_LOGGING    0x01
+    uint8_t log_level;
+};
+
+struct lpfc_bsg_get_ras_config_reply {
+    uint8_t state;
+#define LPFC_RASLOG_STATE_STOPPED       0x00
+#define LPFC_RASLOG_STATE_RUNNING       0x01
+    uint8_t log_level;
+    uint32_t log_buff_sz;
+};
+
 /* driver only */
 #define SLI_CONFIG_NOT_HANDLED      0
 #define SLI_CONFIG_HANDLED          1
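A hypothetical userspace-side filling of one of the vendor commands defined above (the surrounding fc bsg framing - msgcode, vendor id, request/reply scatterlists - is omitted here):

    struct lpfc_bsg_get_fwlog_req req = {
        .command     = LPFC_BSG_VENDOR_RAS_GET_FWLOG,   /* 17 */
        .read_size   = 64 * 1024,   /* pull one 64 KB chunk of the log */
        .read_offset = 0,           /* starting from the head of the log */
    };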

View File

@@ -545,6 +545,13 @@ bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
 int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
 void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
+
+/* RAS Interface */
+void lpfc_sli4_ras_init(struct lpfc_hba *phba);
+void lpfc_sli4_ras_setup(struct lpfc_hba *phba);
+int lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, uint32_t fwlog_level,
+                 uint32_t fwlog_enable);
+int lpfc_check_fwlog_support(struct lpfc_hba *phba);
+
 /* NVME interfaces. */
 void lpfc_nvme_unregister_port(struct lpfc_vport *vport,
             struct lpfc_nodelist *ndlp);

View File

@@ -445,14 +445,14 @@ lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
     struct lpfc_vport *vport_curr;
     unsigned long flags;
-    spin_lock_irqsave(&phba->hbalock, flags);
+    spin_lock_irqsave(&phba->port_list_lock, flags);
     list_for_each_entry(vport_curr, &phba->port_list, listentry) {
         if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) {
-            spin_unlock_irqrestore(&phba->hbalock, flags);
+            spin_unlock_irqrestore(&phba->port_list_lock, flags);
             return vport_curr;
         }
     }
-    spin_unlock_irqrestore(&phba->hbalock, flags);
+    spin_unlock_irqrestore(&phba->port_list_lock, flags);
     return NULL;
 }
@@ -471,11 +471,6 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
                   "Parse GID_FTrsp: did:x%x flg:x%x x%x",
                   Did, ndlp->nlp_flag, vport->fc_flag);
-            /* Don't assume the rport is always the previous
-             * FC4 type.
-             */
-            ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
             /* By default, the driver expects to support FCP FC4 */
             if (fc4_type == FC_TYPE_FCP)
                 ndlp->nlp_fc4_type |= NLP_FC4_FCP;

View File

@@ -550,7 +550,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
     struct lpfc_nodelist *ndlp;
     unsigned char *statep;
     struct nvme_fc_local_port *localport;
-    struct lpfc_nvmet_tgtport *tgtp;
     struct nvme_fc_remote_port *nrport = NULL;
     struct lpfc_nvme_rport *rport;
@@ -654,7 +653,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
             "\nOutstanding IO x%x\n", outio);
     if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
-        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
         len += snprintf(buf + len, size - len,
                 "\nNVME Targetport Entry ...\n");

View File

@@ -7673,8 +7673,11 @@ void
 lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
 {
     struct lpfc_vport *vport;
+
+    spin_lock_irq(&phba->port_list_lock);
     list_for_each_entry(vport, &phba->port_list, listentry)
         lpfc_els_flush_cmd(vport);
+    spin_unlock_irq(&phba->port_list_lock);
     return;
 }

View File

@@ -4193,7 +4193,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
     if (new_state == NLP_STE_MAPPED_NODE ||
         new_state == NLP_STE_UNMAPPED_NODE) {
-        if (ndlp->nlp_fc4_type & NLP_FC4_FCP ||
+        if (ndlp->nlp_fc4_type ||
             ndlp->nlp_DID == Fabric_DID ||
             ndlp->nlp_DID == NameServer_DID ||
             ndlp->nlp_DID == FDMI_DID) {
@@ -5428,12 +5428,10 @@ static void
 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
     LIST_HEAD(completions);
-    struct lpfc_sli *psli;
     IOCB_t *icmd;
     struct lpfc_iocbq *iocb, *next_iocb;
     struct lpfc_sli_ring *pring;
-    psli = &phba->sli;
     pring = lpfc_phba_elsring(phba);
     if (unlikely(!pring))
         return;
@@ -5938,14 +5936,14 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
         }
     }
-    spin_lock_irqsave(&phba->hbalock, flags);
+    spin_lock_irqsave(&phba->port_list_lock, flags);
     list_for_each_entry(vport, &phba->port_list, listentry) {
         if (vport->vpi == i) {
-            spin_unlock_irqrestore(&phba->hbalock, flags);
+            spin_unlock_irqrestore(&phba->port_list_lock, flags);
             return vport;
         }
     }
-    spin_unlock_irqrestore(&phba->hbalock, flags);
+    spin_unlock_irqrestore(&phba->port_list_lock, flags);
     return NULL;
 }

View File

@@ -186,6 +186,7 @@ struct lpfc_sli_intf {
 #define LPFC_CTL_PDEV_CTL_FRL_ALL   0x00
 #define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE   0x10
 #define LPFC_CTL_PDEV_CTL_FRL_NIC   0x20
+#define LPFC_CTL_PDEV_CTL_DDL_RAS   0x1000000
 #define LPFC_FW_DUMP_REQUEST    (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
@@ -964,6 +965,7 @@ struct mbox_header {
 /* Subsystem Definitions */
 #define LPFC_MBOX_SUBSYSTEM_NA      0x0
 #define LPFC_MBOX_SUBSYSTEM_COMMON  0x1
+#define LPFC_MBOX_SUBSYSTEM_LOWLEVEL    0xB
 #define LPFC_MBOX_SUBSYSTEM_FCOE    0xC
 /* Device Specific Definitions */
@@ -1030,6 +1032,9 @@ struct mbox_header {
 #define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE       0x22
 #define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK    0x23
+/* Low level Opcodes */
+#define LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION        0x37
+
 /* Mailbox command structures */
 struct eq_context {
     uint32_t word0;
@@ -1162,6 +1167,45 @@ struct lpfc_mbx_nop {
     uint32_t context[2];
 };
+
+struct lpfc_mbx_set_ras_fwlog {
+    struct mbox_header header;
+    union {
+        struct {
+            uint32_t word4;
+#define lpfc_fwlog_enable_SHIFT     0
+#define lpfc_fwlog_enable_MASK      0x00000001
+#define lpfc_fwlog_enable_WORD      word4
+#define lpfc_fwlog_loglvl_SHIFT     8
+#define lpfc_fwlog_loglvl_MASK      0x0000000F
+#define lpfc_fwlog_loglvl_WORD      word4
+#define lpfc_fwlog_ra_SHIFT         15
+#define lpfc_fwlog_ra_MASK          0x00000008
+#define lpfc_fwlog_ra_WORD          word4
+#define lpfc_fwlog_buffcnt_SHIFT    16
+#define lpfc_fwlog_buffcnt_MASK     0x000000FF
+#define lpfc_fwlog_buffcnt_WORD     word4
+#define lpfc_fwlog_buffsz_SHIFT     24
+#define lpfc_fwlog_buffsz_MASK      0x000000FF
+#define lpfc_fwlog_buffsz_WORD      word4
+            uint32_t word5;
+#define lpfc_fwlog_acqe_SHIFT       0
+#define lpfc_fwlog_acqe_MASK        0x0000FFFF
+#define lpfc_fwlog_acqe_WORD        word5
+#define lpfc_fwlog_cqid_SHIFT       16
+#define lpfc_fwlog_cqid_MASK        0x0000FFFF
+#define lpfc_fwlog_cqid_WORD        word5
+#define LPFC_MAX_FWLOG_PAGE 16
+            struct dma_address lwpd;
+            struct dma_address buff_fwlog[LPFC_MAX_FWLOG_PAGE];
+        } request;
+        struct {
+            uint32_t word0;
+        } response;
+    } u;
+};
+
 struct cq_context {
     uint32_t word0;
 #define lpfc_cq_context_event_SHIFT 31
@@ -3868,6 +3912,7 @@ struct lpfc_mqe {
         struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
         struct lpfc_mbx_set_host_data set_host_data;
         struct lpfc_mbx_nop nop;
+        struct lpfc_mbx_set_ras_fwlog ras_fwlog;
     } un;
 };
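The SHIFT/MASK/WORD triples in lpfc_mbx_set_ras_fwlog above follow the driver's usual accessor convention: lpfc's bf_set()/bf_get() macros paste these suffixes onto the field name to read-modify-write the containing word. Approximately (paraphrased from memory of lpfc_hw4.h, not quoted verbatim):

    #define bf_set(name, ptr, value) \
        ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
         ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

    /* so enabling the firmware log in the mailbox request would look like: */
    bf_set(lpfc_fwlog_enable, &mbx->u.request, LPFC_RAS_ENABLE_LOGGING);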

View File

@@ -3956,7 +3956,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
     if (phba->sli_rev == LPFC_SLI_REV4) {
         shost->dma_boundary =
             phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
-        shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+        shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
     }
     /*
@@ -3988,9 +3988,9 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
     if (error)
         goto out_put_shost;
-    spin_lock_irq(&phba->hbalock);
+    spin_lock_irq(&phba->port_list_lock);
     list_add_tail(&vport->listentry, &phba->port_list);
-    spin_unlock_irq(&phba->hbalock);
+    spin_unlock_irq(&phba->port_list_lock);
     return vport;
 out_put_shost:
@@ -4016,9 +4016,9 @@ destroy_port(struct lpfc_vport *vport)
     fc_remove_host(shost);
     scsi_remove_host(shost);
-    spin_lock_irq(&phba->hbalock);
+    spin_lock_irq(&phba->port_list_lock);
     list_del_init(&vport->listentry);
-    spin_unlock_irq(&phba->hbalock);
+    spin_unlock_irq(&phba->port_list_lock);
     lpfc_cleanup(vport);
     return;
@@ -5621,7 +5621,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
     /* Initialize ndlp management spinlock */
     spin_lock_init(&phba->ndlp_lock);
+    /* Initialize port_list spinlock */
+    spin_lock_init(&phba->port_list_lock);
     INIT_LIST_HEAD(&phba->port_list);
+
     INIT_LIST_HEAD(&phba->work_list);
     init_waitqueue_head(&phba->wait_4_mlo_m_q);
@@ -5919,8 +5922,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
      * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
      */
     max_buf_size = (2 * SLI4_PAGE_SIZE);
-    if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
-        phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
     /*
      * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
@@ -5942,9 +5943,16 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
         /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
         phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
-        if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
-            phba->cfg_sg_seg_cnt =
-                LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
+        /*
+         * If supporting DIF, reduce the seg count for scsi to
+         * allow room for the DIF sges.
+         */
+        if (phba->cfg_enable_bg &&
+            phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
+            phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
+        else
+            phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
+
     } else {
         /*
          * The scsi_buf for a regular I/O holds the FCP cmnd,
@@ -5958,6 +5966,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
         /* Total SGEs for scsi_sg_list */
         phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
+        phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
         /*
          * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
@@ -5965,10 +5974,22 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
          */
     }
+    /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
+    if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+        if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+            lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+                    "6300 Reducing NVME sg segment "
+                    "cnt to %d\n",
+                    LPFC_MAX_NVME_SEG_CNT);
+            phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+        } else
+            phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+    }
+
     /* Initialize the host templates with the updated values. */
-    lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-    lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-    lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
+    lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+    lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+    lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
     if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
         phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
@@ -5977,9 +5998,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
             SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
     lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
-            "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
+            "9087 sg_seg_cnt:%d dmabuf_size:%d "
+            "total:%d scsi:%d nvme:%d\n",
             phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
-            phba->cfg_total_seg_cnt);
+            phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
+            phba->cfg_nvme_seg_cnt);
     /* Initialize buffer queue management fields */
     INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
@@ -6205,6 +6228,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
     if (phba->cfg_fof)
         fof_vectors = 1;
+    /* Verify RAS support on adapter */
+    lpfc_sli4_ras_init(phba);
+
     /* Verify all the SLI4 queues */
     rc = lpfc_sli4_queue_verify(phba);
     if (rc)
@@ -7967,7 +7993,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
         else
             lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                     "3028 GET_FUNCTION_CONFIG: failed to find "
-                    "Resrouce Descriptor:x%x\n",
+                    "Resource Descriptor:x%x\n",
                     LPFC_RSRC_DESC_TYPE_FCFCOE);
 read_cfg_out:
@@ -10492,6 +10518,14 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
     /* Stop kthread signal shall trigger work_done one more time */
     kthread_stop(phba->worker_thread);
+    /* Disable FW logging to host memory */
+    writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
+           phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+    /* Free RAS DMA memory */
+    if (phba->ras_fwlog.ras_enabled == true)
+        lpfc_sli4_ras_dma_free(phba);
+
     /* Unset the queues shared with the hardware then release all
      * allocated resources.
      */
@@ -10737,6 +10771,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
         phba->mds_diags_support = 1;
     else
         phba->mds_diags_support = 0;
+
     return 0;
 }
@@ -10965,9 +11000,9 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
     kfree(phba->vpi_ids);
     lpfc_stop_hba_timers(phba);
-    spin_lock_irq(&phba->hbalock);
+    spin_lock_irq(&phba->port_list_lock);
     list_del_init(&vport->listentry);
-    spin_unlock_irq(&phba->hbalock);
+    spin_unlock_irq(&phba->port_list_lock);
     lpfc_debugfs_terminate(vport);
@@ -11694,6 +11729,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
     /* Check if there are static vports to be created. */
     lpfc_create_static_vport(phba);
+
+    /* Enable RAS FW log support */
+    lpfc_sli4_ras_setup(phba);
+
     return 0;
 out_disable_intr:
@@ -11773,9 +11812,9 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
     lpfc_sli4_hba_unset(phba);
     lpfc_stop_hba_timers(phba);
-    spin_lock_irq(&phba->hbalock);
+    spin_lock_irq(&phba->port_list_lock);
     list_del_init(&vport->listentry);
-    spin_unlock_irq(&phba->hbalock);
+    spin_unlock_irq(&phba->port_list_lock);
     /* Perform scsi free before driver resource_unset since scsi
      * buffers are released to their corresponding pools here.
@@ -12419,6 +12458,30 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
     return;
 }
+
+/**
+ * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine checks to see if RAS is supported by the adapter. Check the
+ * function through which RAS support enablement is to be done.
+ **/
+void
+lpfc_sli4_ras_init(struct lpfc_hba *phba)
+{
+    switch (phba->pcidev->device) {
+    case PCI_DEVICE_ID_LANCER_G6_FC:
+    case PCI_DEVICE_ID_LANCER_G7_FC:
+        phba->ras_fwlog.ras_hwsupport = true;
+        if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn))
+            phba->ras_fwlog.ras_enabled = true;
+        else
+            phba->ras_fwlog.ras_enabled = false;
+        break;
+    default:
+        phba->ras_fwlog.ras_hwsupport = false;
+    }
+}
+
 /**
  * lpfc_fof_queue_setup - Set up all the fof queues
  * @phba: pointer to lpfc hba data structure.
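One detail worth calling out in lpfc_sli4_ras_init() above: PCI_FUNC() is simply the low three bits of devfn, so with the default lpfc_ras_fwlog_func of 0 firmware logging is armed only on function 0 of a Lancer G6/G7 adapter, matching the lpfc_attr.c comment that enablement is via a specific port while the log itself is a global adapter action.

    #define PCI_FUNC(devfn) ((devfn) & 0x07)    /* from include/uapi/linux/pci.h */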

View File

@@ -2318,6 +2318,7 @@ lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
     lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
     spin_lock_irq(shost->host_lock);
     ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+    ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
     spin_unlock_irq(shost->host_lock);
     lpfc_disc_set_adisc(vport, ndlp);
@@ -2395,6 +2396,7 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
     lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
     spin_lock_irq(shost->host_lock);
     ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+    ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
     spin_unlock_irq(shost->host_lock);
     lpfc_disc_set_adisc(vport, ndlp);
     return ndlp->nlp_state;
@@ -2652,6 +2654,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
     lpfc_cancel_retry_delay_tmo(vport, ndlp);
     spin_lock_irq(shost->host_lock);
     ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+    ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
     spin_unlock_irq(shost->host_lock);
     return ndlp->nlp_state;
 }

View File

@@ -282,7 +282,7 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport; vport = lport->vport;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6001 ENTER. lpfc_pnvme %p, qidx x%xi qhandle %p\n", "6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n",
lport, qidx, handle); lport, qidx, handle);
kfree(handle); kfree(handle);
} }
@@ -2235,13 +2235,11 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
 	struct sli4_sge *sgl;
 	dma_addr_t pdma_phys_sgl;
 	uint16_t iotag, lxri = 0;
-	int bcnt, num_posted, sgl_size;
+	int bcnt, num_posted;
 	LIST_HEAD(prep_nblist);
 	LIST_HEAD(post_nblist);
 	LIST_HEAD(nvme_nblist);

-	sgl_size = phba->cfg_sg_dma_buf_size;
 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
 		lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
 		if (!lpfc_ncmd)
@@ -2462,17 +2460,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
 	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);

-	/* Limit to LPFC_MAX_NVME_SEG_CNT.
-	 * For now need + 1 to get around NVME transport logic.
+	/* We need to tell the transport layer + 1 because it takes page
+	 * alignment into account. When space for the SGL is allocated we
+	 * allocate + 3, one for cmd, one for rsp and one for this alignment
 	 */
-	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
-		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT,
-				 "6300 Reducing sg segment cnt to %d\n",
-				 LPFC_MAX_NVME_SEG_CNT);
-		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
-	} else {
-		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
-	}
 	lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
 	lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;


@@ -1339,15 +1339,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
 			idx = 0;
 	}

-	infop = phba->sli4_hba.nvmet_ctx_info;
-	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
-		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+			infop = lpfc_get_ctx_list(phba, i, j);
 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
 					"6408 TOTAL NVMET ctx for CPU %d "
 					"MRQ %d: cnt %d nextcpu %p\n",
 					i, j, infop->nvmet_ctx_list_cnt,
 					infop->nvmet_ctx_next_cpu);
-			infop++;
 		}
 	}
 	return 0;
@@ -1373,17 +1372,10 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
 	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
 	pinfo.port_id = vport->fc_myDID;

-	/* Limit to LPFC_MAX_NVME_SEG_CNT.
-	 * For now need + 1 to get around NVME transport logic.
+	/* We need to tell the transport layer + 1 because it takes page
+	 * alignment into account. When space for the SGL is allocated we
+	 * allocate + 3, one for cmd, one for rsp and one for this alignment
 	 */
-	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
-		lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
-				"6400 Reducing sg segment cnt to %d\n",
-				LPFC_MAX_NVME_SEG_CNT);
-		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
-	} else {
-		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
-	}
 	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
 	lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
 	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;


@@ -202,8 +202,8 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
 static void
 lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 {
-	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
-	struct lpfc_nodelist *pnode = rdata->pnode;
+	struct lpfc_rport_data *rdata;
+	struct lpfc_nodelist *pnode;
 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
 	unsigned long flags;
 	struct Scsi_Host *shost = cmd->device->host;
@@ -211,17 +211,19 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 	unsigned long latency;
 	int i;

-	if (cmd->result)
+	if (!vport->stat_data_enabled ||
+	    vport->stat_data_blocked ||
+	    (cmd->result))
 		return;

 	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
+	rdata = lpfc_cmd->rdata;
+	pnode = rdata->pnode;
 	spin_lock_irqsave(shost->host_lock, flags);
-	if (!vport->stat_data_enabled ||
-	    vport->stat_data_blocked ||
-	    !pnode ||
-	    !pnode->lat_data ||
-	    (phba->bucket_type == LPFC_NO_BUCKET)) {
+	if (!pnode ||
+	    !pnode->lat_data ||
+	    (phba->bucket_type == LPFC_NO_BUCKET)) {
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		return;
 	}
@@ -1050,7 +1052,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 	if (!found)
 		return NULL;

-	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
+	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
 		atomic_inc(&ndlp->cmd_pending);
 		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
 	}
@@ -4158,9 +4160,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	}
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

-	spin_lock_irqsave(&phba->hbalock, flags);
-	lpfc_cmd->pCmd = NULL;
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	/* If pCmd was set to NULL from abort path, do not call scsi_done */
+	if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+				 "0711 FCP cmd already NULL, sid: 0x%06x, "
+				 "did: 0x%06x, oxid: 0x%04x\n",
+				 vport->fc_myDID,
+				 (pnode) ? pnode->nlp_DID : 0,
+				 phba->sli_rev == LPFC_SLI_REV4 ?
+				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff);
+		return;
+	}

 	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
 	cmd->scsi_done(cmd);
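The move from lock-protected clearing to xchg() above is a claim-once handoff between the completion and abort paths: the atomic exchange guarantees exactly one path observes the non-NULL pointer. A compilable userspace analogue of the pattern (names hypothetical, not lpfc code):

#include <stdatomic.h>
#include <stdio.h>

/* Whichever path exchanges first sees the old non-NULL value and owns
 * completion; the loser sees NULL and must not touch the command again. */
static char cmd_storage[] = "pending command";
static _Atomic(char *) pcmd = cmd_storage;

static void complete_once(const char *path)
{
	char *owned = atomic_exchange(&pcmd, NULL);

	if (owned)
		printf("%s completes: %s\n", path, owned);
	else
		printf("%s: already NULL, skipping scsi_done\n", path);
}

int main(void)
{
	complete_once("completion path");
	complete_once("abort path");	/* exactly one winner */
	return 0;
}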


@@ -392,11 +392,7 @@ lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
 	struct lpfc_register doorbell;

 	doorbell.word0 = 0;
-	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
-	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
-	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
-	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
-	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
+	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
 	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
 }
@@ -3797,6 +3793,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
 	struct hbq_dmabuf *dmabuf;
 	struct lpfc_cq_event *cq_event;
 	unsigned long iflag;
+	int count = 0;

 	spin_lock_irqsave(&phba->hbalock, iflag);
 	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
@@ -3818,16 +3815,22 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
 			if (irspiocbq)
 				lpfc_sli_sp_handle_rspiocb(phba, pring,
 							   irspiocbq);
+			count++;
 			break;
 		case CQE_CODE_RECEIVE:
 		case CQE_CODE_RECEIVE_V1:
 			dmabuf = container_of(cq_event, struct hbq_dmabuf,
 					      cq_event);
 			lpfc_sli4_handle_received_buffer(phba, dmabuf);
+			count++;
 			break;
 		default:
 			break;
 		}
+
+		/* Limit the number of events to 64 to avoid soft lockups */
+		if (count == 64)
+			break;
 	}
 }
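The count guard added above is a bounded-drain pattern: cap the work done per pass so a flooded event queue cannot trigger the soft-lockup watchdog, and rely on the worker being re-invoked for the remainder. A small self-contained sketch of the same shape (illustrative names; the budget of 64 mirrors the diff):

#include <stdio.h>

#define SP_EVENT_BUDGET 64

/* Drain at most SP_EVENT_BUDGET items per call; the caller re-invokes
 * the handler (as the worker thread does) until the queue is empty. */
static int drain_events(int *pending)
{
	int handled = 0;

	while (*pending > 0 && handled < SP_EVENT_BUDGET) {
		(*pending)--;	/* stand-in for handling one CQ event */
		handled++;
	}
	return handled;
}

int main(void)
{
	int pending = 150;

	while (pending > 0)
		printf("pass handled %d events\n", drain_events(&pending));
	return 0;
}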
@@ -6145,6 +6148,271 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
 	return;
 }
/**
* lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
* @phba: Pointer to HBA context object.
*
* This function is called to free memory allocated for RAS FW logging
* support in the driver.
**/
void
lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
{
struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
struct lpfc_dmabuf *dmabuf, *next;
if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
list_for_each_entry_safe(dmabuf, next,
&ras_fwlog->fwlog_buff_list,
list) {
list_del(&dmabuf->list);
dma_free_coherent(&phba->pcidev->dev,
LPFC_RAS_MAX_ENTRY_SIZE,
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
}
}
if (ras_fwlog->lwpd.virt) {
dma_free_coherent(&phba->pcidev->dev,
sizeof(uint32_t) * 2,
ras_fwlog->lwpd.virt,
ras_fwlog->lwpd.phys);
ras_fwlog->lwpd.virt = NULL;
}
ras_fwlog->ras_active = false;
}
/**
* lpfc_sli4_ras_dma_alloc - Allocate DMA memory for FW logging support
* @phba: Pointer to HBA context object.
* @fwlog_buff_count: Count of buffers to be created.
*
* This routine allocates DMA memory for the Log Write Position Data [LWPD]
* and for the buffers posted to the adapter for FW log updates.
* The buffer count is derived from the module parameter ras_fwlog_buffsize;
* the size of each buffer posted to the FW is 64K.
**/
static int
lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
uint32_t fwlog_buff_count)
{
struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
struct lpfc_dmabuf *dmabuf;
int rc = 0, i = 0;
/* Initialize List */
INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
/* Allocate memory for the LWPD */
ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
sizeof(uint32_t) * 2,
&ras_fwlog->lwpd.phys,
GFP_KERNEL);
if (!ras_fwlog->lwpd.virt) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"6185 LWPD Memory Alloc Failed\n");
return -ENOMEM;
}
ras_fwlog->fw_buffcount = fwlog_buff_count;
for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
GFP_KERNEL);
if (!dmabuf) {
rc = -ENOMEM;
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"6186 Memory Alloc failed FW logging");
goto free_mem;
}
dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
LPFC_RAS_MAX_ENTRY_SIZE,
&dmabuf->phys,
GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
rc = -ENOMEM;
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"6187 DMA Alloc Failed FW logging");
goto free_mem;
}
memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
dmabuf->buffer_tag = i;
list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
}
free_mem:
if (rc)
lpfc_sli4_ras_dma_free(phba);
return rc;
}
/**
* lpfc_sli4_ras_mbox_cmpl - Completion handler for RAS MBX command
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
*
* Completion handler for driver's RAS MBX command to the device.
**/
static void
lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb;
union lpfc_sli4_cfg_shdr *shdr;
uint32_t shdr_status, shdr_add_status;
struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
mb = &pmb->u.mb;
shdr = (union lpfc_sli4_cfg_shdr *)
&pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"6188 FW LOG mailbox "
"completed with status x%x add_status x%x,"
" mbx status x%x\n",
shdr_status, shdr_add_status, mb->mbxStatus);
goto disable_ras;
}
ras_fwlog->ras_active = true;
mempool_free(pmb, phba->mbox_mem_pool);
return;
disable_ras:
/* Free RAS DMA memory */
lpfc_sli4_ras_dma_free(phba);
mempool_free(pmb, phba->mbox_mem_pool);
}
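One lifetime detail worth calling out: with MBX_NOWAIT submission, the completion handler owns the mailbox, so lpfc_sli4_ras_fwlog_init() below frees it only on the MBX_NOT_FINISHED path. A short sketch of the ownership split (as implied by this patch, generic shape only):

/* Ownership sketch:
 *   lpfc_sli_issue_mbox(..., MBX_NOWAIT) == MBX_NOT_FINISHED
 *       -> the caller frees mbox (see lpfc_sli4_ras_fwlog_init below)
 *   otherwise
 *       -> lpfc_sli4_ras_mbox_cmpl() frees mbox on both the success
 *          and the disable_ras error paths above
 */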
/**
* lpfc_sli4_ras_fwlog_init - Initialize memory and post RAS MBX command
* @phba: pointer to lpfc hba data structure.
* @fwlog_level: Logging verbosity level.
* @fwlog_enable: Enable/Disable logging.
*
* Initialize memory and post mailbox command to enable FW logging in host
* memory.
**/
int
lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
uint32_t fwlog_level,
uint32_t fwlog_enable)
{
struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
struct lpfc_dmabuf *dmabuf;
LPFC_MBOXQ_t *mbox;
uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
int rc = 0;
fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
phba->cfg_ras_fwlog_buffsize);
fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
/*
 * If re-enabling FW logging support, use the earlier allocated
 * DMA buffers while posting the MBX command.
 */
if (!ras_fwlog->lwpd.virt) {
rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"6189 RAS FW Log Support Not Enabled");
return rc;
}
}
/* Setup Mailbox command */
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"6190 RAS MBX Alloc Failed");
rc = -ENOMEM;
goto mem_free;
}
ras_fwlog->fw_loglevel = fwlog_level;
len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
len, LPFC_SLI4_MBX_EMBED);
mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
fwlog_enable);
bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
ras_fwlog->fw_loglevel);
bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
ras_fwlog->fw_buffcount);
bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
/* Update DMA buffer address */
list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
/* Update LWPD address */
mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"6191 RAS Mailbox failed. "
"status %d mbxStatus : x%x", rc,
bf_get(lpfc_mqe_status, &mbox->u.mqe));
mempool_free(mbox, phba->mbox_mem_pool);
rc = -EIO;
goto mem_free;
} else
rc = 0;
mem_free:
if (rc)
lpfc_sli4_ras_dma_free(phba);
return rc;
}
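For concreteness, the sizing math above works out as follows (the 256K unit for LPFC_RAS_MIN_BUFF_POST_SIZE is an assumption for illustration; only the 64K entry size is stated in this patch):

/* Worked example, assumed values:
 *   LPFC_RAS_MIN_BUFF_POST_SIZE = 256 * 1024    (assumed unit size)
 *   LPFC_RAS_MAX_ENTRY_SIZE     =  64 * 1024    (64K, per the kernel-doc)
 * With cfg_ras_fwlog_buffsize = 4:
 *   fwlog_buffsize    = 256K * 4 = 1M
 *   fwlog_entry_count = 1M / 64K = 16 buffers DMA-mapped and posted,
 * plus one 8-byte LWPD allocation (sizeof(uint32_t) * 2).
 */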
/**
* lpfc_sli4_ras_setup - Check if RAS supported on the adapter
* @phba: Pointer to HBA context object.
*
* Check if RAS is supported on the adapter and initialize it.
**/
void
lpfc_sli4_ras_setup(struct lpfc_hba *phba)
{
/* Check RAS FW Log needs to be enabled or not */
if (lpfc_check_fwlog_support(phba))
return;
lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
LPFC_RAS_ENABLE_LOGGING);
}
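Putting the new RAS pieces together, the intended ordering appears to be: lpfc_sli4_ras_init() records support during probe, then lpfc_sli4_ras_setup() allocates DMA memory and posts the SET_DIAG_LOG_OPTION mailbox. A hedged sketch of that call order (the wrapper name is hypothetical):

/* Hypothetical probe-path wrapper; only the two lpfc_sli4_ras_* calls
 * are from this patch, the surrounding function is illustrative. */
static void example_ras_bringup(struct lpfc_hba *phba)
{
	lpfc_sli4_ras_init(phba);	/* set ras_hwsupport / ras_enabled */
	lpfc_sli4_ras_setup(phba);	/* no-op unless fwlog support checks pass */
}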
/**
 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
@@ -10266,8 +10534,12 @@ lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
 	LPFC_MBOXQ_t *pmb;
 	unsigned long iflag;

+	/* Disable softirqs, including timers from obtaining phba->hbalock */
+	local_bh_disable();

 	/* Flush all the mailbox commands in the mbox system */
 	spin_lock_irqsave(&phba->hbalock, iflag);

 	/* The pending mailbox command queue */
 	list_splice_init(&phba->sli.mboxq, &completions);
 	/* The outstanding active mailbox command */
@@ -10280,6 +10552,9 @@ lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
 	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
 	spin_unlock_irqrestore(&phba->hbalock, iflag);

+	/* Enable softirqs again, done with phba->hbalock */
+	local_bh_enable();

 	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
 	while (!list_empty(&completions)) {
 		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
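All of the local_bh_disable()/local_bh_enable() additions in this file follow the rule the new comments state: lpfc timers fire as softirqs and also take phba->hbalock, so the thread keeps bottom halves off across the flush. The shape, as a kernel-context sketch (not standalone code):

/* Sketch: pair bh-disable with the hbalock critical section so a timer
 * softirq on this CPU cannot contend for phba->hbalock mid-flush. */
local_bh_disable();
spin_lock_irqsave(&phba->hbalock, iflag);
/* ... splice mailbox queues to a private list ... */
spin_unlock_irqrestore(&phba->hbalock, iflag);
local_bh_enable();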
@@ -10419,6 +10694,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
 	lpfc_hba_down_prep(phba);

+	/* Disable softirqs, including timers from obtaining phba->hbalock */
+	local_bh_disable();

 	lpfc_fabric_abort_hba(phba);

 	spin_lock_irqsave(&phba->hbalock, flags);
@@ -10472,6 +10750,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
 		kfree(buf_ptr);
 	}

+	/* Enable softirqs again, done with phba->hbalock */
+	local_bh_enable();

 	/* Return any active mbox cmds */
 	del_timer_sync(&psli->mbox_tmo);
@@ -11775,6 +12056,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
 	}
 	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;

+	/* Disable softirqs, including timers from obtaining phba->hbalock */
+	local_bh_disable();

 	spin_lock_irq(&phba->hbalock);
 	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
@@ -11788,6 +12072,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
 					1000) + jiffies;
 		spin_unlock_irq(&phba->hbalock);

+		/* Enable softirqs again, done with phba->hbalock */
+		local_bh_enable();

 		while (phba->sli.mbox_active) {
 			/* Check active mailbox complete status every 2ms */
 			msleep(2);
@@ -11797,9 +12084,13 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
 			 */
 			break;
 		}
-	} else
+	} else {
 		spin_unlock_irq(&phba->hbalock);

+		/* Enable softirqs again, done with phba->hbalock */
+		local_bh_enable();
+	}
+
 	lpfc_sli_mbox_sys_flush(phba);
 }
@@ -13136,7 +13427,6 @@ static bool
 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 {
 	bool workposted = false;
-	struct fc_frame_header *fc_hdr;
 	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
 	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
 	struct lpfc_nvmet_tgtport *tgtp;
@@ -13173,9 +13463,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 		hrq->RQ_buf_posted--;
 		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));

-		/* If a NVME LS event (type 0x28), treat it as Fast path */
-		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

 		/* save off the frame for the worker thread to process */
 		list_add_tail(&dma_buf->cq_event.list,
 			      &phba->sli4_hba.sp_queue_event);
@@ -14558,13 +14845,10 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
 	int rc, length, status = 0;
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
-	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

 	/* sanity check on queue memory */
 	if (!cq || !eq)
 		return -ENODEV;
-	if (!phba->sli4_hba.pc_sli4_params.supported)
-		hw_page_size = cq->page_size;

 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox)


@@ -886,3 +886,4 @@ int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
 int lpfc_sli4_post_status_check(struct lpfc_hba *);
 uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
 uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);


@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/

-#define LPFC_DRIVER_VERSION "12.0.0.6"
+#define LPFC_DRIVER_VERSION "12.0.0.7"
 #define LPFC_DRIVER_NAME		"lpfc"

 /* Used for SLI 2/3 */
