Merge tag 'for-4.21/block-20181221' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
"This is the main pull request for block/storage for 4.21.
Larger than usual, it was a busy round with lots of goodies queued up.
Most notable is the removal of the old IO stack, which has been a long
time coming. No new features have gone in for a while; everything
coming in this week has been fixes for things that were previously
merged.
This contains:
- Use atomic counters instead of semaphores for mtip32xx (Arnd)
- Cleanup of the mtip32xx request setup (Christoph)
- Fix for circular locking dependency in loop (Jan, Tetsuo)
- bcache (Coly, Guoju, Shenghui)
* Optimizations for writeback caching
* Various fixes and improvements
- nvme (Chaitanya, Christoph, Sagi, Jay, me, Keith)
* host and target support for NVMe over TCP
* Error log page support
* Support for separate read/write/poll queues
* Much improved polling
* discard OOM fallback
* Tracepoint improvements
- lightnvm (Hans, Hua, Igor, Matias, Javier)
* Igor added packed metadata to pblk, so drives without per-LBA
metadata can be used as well.
* Fix from Geert for an uninitialized value on chunk metadata reads.
* Fixes from Hans and Javier to pblk recovery and write path.
* Fix from Hua Su for a race condition in the pblk recovery
code.
* Scan optimization added to pblk recovery from Zhoujie.
* Small geometry cleanup from me.
- Conversion of the last few drivers that used the legacy path to
blk-mq (me)
- Removal of legacy IO path in SCSI (me, Christoph)
- Removal of legacy IO stack and schedulers (me)
- Support for much better polling, now without interrupts at all.
blk-mq adds support for multiple queue maps, which enables us to
have a map per queue type. This in turn enables nvme to have
separate completion queues for polling, which can then be
interrupt-less (a minimal sketch of the queue-map setup follows
this list). It also means we're ready for async polled IO, which
is hopefully coming in the next release.
- Killing of (now) unused block exports (Christoph)
- Unification of the blk-rq-qos and blk-wbt wait handling (Josef)
- Support for zoned testing with null_blk (Masato)
- sx8 conversion to per-host tag sets (Christoph)
- IO priority improvements (Damien)
- mq-deadline zoned fix (Damien)
- Ref count blkcg series (Dennis)
- Lots of blk-mq improvements and speedups (me)
- sbitmap scalability improvements (me)
- Make core inflight IO accounting per-cpu (Mikulas)
- Export timeout setting in sysfs (Weiping)
- Cleanup the direct issue path (Jianchao)
- Export blk-wbt internals in block debugfs for easier debugging
(Ming)
- Lots of other fixes and improvements"
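
To make the multiple queue map mechanism above concrete, here is a
minimal sketch of how a driver could wire up separate default and poll
maps. It is illustrative only: the "ex_*" names, queue counts, and
queue depth are invented rather than taken from this merge, and real
conversions such as nvme-pci also handle IRQ affinity, dedicated read
queues, and error paths.

/*
 * Hypothetical "ex_" driver sketch: registering separate default and
 * poll queue maps.  Names and numbers are invented for illustration;
 * see nvme-pci for a real user of this interface.
 */
#include <linux/blk-mq.h>

struct ex_ctrl {
	struct blk_mq_tag_set	tag_set;
	unsigned int		nr_irq_queues;	/* interrupt-driven queues */
	unsigned int		nr_poll_queues;	/* polled-only queues */
};

static int ex_map_queues(struct blk_mq_tag_set *set)
{
	struct ex_ctrl *ctrl = set->driver_data;
	struct blk_mq_queue_map *map;

	/* Regular, interrupt-driven hardware queues come first. */
	map = &set->map[HCTX_TYPE_DEFAULT];
	map->nr_queues = ctrl->nr_irq_queues;
	map->queue_offset = 0;
	blk_mq_map_queues(map);

	/* No dedicated read queues in this sketch. */
	set->map[HCTX_TYPE_READ].nr_queues = 0;

	/* Poll queues get their own hardware contexts, no interrupts. */
	map = &set->map[HCTX_TYPE_POLL];
	map->nr_queues = ctrl->nr_poll_queues;
	map->queue_offset = ctrl->nr_irq_queues;
	blk_mq_map_queues(map);

	return 0;
}

static const struct blk_mq_ops ex_mq_ops = {
	.map_queues	= ex_map_queues,
	/* .queue_rq, .poll, .complete etc. omitted from this sketch */
};

static void ex_init_tag_set(struct ex_ctrl *ctrl)
{
	ctrl->tag_set.ops = &ex_mq_ops;
	ctrl->tag_set.nr_hw_queues =
		ctrl->nr_irq_queues + ctrl->nr_poll_queues;
	ctrl->tag_set.nr_maps = HCTX_MAX_TYPES;	/* default, read, poll */
	ctrl->tag_set.queue_depth = 64;		/* arbitrary for the sketch */
	ctrl->tag_set.driver_data = ctrl;
	/*
	 * A real driver would also implement .queue_rq and .poll in
	 * ex_mq_ops and then call blk_mq_alloc_tag_set(&ctrl->tag_set).
	 */
}

With a layout like this, requests marked for polling are routed to the
hardware contexts of the poll map, and their completions are reaped by
the driver's ->poll() callback instead of an interrupt handler.
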
* tag 'for-4.21/block-20181221' of git://git.kernel.dk/linux-block: (364 commits)
kyber: use sbitmap add_wait_queue/list_del wait helpers
sbitmap: add helpers for add/del wait queue handling
block: save irq state in blkg_lookup_create()
dm: don't reuse bio for flushes
nvme-pci: trace SQ status on completions
nvme-rdma: implement polling queue map
nvme-fabrics: allow user to pass in nr_poll_queues
nvme-fabrics: allow nvmf_connect_io_queue to poll
nvme-core: optionally poll sync commands
block: make request_to_qc_t public
nvme-tcp: fix spelling mistake "attepmpt" -> "attempt"
nvme-tcp: fix endianess annotations
nvmet-tcp: fix endianess annotations
nvme-pci: refactor nvme_poll_irqdisable to make sparse happy
nvme-pci: only set nr_maps to 2 if poll queues are supported
nvmet: use a macro for default error location
nvmet: fix comparison of a u16 with -1
blk-mq: enable IO poll if .nr_queues of type poll > 0
blk-mq: change blk_mq_queue_busy() to blk_mq_queue_inflight()
blk-mq: skip zero-queue maps in blk_mq_map_swqueue
...
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -857,13 +857,9 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	}
 
 	if (ha->mqenable) {
-		if (shost_use_blk_mq(vha->host)) {
-			tag = blk_mq_unique_tag(cmd->request);
-			hwq = blk_mq_unique_tag_to_hwq(tag);
-			qpair = ha->queue_pair_map[hwq];
-		} else if (vha->vp_idx && vha->qpair) {
-			qpair = vha->qpair;
-		}
+		tag = blk_mq_unique_tag(cmd->request);
+		hwq = blk_mq_unique_tag_to_hwq(tag);
+		qpair = ha->queue_pair_map[hwq];
 
 		if (qpair)
 			return qla2xxx_mqueuecommand(host, cmd, qpair);
@@ -1464,7 +1460,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
 		goto eh_reset_failed;
 	}
 	err = 2;
-	if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
+	if (do_reset(fcport, cmd->device->lun, blk_mq_rq_cpu(cmd->request) + 1)
 	    != QLA_SUCCESS) {
 		ql_log(ql_log_warn, vha, 0x800c,
 		    "do_reset failed for cmd=%p.\n", cmd);
@@ -3159,7 +3155,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto probe_failed;
 	}
 
-	if (ha->mqenable && shost_use_blk_mq(host)) {
+	if (ha->mqenable) {
 		/* number of hardware queues supported by blk/scsi-mq*/
 		host->nr_hw_queues = ha->max_qpairs;
 
@@ -3271,25 +3267,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);
 
 	if (ha->mqenable) {
-		bool mq = false;
 		bool startit = false;
 
-		if (QLA_TGT_MODE_ENABLED()) {
-			mq = true;
+		if (QLA_TGT_MODE_ENABLED())
 			startit = false;
-		}
 
-		if ((ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) &&
-		    shost_use_blk_mq(host)) {
-			mq = true;
+		if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
 			startit = true;
-		}
 
-		if (mq) {
-			/* Create start of day qpairs for Block MQ */
-			for (i = 0; i < ha->max_qpairs; i++)
-				qla2xxx_create_qpair(base_vha, 5, 0, startit);
-		}
+		/* Create start of day qpairs for Block MQ */
+		for (i = 0; i < ha->max_qpairs; i++)
+			qla2xxx_create_qpair(base_vha, 5, 0, startit);
 	}
 
 	if (ha->flags.running_gold_fw)
@@ -6952,11 +6940,12 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
 {
 	int rc;
 	scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+	struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
 
 	if (USER_CTRL_IRQ(vha->hw))
-		rc = blk_mq_map_queues(&shost->tag_set);
+		rc = blk_mq_map_queues(qmap);
 	else
-		rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev, 0);
+		rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0);
 	return rc;
 }
 