Skip to content

Commit f4345f0

Browse files
committed
Merge tag 'block-6.9-20240510' of git://git.kernel.dk/linux
Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
     - nvme target fixes (Sagi, Dan, Maurizo)
     - new vendor quirk for broken MSI (Sean)
 - Virtual boundary fix for a regression in this merge window (Ming)

* tag 'block-6.9-20240510' of git://git.kernel.dk/linux:
  nvmet-rdma: fix possible bad dereference when freeing rsps
  nvmet: prevent sprintf() overflow in nvmet_subsys_nsid_exists()
  nvmet: make nvmet_wq unbound
  nvmet-auth: return the error code to the nvmet_auth_ctrl_hash() callers
  nvme-pci: Add quirk for broken MSIs
  block: set default max segment size in case of virt_boundary
2 parents ed44935 + a772178 commit f4345f0

File tree

7 files changed

+29
-21
lines changed

7 files changed

+29
-21
lines changed

block/blk-settings.c

+4-1
Original file line number | Diff line number | Diff line change
@@ -188,7 +188,10 @@ static int blk_validate_limits(struct queue_limits *lim)
188188
* bvec and lower layer bio splitting is supposed to handle the two
189189
* correctly.
190190
*/
191-
if (!lim->virt_boundary_mask) {
191+
if (lim->virt_boundary_mask) {
192+
if (!lim->max_segment_size)
193+
lim->max_segment_size = UINT_MAX;
194+
} else {
192195
/*
193196
* The maximum segment size has an odd historic 64k default that
194197
* drivers probably should override. Just like the I/O size we

drivers/nvme/host/nvme.h

+5
Original file line number | Diff line number | Diff line change
@@ -162,6 +162,11 @@ enum nvme_quirks {
162162
* Disables simple suspend/resume path.
163163
*/
164164
NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = (1 << 20),
165+
166+
/*
167+
* MSI (but not MSI-X) interrupts are broken and never fire.
168+
*/
169+
NVME_QUIRK_BROKEN_MSI = (1 << 21),
165170
};
166171

167172
/*

drivers/nvme/host/pci.c

+11-3
Original file line number | Diff line number | Diff line change
@@ -2224,6 +2224,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
22242224
.priv = dev,
22252225
};
22262226
unsigned int irq_queues, poll_queues;
2227+
unsigned int flags = PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY;
22272228

22282229
/*
22292230
* Poll queues don't need interrupts, but we need at least one I/O queue
@@ -2247,8 +2248,10 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
22472248
irq_queues = 1;
22482249
if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
22492250
irq_queues += (nr_io_queues - poll_queues);
2250-
return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
2251-
PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
2251+
if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
2252+
flags &= ~PCI_IRQ_MSI;
2253+
return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, flags,
2254+
&affd);
22522255
}
22532256

22542257
static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
@@ -2477,6 +2480,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
24772480
{
24782481
int result = -ENOMEM;
24792482
struct pci_dev *pdev = to_pci_dev(dev->dev);
2483+
unsigned int flags = PCI_IRQ_ALL_TYPES;
24802484

24812485
if (pci_enable_device_mem(pdev))
24822486
return result;
@@ -2493,7 +2497,9 @@ static int nvme_pci_enable(struct nvme_dev *dev)
24932497
* interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
24942498
* adjust this later.
24952499
*/
2496-
result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
2500+
if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
2501+
flags &= ~PCI_IRQ_MSI;
2502+
result = pci_alloc_irq_vectors(pdev, 1, 1, flags);
24972503
if (result < 0)
24982504
goto disable;
24992505

@@ -3390,6 +3396,8 @@ static const struct pci_device_id nvme_id_table[] = {
33903396
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
33913397
NVME_QUIRK_DISABLE_WRITE_ZEROES|
33923398
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3399+
{ PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */
3400+
.driver_data = NVME_QUIRK_BROKEN_MSI },
33933401
{ PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */
33943402
.driver_data = NVME_QUIRK_BOGUS_NID, },
33953403
{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */

drivers/nvme/target/auth.c

+1-1
Original file line number | Diff line number | Diff line change
@@ -480,7 +480,7 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
480480
nvme_auth_free_key(transformed_key);
481481
out_free_tfm:
482482
crypto_free_shash(shash_tfm);
483-
return 0;
483+
return ret;
484484
}
485485

486486
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,

drivers/nvme/target/configfs.c

+2-3
Original file line number | Diff line number | Diff line change
@@ -757,10 +757,9 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
757757
bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
758758
{
759759
struct config_item *ns_item;
760-
char name[4] = {};
760+
char name[12];
761761

762-
if (sprintf(name, "%u", nsid) <= 0)
763-
return false;
762+
snprintf(name, sizeof(name), "%u", nsid);
764763
mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
765764
ns_item = config_group_find_item(&subsys->namespaces_group, name);
766765
mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);

drivers/nvme/target/core.c

+2-1
Original file line number | Diff line number | Diff line change
@@ -1686,7 +1686,8 @@ static int __init nvmet_init(void)
16861686
if (!buffered_io_wq)
16871687
goto out_free_zbd_work_queue;
16881688

1689-
nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
1689+
nvmet_wq = alloc_workqueue("nvmet-wq",
1690+
WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
16901691
if (!nvmet_wq)
16911692
goto out_free_buffered_work_queue;
16921693

drivers/nvme/target/rdma.c

+4-12
Original file line number | Diff line number | Diff line change
@@ -474,12 +474,8 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
474474
return 0;
475475

476476
out_free:
477-
while (--i >= 0) {
478-
struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
479-
480-
list_del(&rsp->free_list);
481-
nvmet_rdma_free_rsp(ndev, rsp);
482-
}
477+
while (--i >= 0)
478+
nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
483479
kfree(queue->rsps);
484480
out:
485481
return ret;
@@ -490,12 +486,8 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
490486
struct nvmet_rdma_device *ndev = queue->dev;
491487
int i, nr_rsps = queue->recv_queue_size * 2;
492488

493-
for (i = 0; i < nr_rsps; i++) {
494-
struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
495-
496-
list_del(&rsp->free_list);
497-
nvmet_rdma_free_rsp(ndev, rsp);
498-
}
489+
for (i = 0; i < nr_rsps; i++)
490+
nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
499491
kfree(queue->rsps);
500492
}
501493

0 commit comments

Comments (0)