Merged
21 commits
49266f5
selftests/mm temporary fix of hmm infinite loop
PlaidCat Oct 22, 2024
841e13e
net: mana: Add support for Multi Vports on Bare metal
PlaidCat Jun 9, 2025
c98862f
tools: hv: Enable debug logs for hv_kvp_daemon
PlaidCat Jun 9, 2025
7c366db
RDMA/mana_ib: use the correct page size for mapping user-mode doorbel…
shreeya-patel98 Aug 19, 2025
65884ec
RDMA/mana_ib: use the correct page table index based on hardware page…
shreeya-patel98 Aug 19, 2025
c804b02
scsi: storvsc: Increase the timeouts to storvsc_timeout
kerneltoast Aug 13, 2025
0e3aa27
Drivers: hv: Allow vmbus_sendpacket_mpb_desc() to create multiple ranges
shreeya-patel98 Aug 25, 2025
4a937fb
hv_netvsc: Use vmbus_sendpacket_mpb_desc() to send VMBus messages
shreeya-patel98 Aug 25, 2025
a20a21f
hv_netvsc: Preserve contiguous PFN grouping in the page buffer array
shreeya-patel98 Aug 25, 2025
382ea2f
hv_netvsc: Remove rmsg_pgcnt
shreeya-patel98 Aug 25, 2025
090e980
Drivers: hv: vmbus: Remove vmbus_sendpacket_pagebuffer()
shreeya-patel98 Aug 25, 2025
ce7e943
hv_netvsc: Use VF's tso_max_size value when data path is VF
shreeya-patel98 Aug 25, 2025
4cfa5f5
net: mana: Allow tso_max_size to go up-to GSO_MAX_SIZE
shreeya-patel98 Aug 25, 2025
9d4aa4f
net: mana: Add debug logs in MANA network driver
shreeya-patel98 Aug 25, 2025
1cb1d92
net: mana: Change the function signature of mana_get_primary_netdev_rcu
shreeya-patel98 Aug 28, 2025
73f058c
RDMA/mana_ib: Handle net event for pointing to the current netdev
shreeya-patel98 Aug 28, 2025
d4756c1
net: mana: Support holes in device list reply msg
shreeya-patel98 Aug 28, 2025
2342054
net: mana: Switch to page pool for jumbo frames
shreeya-patel98 Aug 28, 2025
d78ae8f
net: mana: Expose additional hardware counters for drop and TC via et…
shreeya-patel98 Sep 3, 2025
5fbac25
net: mana: Add handler for hardware servicing events
shreeya-patel98 Sep 3, 2025
f6a8102
net: mana: Handle Reset Request from MANA NIC
shreeya-patel98 Sep 3, 2025
65 changes: 3 additions & 62 deletions drivers/hv/channel.c
@@ -1077,68 +1077,10 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
EXPORT_SYMBOL(vmbus_sendpacket);

/*
* vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
* packets using a GPADL Direct packet type. This interface allows you
* to control notifying the host. This will be useful for sending
* batched data. Also the sender can control the send flags
* explicitly.
*/
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
struct hv_page_buffer pagebuffers[],
u32 pagecount, void *buffer, u32 bufferlen,
u64 requestid)
{
int i;
struct vmbus_channel_packet_page_buffer desc;
u32 descsize;
u32 packetlen;
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;

if (pagecount > MAX_PAGE_BUFFER_COUNT)
return -EINVAL;

/*
* Adjust the size down since vmbus_channel_packet_page_buffer is the
* largest size we support
*/
descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
((MAX_PAGE_BUFFER_COUNT - pagecount) *
sizeof(struct hv_page_buffer));
packetlen = descsize + bufferlen;
packetlen_aligned = ALIGN(packetlen, sizeof(u64));

/* Setup the descriptor */
desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
desc.length8 = (u16)(packetlen_aligned >> 3);
desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
desc.reserved = 0;
desc.rangecount = pagecount;

for (i = 0; i < pagecount; i++) {
desc.range[i].len = pagebuffers[i].len;
desc.range[i].offset = pagebuffers[i].offset;
desc.range[i].pfn = pagebuffers[i].pfn;
}

bufferlist[0].iov_base = &desc;
bufferlist[0].iov_len = descsize;
bufferlist[1].iov_base = buffer;
bufferlist[1].iov_len = bufferlen;
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);

return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);

/*
* vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
* vmbus_sendpacket_mpb_desc - Send one or more multi-page buffer packets
* using a GPADL Direct packet type.
* The buffer includes the vmbus descriptor.
* The desc argument must include space for the VMBus descriptor. The
* rangecount field must already be set.
*/
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
struct vmbus_packet_mpb_array *desc,
@@ -1160,7 +1102,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
desc->length8 = (u16)(packetlen_aligned >> 3);
desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */
desc->reserved = 0;
desc->rangecount = 1;

bufferlist[0].iov_base = desc;
bufferlist[0].iov_len = desc_size;
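The updated comment in channel.c above spells out the new contract: callers of vmbus_sendpacket_mpb_desc() must size the descriptor themselves and set rangecount before the call, since the function no longer overwrites it with 1. A minimal caller sketch for a single PFN range, assuming the vmbus_packet_mpb_array layout from include/linux/hyperv.h; send_single_range() is a hypothetical helper, not code from this series:

```c
/*
 * Illustrative only: the caller now owns the descriptor contents,
 * including rangecount, and passes it to vmbus_sendpacket_mpb_desc().
 */
static int send_single_range(struct vmbus_channel *chan, u64 pfn, u32 len,
			     void *msg, u32 msg_len, u64 requestid)
{
	u32 desc_size = sizeof(struct vmbus_packet_mpb_array) + sizeof(u64);
	struct vmbus_packet_mpb_array *desc;
	int ret;

	desc = kzalloc(desc_size, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	desc->range.offset = 0;		/* offset into the first page */
	desc->range.len = len;		/* total bytes across the range */
	desc->range.pfn_array[0] = pfn;
	desc->rangecount = 1;		/* caller-owned after this series */

	ret = vmbus_sendpacket_mpb_desc(chan, desc, desc_size,
					msg, msg_len, requestid);
	kfree(desc);
	return ret;
}
```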
54 changes: 48 additions & 6 deletions drivers/infiniband/hw/mana/device.c
@@ -52,6 +52,38 @@ static const struct ib_device_ops mana_ib_dev_ops = {
ib_ind_table),
};

static int mana_ib_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct mana_ib_dev *dev = container_of(this, struct mana_ib_dev, nb);
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
struct gdma_context *gc = dev->gdma_dev->gdma_context;
struct mana_context *mc = gc->mana.driver_data;
struct net_device *ndev;

/* Only process events from our parent device */
if (event_dev != mc->ports[0])
return NOTIFY_DONE;

switch (event) {
case NETDEV_CHANGEUPPER:
ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
/*
* RDMA core will setup GID based on updated netdev.
* It's not possible to race with the core as rtnl lock is being
* held.
*/
ib_device_set_netdev(&dev->ib_dev, ndev, 1);

/* mana_get_primary_netdev() returns ndev with refcount held */
netdev_put(ndev, &dev->dev_tracker);

return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}

static int mana_ib_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
@@ -85,18 +117,17 @@ static int mana_ib_probe(struct auxiliary_device *adev,
dev->ib_dev.num_comp_vectors = mdev->gdma_context->max_num_queues;
dev->ib_dev.dev.parent = mdev->gdma_context->dev;

rcu_read_lock(); /* required to get primary netdev */
ndev = mana_get_primary_netdev_rcu(mc, 0);
ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
if (!ndev) {
rcu_read_unlock();
ret = -ENODEV;
ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
goto free_ib_device;
}
ether_addr_copy(mac_addr, ndev->dev_addr);
addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
rcu_read_unlock();
/* mana_get_primary_netdev() returns ndev with refcount held */
netdev_put(ndev, &dev->dev_tracker);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
goto free_ib_device;
@@ -110,17 +141,25 @@ static int mana_ib_probe(struct auxiliary_device *adev,
}
dev->gdma_dev = &mdev->gdma_context->mana_ib;

dev->nb.notifier_call = mana_ib_netdev_event;
ret = register_netdevice_notifier(&dev->nb);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
ret);
goto deregister_device;
}

ret = mana_ib_gd_query_adapter_caps(dev);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d",
ret);
goto deregister_device;
goto deregister_net_notifier;
}

ret = mana_ib_create_eqs(dev);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
goto deregister_device;
goto deregister_net_notifier;
}

ret = mana_ib_gd_create_rnic_adapter(dev);
@@ -149,6 +188,8 @@ static int mana_ib_probe(struct auxiliary_device *adev,
mana_ib_gd_destroy_rnic_adapter(dev);
destroy_eqs:
mana_ib_destroy_eqs(dev);
deregister_net_notifier:
unregister_netdevice_notifier(&dev->nb);
deregister_device:
mana_gd_deregister_device(dev->gdma_dev);
free_ib_device:
@@ -164,6 +205,7 @@ static void mana_ib_remove(struct auxiliary_device *adev)
xa_destroy(&dev->qp_table_wq);
mana_ib_gd_destroy_rnic_adapter(dev);
mana_ib_destroy_eqs(dev);
unregister_netdevice_notifier(&dev->nb);
mana_gd_deregister_device(dev->gdma_dev);
ib_dealloc_device(&dev->ib_dev);
}
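One detail that is easy to miss in the device.c hunks above: mana_get_primary_netdev() replaces the RCU-based mana_get_primary_netdev_rcu() lookup and returns the netdev with a tracked reference, which is why both mana_ib_probe() and the new notifier pair it with netdev_put(ndev, &dev->dev_tracker). A minimal sketch of the generic netdevice_tracker convention assumed here (not the helper's actual body):

```c
/*
 * Hedged sketch of the tracked-reference pattern: netdev_hold() records
 * the reference in the caller-supplied tracker, and the later
 * netdev_put() must name the same tracker so leaked references can be
 * attributed.  get_tracked() is a hypothetical wrapper for illustration.
 */
#include <linux/netdevice.h>

static struct net_device *get_tracked(struct net_device *candidate,
				      netdevice_tracker *tracker)
{
	if (!candidate)
		return NULL;

	netdev_hold(candidate, tracker, GFP_KERNEL);	/* take tracked ref */
	return candidate;	/* caller releases with netdev_put(candidate, tracker) */
}
```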
8 changes: 4 additions & 4 deletions drivers/infiniband/hw/mana/main.c
@@ -383,7 +383,7 @@ static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem

create_req->length = umem->length;
create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
create_req->gdma_page_type = order_base_2(page_sz) - MANA_PAGE_SHIFT;
create_req->page_count = num_pages_total;

ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
@@ -511,13 +511,13 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
PAGE_SHIFT;
prot = pgprot_writecombine(vma->vm_page_prot);

ret = rdma_user_mmap_io(ibcontext, vma, pfn, gc->db_page_size, prot,
ret = rdma_user_mmap_io(ibcontext, vma, pfn, PAGE_SIZE, prot,
NULL);
if (ret)
ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret);
else
ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %u, ret %d\n",
pfn, gc->db_page_size, ret);
ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %lu, ret %d\n",
pfn, PAGE_SIZE, ret);

return ret;
}
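Both main.c hunks above fix page-size assumptions: the DMA-region page type is now expressed in hardware (MANA) page units rather than kernel PAGE_SIZE units, and the user-mode doorbell is mapped with the PAGE_SIZE length that is actually being mapped. A quick arithmetic check of the first change, assuming MANA_PAGE_SHIFT is 12 (4 KiB hardware pages) and a kernel built with 64 KiB pages, which is where the old formula diverges:

```c
#include <stdio.h>

/* Illustrative values only; MANA_PAGE_SHIFT == 12 is an assumption. */
#define MANA_PAGE_SHIFT   12U	/* 4 KiB hardware page */
#define KERNEL_PAGE_SHIFT 16U	/* e.g. arm64 with 64 KiB PAGE_SIZE */

int main(void)
{
	unsigned long page_sz = 1UL << 16;	/* chosen DMA-region page size: 64 KiB */
	unsigned int order = __builtin_ctzl(page_sz);	/* order_base_2() for a power of two */

	/* old: relative to the kernel page shift -> 0, read by HW as 4 KiB pages */
	printf("old gdma_page_type = %u\n", order - KERNEL_PAGE_SHIFT);

	/* new: relative to the hardware page shift -> 4, i.e. 64 KiB in HW units */
	printf("new gdma_page_type = %u\n", order - MANA_PAGE_SHIFT);
	return 0;
}
```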
2 changes: 2 additions & 0 deletions drivers/infiniband/hw/mana/mana_ib.h
@@ -64,6 +64,8 @@ struct mana_ib_dev {
struct gdma_queue **eqs;
struct xarray qp_table_wq;
struct mana_ib_adapter_caps adapter_caps;
netdevice_tracker dev_tracker;
struct notifier_block nb;
};

struct mana_ib_wq {