Commit 46fc9f7f authored 1 year ago by Rocky Automation
import kernel-5.14.0-362.8.1.el9_3
parent 4c4de2bc
Showing 2 changed files with 74 additions and 263 deletions:

SOURCES/4-gVNIC-IOMMU.patch: 68 additions, 258 deletions
SPECS/kernel.spec: 6 additions, 5 deletions
SOURCES/4-gVNIC-IOMMU.patch (+68 −258)
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 7d4b61e5db47..ace0e9b8b913 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -360,8 +360,9 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
* supporting all features of AMD IOMMU page tables like level skipping
* and full 64 bit address spaces.
*/
-static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
LIST_HEAD(freelist);
@@ -369,39 +370,47 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
u64 __pte, *pte;
int ret, i, count;
- BUG_ON(!IS_ALIGNED(iova, size));
- BUG_ON(!IS_ALIGNED(paddr, size));
+ BUG_ON(!IS_ALIGNED(iova, pgsize));
+ BUG_ON(!IS_ALIGNED(paddr, pgsize));
ret = -EINVAL;
if (!(prot & IOMMU_PROT_MASK))
goto out;
- count = PAGE_SIZE_PTE_COUNT(size);
- pte = alloc_pte(dom, iova, size, NULL, gfp, &updated);
+ while (pgcount > 0) {
+ count = PAGE_SIZE_PTE_COUNT(pgsize);
+ pte = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
- ret = -ENOMEM;
- if (!pte)
- goto out;
+ ret = -ENOMEM;
+ if (!pte)
+ goto out;
- for (i = 0; i < count; ++i)
- free_clear_pte(&pte[i], pte[i], &freelist);
+ for (i = 0; i < count; ++i)
+ free_clear_pte(&pte[i], pte[i], &freelist);
- if (!list_empty(&freelist))
- updated = true;
+ if (!list_empty(&freelist))
+ updated = true;
- if (count > 1) {
- __pte = PAGE_SIZE_PTE(__sme_set(paddr), size);
- __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
- } else
- __pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+ if (count > 1) {
+ __pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
+ __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+ } else
+ __pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
- if (prot & IOMMU_PROT_IR)
- __pte |= IOMMU_PTE_IR;
- if (prot & IOMMU_PROT_IW)
- __pte |= IOMMU_PTE_IW;
+ if (prot & IOMMU_PROT_IR)
+ __pte |= IOMMU_PTE_IR;
+ if (prot & IOMMU_PROT_IW)
+ __pte |= IOMMU_PTE_IW;
- for (i = 0; i < count; ++i)
- pte[i] = __pte;
+ for (i = 0; i < count; ++i)
+ pte[i] = __pte;
+
+ iova += pgsize;
+ paddr += pgsize;
+ pgcount--;
+ if (mapped)
+ *mapped += pgsize;
+ }
ret = 0;
@@ -426,17 +435,18 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
return ret;
}
-static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
- unsigned long iova,
- size_t size,
- struct iommu_iotlb_gather *gather)
+static unsigned long iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
+ unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
unsigned long long unmapped;
unsigned long unmap_size;
u64 *pte;
+ size_t size = pgcount << __ffs(pgsize);
- BUG_ON(!is_power_of_2(size));
+ BUG_ON(!is_power_of_2(pgsize));
unmapped = 0;
@@ -448,14 +458,14 @@ static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
count = PAGE_SIZE_PTE_COUNT(unmap_size);
for (i = 0; i < count; i++)
pte[i] = 0ULL;
+ } else {
+ return unmapped;
}
iova = (iova & ~(unmap_size - 1)) + unmap_size;
unmapped += unmap_size;
}
- BUG_ON(unmapped && !is_power_of_2(unmapped));
-
return unmapped;
}
@@ -514,8 +524,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
cfg->tlb = &v1_flush_ops;
- pgtable->iop.ops.map = iommu_v1_map_page;
- pgtable->iop.ops.unmap = iommu_v1_unmap_page;
+ pgtable->iop.ops.map_pages = iommu_v1_map_pages;
+ pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages;
pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
return &pgtable->iop;
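The hunk above converts the v1 page-table ops from single-page map/unmap to the batched map_pages/unmap_pages interface: the PTE-install logic moves inside a `while (pgcount > 0)` loop that advances iova and paddr by pgsize on each iteration and accumulates progress in *mapped so the caller can unwind a partial mapping on failure. On the unmap side, pgsize must be a power of two, so `pgcount << __ffs(pgsize)` is simply `pgcount * pgsize`, the total span to unmap. Below is a minimal userspace sketch of that batching contract only — `stub_map_one` is a hypothetical stand-in for the real `alloc_pte()`/PTE-encoding work, which is not modeled here.

/* Simplified userspace model of the map_page -> map_pages conversion.
 * Only the batching contract introduced by the patch is shown: one call
 * maps pgcount pages of pgsize bytes each, reporting progress through
 * *mapped so the caller can unwind partial mappings on failure. */
#include <stdio.h>
#include <stddef.h>

static int stub_map_one(unsigned long iova, unsigned long paddr,
                        size_t pgsize)
{
	/* Stand-in for alloc_pte() + PTE install; always succeeds here. */
	printf("map iova=0x%lx -> paddr=0x%lx (%zu bytes)\n",
	       iova, paddr, pgsize);
	return 0;
}

static int map_pages(unsigned long iova, unsigned long paddr,
                     size_t pgsize, size_t pgcount, size_t *mapped)
{
	int ret = 0;

	while (pgcount > 0) {
		ret = stub_map_one(iova, paddr, pgsize);
		if (ret)
			break;	/* *mapped tells the caller how far we got */
		iova += pgsize;
		paddr += pgsize;
		pgcount--;
		if (mapped)
			*mapped += pgsize;
	}
	return ret;
}

int main(void)
{
	size_t mapped = 0;

	/* Map four 4 KiB pages in a single call, as the new hook allows. */
	map_pages(0x100000UL, 0x200000UL, 4096, 4, &mapped);
	printf("mapped %zu bytes total\n", mapped);
	return 0;
}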
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 3484058665c5..c9187a3b45fe 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2156,13 +2156,13 @@ static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
- if (ops->map)
+ if (ops->map_pages)
domain_flush_np_cache(domain, iova, size);
}
-static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
- phys_addr_t paddr, size_t page_size, int iommu_prot,
- gfp_t gfp)
+static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int iommu_prot, gfp_t gfp, size_t *mapped)
{
struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
@@ -2178,8 +2178,10 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
if (iommu_prot & IOMMU_WRITE)
prot |= IOMMU_PROT_IW;
- if (ops->map)
- ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
+ if (ops->map_pages) {
+ ret = ops->map_pages(ops, iova, paddr, pgsize,
+ pgcount, prot, gfp, mapped);
+ }
return ret;
}
@@ -2205,9 +2207,9 @@ static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
iommu_iotlb_gather_add_range(gather, iova, size);
}
-static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
- size_t page_size,
- struct iommu_iotlb_gather *gather)
+static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
@@ -2217,9 +2219,10 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
(domain->iop.mode == PAGE_MODE_NONE))
return 0;
- r = (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
+ r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0;
- amd_iommu_iotlb_gather_add_page(dom, gather, iova, page_size);
+ if (r)
+ amd_iommu_iotlb_gather_add_page(dom, gather, iova, r);
return r;
}
@@ -2381,8 +2384,8 @@ const struct iommu_ops amd_iommu_ops = {
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
.detach_dev = amd_iommu_detach_device,
- .map = amd_iommu_map,
- .unmap = amd_iommu_unmap,
+ .map_pages = amd_iommu_map_pages,
+ .unmap_pages = amd_iommu_unmap_pages,
.iotlb_sync_map = amd_iommu_iotlb_sync_map,
.iova_to_phys = amd_iommu_iova_to_phys,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
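For context, the caller's side of this interface lives in the iommu core (drivers/iommu/iommu.c, not part of this diff): for a large mapping request it picks the biggest page size from the domain's supported set that matches the alignment of iova | paddr, computes how many consecutive pages of that size fit, and issues one map_pages() call instead of one map() per page. A hedged userspace sketch of that selection follows; `pick_pgsize` is an illustrative name, not the kernel's actual helper.

/* Sketch: choose (pgsize, pgcount) for a map_pages()-style call, assuming
 * pgsize_bitmap has one bit set per supported page size, as the iommu
 * core's domain->pgsize_bitmap does. Simplified; real logic differs. */
#include <stdio.h>
#include <stddef.h>

static size_t pick_pgsize(unsigned long pgsize_bitmap,
                          unsigned long addr_merge, size_t size,
                          size_t *count)
{
	size_t pgsize = 0;
	unsigned int bit;

	for (bit = 0; bit < sizeof(pgsize_bitmap) * 8; bit++) {
		size_t cand = 1UL << bit;

		if (!(pgsize_bitmap & cand))
			continue;
		if (cand > size)
			break;
		if (addr_merge & (cand - 1))	/* misaligned for this size */
			break;
		pgsize = cand;	/* keep the largest size that still fits */
	}
	*count = pgsize ? size / pgsize : 0;	/* consecutive pages */
	return pgsize;
}

int main(void)
{
	size_t count;
	/* Domain supporting 4K and 2M pages; 4M request, 2M-aligned. */
	size_t pgsize = pick_pgsize((1UL << 12) | (1UL << 21),
				    0x40000000UL | 0x80000000UL,
				    4UL << 20, &count);

	printf("pgsize=%zu count=%zu\n", pgsize, count);	/* 2M x 2 */
	return 0;
}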
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 160735484465..5655da9cd236 100644
--- a/drivers/net/ethernet/google/gve/gve.h
...
...
@@ -287,7 +77,7 @@ index 50b384910c83..ce574d097e28 100644
data[i++] = tmp_rx_skb_alloc_fail +
tmp_rx_buf_alloc_fail +
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
-index e4e98aa7745f..1f55137722b0 100644
+index e4e98aa7745f..f24b19dd3453 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -35,6 +35,12 @@ static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
...
...
@@ -322,10 +112,14 @@ index e4e98aa7745f..1f55137722b0 100644
/* Allocate one page per Rx queue slot. Each page is split into two
* packet buffers, when possible we "page flip" between the two.
@@ -135,7 +146,33 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
goto alloc_err;
}
@@ -132,11 +143,51 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i],
&rx->data.data_ring[i]);
if (err)
- goto alloc_err;
+ goto alloc_err_rda;
+ }
+
+ if (!rx->data.raw_addressing) {
+ for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) {
+ struct page *page = alloc_page(GFP_KERNEL);
...
...
@@ -343,20 +137,36 @@ index e4e98aa7745f..1f55137722b0 100644
+ page_ref_add(page, INT_MAX - 1);
+ rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX;
+ }
+
}
+
}
return slots;
-alloc_err:
+
+alloc_err_qpl:
+ /* Fully free the copy pool pages. */
+ while (j--) {
+ page_ref_sub(rx->qpl_copy_pool[j].page,
+ rx->qpl_copy_pool[j].pagecnt_bias - 1);
+ put_page(rx->qpl_copy_pool[j].page);
+ }
alloc_err:
+
+ /* Do not fully free QPL pages - only remove the bias added in this
+ * function with gve_setup_rx_buffer.
+ */
+ while (i--)
+ page_ref_sub(rx->data.page_info[i].page,
+ rx->data.page_info[i].pagecnt_bias - 1);
+
+ gve_unassign_qpl(priv, rx->data.qpl->id);
+ rx->data.qpl = NULL;
+
+ return err;
+
+alloc_err_rda:
while (i--)
gve_rx_free_buffer(&priv->pdev->dev,
@@ -146,12 +183,11 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
&rx->data.page_info[i],
@@ -146,12 +197,11 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
{
...
...
@@ -372,7 +182,7 @@ index e4e98aa7745f..1f55137722b0 100644
}
static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
@@ -181,10 +217,22 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
@@ -181,10 +231,22 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
GFP_KERNEL);
if (!rx->data.data_ring)
return -ENOMEM;
...
...
@@ -396,7 +206,7 @@ index e4e98aa7745f..1f55137722b0 100644
}
rx->fill_cnt = filled_pages;
/* Ensure data ring slots (packet buffers) are visible. */
@@ -236,6 +284,9 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
@@ -236,6 +298,9 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
rx->q_resources = NULL;
abort_filled:
gve_rx_unfill_pages(priv, rx);
...
...
@@ -406,7 +216,7 @@ index e4e98aa7745f..1f55137722b0 100644
abort_with_slots:
bytes = sizeof(*rx->data.data_ring) * slots;
dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus);
@@ -292,30 +343,47 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
@@ -292,30 +357,47 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
return PKT_HASH_TYPE_L2;
}
...
...
@@ -443,13 +253,13 @@ index e4e98aa7745f..1f55137722b0 100644
+ skb = napi_alloc_skb(napi, 0);
+ if (!skb)
+ return NULL;
- skb = ctx->skb_head;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
+
+ // We will never chain more than two SKBs: 2 * 16 * 2k > 64k
+ // which is why we do not need to chain by using skb->next
+ skb_shinfo(ctx->skb_tail)->frag_list = skb;
+
- skb = ctx->skb_head;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
+ ctx->skb_tail = skb;
+ num_frags = 0;
+ }
...
...
@@ -468,7 +278,7 @@ index e4e98aa7745f..1f55137722b0 100644
}
static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
@@ -363,6 +431,92 @@ gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
@@ -363,6 +445,92 @@ gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
return skb;
}
...
...
@@ -561,7 +371,7 @@ index e4e98aa7745f..1f55137722b0 100644
static struct sk_buff *
gve_rx_qpl(struct device *dev, struct net_device *netdev,
struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info,
@@ -377,7 +531,7 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
@@ -377,7 +545,7 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
* choice is to copy the data out of it so that we can return it to the
* device.
*/
...
...
@@ -570,7 +380,7 @@ index e4e98aa7745f..1f55137722b0 100644
skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
/* No point in recycling if we didn't get the skb */
if (skb) {
@@ -386,116 +540,23 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
@@ -386,116 +554,23 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
}
} else {
...
...
@@ -692,7 +502,7 @@ index e4e98aa7745f..1f55137722b0 100644
if (skb) {
u64_stats_update_begin(&rx->statss);
rx->rx_copied_pkt++;
@@ -504,29 +565,25 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
@@ -504,29 +579,25 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
u64_stats_update_end(&rx->statss);
}
} else {
...
...
@@ -735,7 +545,7 @@ index e4e98aa7745f..1f55137722b0 100644
skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
page_info, len, napi, data_slot);
}
@@ -534,101 +591,113 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
@@ -534,101 +605,113 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
return skb;
}
...
...
@@ -768,21 +578,18 @@ index e4e98aa7745f..1f55137722b0 100644
- napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
+ struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
+ bool is_first_frag = ctx->frag_cnt == 0;
+
+ bool is_only_frag = is_first_frag && is_last_frag;
- if (unlikely(!gve_rx_ctx_init(ctx, rx)))
- goto skb_alloc_fail;
+ bool is_only_frag = is_first_frag && is_last_frag;
+ if (unlikely(ctx->drop_pkt))
+ goto finish_frag;
- while (ctx->curr_frag_cnt < ctx->expected_frag_cnt) {
- /* Prefetch two packet buffers ahead, we will need it soon. */
- page_info = &rx->data.page_info[(idx + 2) & rx->mask];
- va = page_info->page_address + page_info->page_offset;
+ if (unlikely(ctx->drop_pkt))
+ goto finish_frag;
- prefetch(page_info->page); /* Kernel page struct. */
- prefetch(va); /* Packet header. */
- prefetch(va + 64); /* Next cacheline too. */
+ if (desc->flags_seq & GVE_RXF_ERR) {
+ ctx->drop_pkt = true;
+ cnts->desc_err_pkt_cnt++;
...
...
@@ -790,7 +597,9 @@ index e4e98aa7745f..1f55137722b0 100644
+ goto finish_frag;
+ }
- len = gve_rx_get_fragment_size(ctx, desc);
- prefetch(page_info->page); /* Kernel page struct. */
- prefetch(va); /* Packet header. */
- prefetch(va + 64); /* Next cacheline too. */
+ if (unlikely(frag_size > rx->packet_buffer_size)) {
+ netdev_warn(priv->dev, "Unexpected frag size %d, can't exceed %d, scheduling reset",
+ frag_size, rx->packet_buffer_size);
...
...
@@ -800,19 +609,7 @@ index e4e98aa7745f..1f55137722b0 100644
+ goto finish_frag;
+ }
- page_info = &rx->data.page_info[idx];
- data_slot = &rx->data.data_ring[idx];
- page_bus = rx->data.raw_addressing ?
- be64_to_cpu(data_slot->addr) - page_info->page_offset :
- rx->data.qpl->page_buses[idx];
- dma_sync_single_for_cpu(&priv->pdev->dev, page_bus, PAGE_SIZE, DMA_FROM_DEVICE);
-
- skb = gve_rx_skb(priv, rx, page_info, napi, len, data_slot);
- if (!skb) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_skb_alloc_fail++;
- u64_stats_update_end(&rx->statss);
- goto skb_alloc_fail;
- len = gve_rx_get_fragment_size(ctx, desc);
+ /* Prefetch two packet buffers ahead, we will need it soon. */
+ page_info = &rx->data.page_info[(idx + 2) & rx->mask];
+ va = page_info->page_address + page_info->page_offset;
...
...
@@ -836,7 +633,20 @@ index e4e98aa7745f..1f55137722b0 100644
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_skb_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
+
- page_info = &rx->data.page_info[idx];
- data_slot = &rx->data.data_ring[idx];
- page_bus = rx->data.raw_addressing ?
- be64_to_cpu(data_slot->addr) - page_info->page_offset :
- rx->data.qpl->page_buses[idx];
- dma_sync_single_for_cpu(&priv->pdev->dev, page_bus, PAGE_SIZE, DMA_FROM_DEVICE);
-
- skb = gve_rx_skb(priv, rx, page_info, napi, len, data_slot);
- if (!skb) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_skb_alloc_fail++;
- u64_stats_update_end(&rx->statss);
- goto skb_alloc_fail;
+ napi_free_frags(napi);
+ ctx->drop_pkt = true;
+ goto finish_frag;
...
...
@@ -922,7 +732,7 @@ index e4e98aa7745f..1f55137722b0 100644
}
bool gve_rx_work_pending(struct gve_rx_ring *rx)
@@ -704,36 +773,39 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
@@ -704,36 +787,39 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
netdev_features_t feat)
{
...
...
@@ -935,10 +745,10 @@ index e4e98aa7745f..1f55137722b0 100644
- struct gve_rx_desc *desc;
- u64 bytes = 0;
+ u32 work_done = 0;
+
+ struct gve_rx_desc *desc = &rx->desc.desc_ring[idx];
- desc = &rx->desc.desc_ring[idx];
+ struct gve_rx_desc *desc = &rx->desc.desc_ring[idx];
+
+ // Exceed budget only if (and till) the inflight packet is consumed.
while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) &&
- work_done < budget) {
...
...
@@ -986,7 +796,7 @@ index e4e98aa7745f..1f55137722b0 100644
}
if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold)
@@ -741,8 +813,10 @@ static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
@@ -741,8 +827,10 @@ static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
if (work_done) {
u64_stats_update_begin(&rx->statss);
...
...
@@ -999,7 +809,7 @@ index e4e98aa7745f..1f55137722b0 100644
u64_stats_update_end(&rx->statss);
}
@@ -767,7 +841,7 @@ static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
@@ -767,7 +855,7 @@ static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
}
gve_rx_write_doorbell(priv, rx);
...
...
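The copy-pool setup and error paths in the gve_rx.c hunks above rely on the page-bias idiom: after alloc_page(), the driver does page_ref_add(page, INT_MAX - 1) and records pagecnt_bias = INT_MAX, so handing a buffer to the stack costs a local decrement rather than an atomic refcount operation per packet; teardown returns the unconsumed bias with page_ref_sub(page, pagecnt_bias - 1) followed by put_page(). A small userspace model of that accounting follows — plain ints stand in for the atomic page refcount, and the names are illustrative.

/* Userspace model of the pagecnt_bias accounting used by the copy pool.
 * refcount models page->_refcount; pagecnt_bias counts pre-paid
 * references the driver may still hand out without touching refcount. */
#include <stdio.h>
#include <limits.h>

struct pool_page {
	int refcount;		/* models the page's refcount */
	int pagecnt_bias;	/* pre-paid references still available */
};

static void pool_page_init(struct pool_page *p)
{
	p->refcount = 1;		/* from alloc_page() */
	p->refcount += INT_MAX - 1;	/* page_ref_add(page, INT_MAX - 1) */
	p->pagecnt_bias = INT_MAX;
}

/* Hand one reference to a consumer (e.g. an skb frag): no atomic op. */
static void pool_page_get(struct pool_page *p)
{
	p->pagecnt_bias--;
}

/* Teardown mirrors the error path in the patch: drop the unconsumed
 * bias, then the final put frees the page once consumers released theirs. */
static void pool_page_teardown(struct pool_page *p)
{
	p->refcount -= p->pagecnt_bias - 1;	/* page_ref_sub(..., bias - 1) */
	p->refcount -= 1;			/* put_page() */
	if (p->refcount == 0)
		printf("page freed\n");
}

int main(void)
{
	struct pool_page p;

	pool_page_init(&p);
	pool_page_get(&p);	/* one packet took a reference... */
	p.refcount--;		/* ...and the stack later released it */
	pool_page_teardown(&p);	/* prints "page freed" */
	return 0;
}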
SPECS/kernel.spec (+6 −5)
...
...
@@ -959,7 +959,6 @@ Patch1: patch-%{patchversion}-redhat.patch
Patch999999: linux-kernel-test.patch
Patch1000000: 1000-debrand-some-messages.patch
Patch1000001: 4-gVNIC-IOMMU.patch
-Patch1000002: nvme_bring-back-autoremoval-of-deleted-namespaces-during-sequential-scan.patch
# END OF PATCH DEFINITIONS
...
...
@@ -1698,7 +1697,6 @@ ApplyOptionalPatch patch-%{patchversion}-redhat.patch
ApplyOptionalPatch 1000-debrand-some-messages.patch
ApplyOptionalPatch linux-kernel-test.patch
ApplyPatch 4-gVNIC-IOMMU.patch
-ApplyPatch nvme_bring-back-autoremoval-of-deleted-namespaces-during-sequential-scan.patch
# END OF PATCH APPLICATIONS
...
...
@@ -3757,10 +3755,13 @@ fi
#
#
%changelog
-* Wed Nov 29 2023 RESF Sideline (Backporter) <releng+sideline@rockylinux.org> - 5.14.0-362.8.1.0.2
-- nvme: bring back auto-removal of deleted namespaces during sequential scan (Ronnie Sahlberg) [2357]
+* Wed Dec 06 2023 RESF Sideline (Backporter) <releng+sideline@rockylinux.org> - 5.14.0-362.8.1.0.2
+- gve: Do not fully free QPL pages on prefill errors (Ronnie Sahlberg)
+- gve: Fix error return code in gve_prefill_rx_pages() (Ronnie Sahlberg)
+- gve: Reduce alloc and copy costs in the GQ rx path (Ronnie Sahlberg)
+- gve: Fix spelling mistake 'droping' -> 'dropping' (Ronnie Sahlberg)
-* Wed Nov 08 2023 Release Engineering <releng@rockylinux.org> - 5.14.0-362.8.1
+* Wed Dec 06 2023 Release Engineering <releng@rockylinux.org> - 5.14.0-362.8.1
- Porting to 9.3, debranding and Rocky branding
* Tue Oct 03 2023 Jan Stancek <jstancek@redhat.com> [5.14.0-362.8.1.el9_3]