@@ -2042,6 +2042,7 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result,
 {
 	struct cmd_config_rx_mode_flag *res = parsed_result;
 	portid_t pid;
+	int k;
 
 	if (!all_ports_stopped()) {
 		printf("Please stop all ports first\n");
@@ -2142,6 +2143,10 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result,
 			return;
 		}
 		port->dev_conf.rxmode.offloads = rx_offloads;
+		/* Apply Rx offloads configuration */
+		for (k = 0; k < port->dev_info.max_rx_queues; k++)
+			port->rx_conf[k].offloads =
+				port->dev_conf.rxmode.offloads;
 	}
 
 	init_port_config();
@@ -4354,6 +4359,17 @@ csum_show(int port_id)
 	}
 }
 
+static void
+cmd_config_queue_tx_offloads(struct rte_port *port)
+{
+	int k;
+
+	/* Apply queue Tx offloads configuration */
+	for (k = 0; k < port->dev_info.max_tx_queues; k++)
+		port->tx_conf[k].offloads =
+			port->dev_conf.txmode.offloads;
+}
+
 static void
 cmd_csum_parsed(void *parsed_result,
 	__attribute__((unused)) struct cmdline *cl,
@@ -4438,6 +4454,7 @@ cmd_csum_parsed(void *parsed_result,
 			ports[res->port_id].dev_conf.txmode.offloads &=
 							(~csum_offloads);
 		}
+		cmd_config_queue_tx_offloads(&ports[res->port_id]);
 	}
 	csum_show(res->port_id);
 
@@ -4589,6 +4606,7 @@ cmd_tso_set_parsed(void *parsed_result,
 		printf("TSO segment size for non-tunneled packets is %d\n",
 			ports[res->port_id].tso_segsz);
 	}
+	cmd_config_queue_tx_offloads(&ports[res->port_id]);
 
 	/* display warnings if configuration is not supported by the NIC */
 	rte_eth_dev_info_get(res->port_id, &dev_info);
@@ -4744,6 +4762,7 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
 			"if outer L3 is IPv4; not necessary for IPv6\n");
 	}
 
+	cmd_config_queue_tx_offloads(&ports[res->port_id]);
 	cmd_reconfig_device_queue(res->port_id, 1, 1);
 }
 
@@ -8348,32 +8367,32 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
 		__attribute__((unused)) void *data)
 {
 	int ret = -ENOTSUP;
-	uint16_t rx_mode = 0;
+	uint16_t vf_rxmode = 0;
 	struct cmd_set_vf_rxmode *res = parsed_result;
 
 	int is_on = (strcmp(res->on, "on") == 0) ? 1 : 0;
 	if (!strcmp(res->what,"rxmode")) {
 		if (!strcmp(res->mode, "AUPE"))
-			rx_mode |= ETH_VMDQ_ACCEPT_UNTAG;
+			vf_rxmode |= ETH_VMDQ_ACCEPT_UNTAG;
 		else if (!strcmp(res->mode, "ROPE"))
-			rx_mode |= ETH_VMDQ_ACCEPT_HASH_UC;
+			vf_rxmode |= ETH_VMDQ_ACCEPT_HASH_UC;
 		else if (!strcmp(res->mode, "BAM"))
-			rx_mode |= ETH_VMDQ_ACCEPT_BROADCAST;
+			vf_rxmode |= ETH_VMDQ_ACCEPT_BROADCAST;
 		else if (!strncmp(res->mode, "MPE",3))
-			rx_mode |= ETH_VMDQ_ACCEPT_MULTICAST;
+			vf_rxmode |= ETH_VMDQ_ACCEPT_MULTICAST;
 	}
 
 	RTE_SET_USED(is_on);
 
 #ifdef RTE_LIBRTE_IXGBE_PMD
 	if (ret == -ENOTSUP)
 		ret = rte_pmd_ixgbe_set_vf_rxmode(res->port_id, res->vf_id,
-						  rx_mode, (uint8_t)is_on);
+						  vf_rxmode, (uint8_t)is_on);
 #endif
 #ifdef RTE_LIBRTE_BNXT_PMD
 	if (ret == -ENOTSUP)
 		ret = rte_pmd_bnxt_set_vf_rxmode(res->port_id, res->vf_id,
-					      rx_mode, (uint8_t)is_on);
+					      vf_rxmode, (uint8_t)is_on);
 #endif
 	if (ret < 0)
 		printf("bad VF receive mode parameter, return code = %d \n",