Skip to content

Commit

Permalink
Change verify_in_flight_buffer_pkts to use the ingress duthost's buffer size. (#15969)
Browse files Browse the repository at this point in the history

Description of PR
Summary:
The function verify_in_flight_buffer_pkts uses the egress duthost's buffer size to verify that the number of transmitted packets is below the buffer size. That number is greatly influenced by the ingress buffer size when long links are in use, because HBM is used with a large XOFF threshold. Update this function to use the ingress DUT's buffer size instead.

Approach
What is the motivation for this PR?
How did you do it?
Updated the function to take ingress_duthost and egress_duthost, instead of just duthost.

How did you verify/test it?
Ran it in my TB:

=========================================================================================================== PASSES ===========================================================================================================
___________________________________________________________________________ test_pfc_pause_single_lossless_prio[multidut_port_info0-yy39top-lc4|3] ___________________________________________________________________________
___________________________________________________________________________ test_pfc_pause_single_lossless_prio[multidut_port_info0-yy39top-lc4|4] ___________________________________________________________________________
___________________________________________________________________________ test_pfc_pause_single_lossless_prio[multidut_port_info1-yy39top-lc4|3] ___________________________________________________________________________
___________________________________________________________________________ test_pfc_pause_single_lossless_prio[multidut_port_info1-yy39top-lc4|4] ___________________________________________________________________________
__________________________________________________________________________________ test_pfc_pause_multi_lossless_prio[multidut_port_info0] ___________________________________________________________________________________
__________________________________________________________________________________ test_pfc_pause_multi_lossless_prio[multidut_port_info1] ___________________________________________________________________________________
_____________________________________________________________________ test_pfc_pause_single_lossless_prio_reboot[multidut_port_info0-cold-yy39top-lc4|3] _____________________________________________________________________
_____________________________________________________________________ test_pfc_pause_single_lossless_prio_reboot[multidut_port_info1-cold-yy39top-lc4|3] _____________________________________________________________________
____________________________________________________________________________ test_pfc_pause_multi_lossless_prio_reboot[multidut_port_info0-cold] _____________________________________________________________________________
____________________________________________________________________________ test_pfc_pause_multi_lossless_prio_reboot[multidut_port_info1-cold] _____________________________________________________________________________
--------------------------------------------------------------- generated xml file: /run_logs/ixia/buffer_size/2024-12-09-23-31-36/tr_2024-12-09-23-31-36.xml ----------------------------------------------------------------
INFO:root:Can not get Allure report URL. Please check logs
--------------------------------------------------------------------------------------------------- live log sessionfinish ---------------------------------------------------------------------------------------------------
01:13:18 __init__.pytest_terminal_summary         L0067 INFO   | Can not get Allure report URL. Please check logs
================================================================================================== short test summary info ===================================================================================================
PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py::test_pfc_pause_single_lossless_prio[multidut_port_info0-yy39top-lc4|3]
PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py::test_pfc_pause_single_lossless_prio[multidut_port_info0-yy39top-lc4|4]
PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py::test_pfc_pause_single_lossless_prio[multidut_port_info1-yy39top-lc4|3]
PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py::test_pfc_pause_single_lossless_prio[multidut_port_info1-yy39top-lc4|4]
PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py::test_pfc_pause_multi_lossless_prio[multidut_port_info0]
PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py::test_pfc_pause_multi_lossless_prio[multidut_port_info1]
PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py::test_pfc_pause_single_lossless_prio_reboot[multidut_port_info0-cold-yy39top-lc4|3]
PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py::test_pfc_pause_single_lossless_prio_reboot[multidut_port_info1-cold-yy39top-lc4|3]
PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py::test_pfc_pause_multi_lossless_prio_reboot[multidut_port_info0-cold]
PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py::test_pfc_pause_multi_lossless_prio_reboot[multidut_port_info1-cold]
SKIPPED [2] snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py:139: Reboot type warm is not supported on cisco-8000 switches
SKIPPED [2] snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py:139: Reboot type fast is not supported on cisco-8000 switches
SKIPPED [2] snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py:199: Reboot type warm is not supported on cisco-8000 switches
SKIPPED [2] snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py:199: Reboot type fast is not supported on cisco-8000 switches
================================================================================== 10 passed, 8 skipped, 14 warnings in 6099.48s (1:41:39) ===================================================================================
sonic@snappi-sonic-mgmt-vanilla-202405-t2:/data/tests$ 
Any platform specific information?

Co-authored-by: jianquanye@microsoft.com
  • Loading branch information
rraghav-cisco authored and mssonicbld committed Jan 2, 2025
1 parent aa63166 commit ba65678
Show file tree
Hide file tree
Showing 3 changed files with 11 additions and 7 deletions.
12 changes: 7 additions & 5 deletions tests/common/snappi_tests/traffic_generation.py
Original file line number Diff line number Diff line change
Expand Up @@ -553,15 +553,17 @@ def verify_basic_test_flow(flow_metrics,
snappi_extra_params.test_tx_frames = test_tx_frames


def verify_in_flight_buffer_pkts(duthost,
def verify_in_flight_buffer_pkts(egress_duthost,
ingress_duthost,
flow_metrics,
snappi_extra_params, asic_value=None):
"""
Verify in-flight TX bytes of test flows should be held by switch buffer unless PFC delay is applied
for when test traffic is expected to be paused
Args:
duthost (obj): DUT host object
egress_duthost (obj): DUT host object for egress.
ingress_duthost (obj): DUT host object for ingress.
flow_metrics (list): per-flow statistics
snappi_extra_params (SnappiTestParams obj): additional parameters for Snappi traffic
Returns:
Expand All @@ -570,7 +572,7 @@ def verify_in_flight_buffer_pkts(duthost,
data_flow_config = snappi_extra_params.traffic_flow_config.data_flow_config
tx_frames_total = sum(metric.frames_tx for metric in flow_metrics if data_flow_config["flow_name"] in metric.name)
tx_bytes_total = tx_frames_total * data_flow_config["flow_pkt_size"]
dut_buffer_size = get_lossless_buffer_size(host_ans=duthost)
dut_buffer_size = get_lossless_buffer_size(host_ans=ingress_duthost)
headroom_test_params = snappi_extra_params.headroom_test_params
dut_port_config = snappi_extra_params.base_flow_config["dut_port_config"]
pytest_assert(dut_port_config is not None, "Flow port config is not provided")
Expand All @@ -589,7 +591,7 @@ def verify_in_flight_buffer_pkts(duthost,

for peer_port, prios in dut_port_config[0].items():
for prio in prios:
dropped_packets = get_pg_dropped_packets(duthost, peer_port, prio, asic_value)
dropped_packets = get_pg_dropped_packets(egress_duthost, peer_port, prio, asic_value)
pytest_assert(dropped_packets > 0,
"Total TX dropped packets {} should be more than 0".
format(dropped_packets))
Expand All @@ -600,7 +602,7 @@ def verify_in_flight_buffer_pkts(duthost,

for peer_port, prios in dut_port_config[0].items():
for prio in prios:
dropped_packets = get_pg_dropped_packets(duthost, peer_port, prio, asic_value)
dropped_packets = get_pg_dropped_packets(egress_duthost, peer_port, prio, asic_value)
pytest_assert(dropped_packets == 0,
"Total TX dropped packets {} should be 0".
format(dropped_packets))
Expand Down
3 changes: 2 additions & 1 deletion tests/snappi_tests/multidut/pfc/files/multidut_helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -291,7 +291,8 @@ def run_pfc_test(api,

if test_traffic_pause:
# Verify in flight TX packets count relative to switch buffer size
verify_in_flight_buffer_pkts(duthost=egress_duthost,
verify_in_flight_buffer_pkts(egress_duthost=egress_duthost,
ingress_duthost=ingress_duthost,
flow_metrics=in_flight_flow_metrics,
snappi_extra_params=snappi_extra_params,
asic_value=tx_port['asic_value'])
Expand Down
3 changes: 2 additions & 1 deletion tests/snappi_tests/pfc/files/helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -290,7 +290,8 @@ def run_pfc_test(api,

if test_traffic_pause:
# Verify in flight TX packets count relative to switch buffer size
verify_in_flight_buffer_pkts(duthost=duthost,
verify_in_flight_buffer_pkts(egress_duthost=duthost,
ingress_duthost=duthost,
flow_metrics=in_flight_flow_metrics,
snappi_extra_params=snappi_extra_params)
else:
Expand Down

0 comments on commit ba65678

Please sign in to comment.