Commit 7a82ee6

nrockershousen authored and GitHub Enterprise committed Dec 10, 2024

TECHPUBS-4452: HPE Slingshot Host Software User Guide (#101)

* TECHPUBS-4452: HPE Slingshot Host Software User Guide
* TECHPUBS-4452: grammar, wording, and review feedback

1 parent 1353b7e, commit 7a82ee6

37 files changed: +978 −3 lines changed
 

.spelling

+34

@@ -20,20 +20,29 @@
200gb
200Gbps
2-port
+4KiB
802.1Q
+ack
+ACKs
+acknowledgment
+acknowledgments
adminStatus
AMA
AMAs
amdgpu
+AMOs
aarch64
AERs
all2all
analytics
+analyze
+analyzing
Ansible
API
APIs
arm64
Arista
+artifact
Artifactory
ASIC
ASIC_0
@@ -61,6 +70,7 @@ BOS
bootprep
catalog
Cassini
+Center
Ceph
CFS
CFS-based
@@ -126,6 +136,7 @@ deallocation
debugfs
default.yml
defragmented
+deregistering
dgnettest
diags
diskless
@@ -162,6 +173,7 @@ EC_TRNSNT_S
EC_UNCOR_NS
EC_UNCOR_S
EPEL
+enablement
ENOMEM
ENOENT
eth0
@@ -174,6 +186,7 @@ EX235a
failback
Failback
failover
+favoring
fi_info
fi_pingpong
Flavored
@@ -183,6 +196,7 @@ GbE
Gbps
Gen4
gc_thresh
+GDRCopy
Git
Gitea
GPCNeT
@@ -197,6 +211,7 @@ HealthCheck
HeartBeat
heatsink
highpriority
+high_empty
hodagd
honor
honoring
@@ -216,10 +231,13 @@ hsn_traffic
hugepages
Hugepages
Jenkinsfile
+IB-over-Ethernet
IBM
ifcfg
IMAGE_NAME
image.rpmlist
+incast
+inflight
initramfs
initrd
int
@@ -242,6 +260,7 @@ Keycloak
keycloak_group
keycloak_passwd
keypair
+Kfabric
kfi
kfi1
kfi2
@@ -255,6 +274,8 @@ libcxi
Libfabric
libfabric
libfabric-devel
+Libfabric-to-NCCL
+Libfabric-to-RCCL
libpals
limits
Linux
@@ -270,13 +291,15 @@ LNM
Loadbalance
loadbalance
localtime
+lockup
LOG_DEBUG
LogLevelMax
LOG_NOTICE
LOG_INFO
LOG_WARN
Loopback
loopback
+low_empty
low-noise-mode
lownoise-service
Lua
@@ -296,16 +319,19 @@ Mellanox
memhooks
Memhooks
metadata
+misconfigurations
modprobe
mountpoints
MOFED
MPI
+MPI-3
MPI-3.1
MPIR
MpiDefault
MpiParams
mpiexec
mpirun
+MRs
msr-safe
munged*
multisocket
@@ -352,6 +378,7 @@ nil
node-identity
nodename
non-CFS
+non-HPE
non-VLAN
nonprivileged
Nonprivileged
@@ -362,6 +389,7 @@ NVIDIA
ntp
OData
ogopogod
+onloaded
OOM
OPA
OpenMPI
@@ -387,6 +415,7 @@ Podman
PMI
PMIx
pmix
+preemptive
prepended
prepends
ProLiant
@@ -444,6 +473,7 @@ serdes
SEQUENCE_ERROR
shadow
SharePoint
+Shmem
SHS
shs-docs
shs-version
@@ -486,6 +516,7 @@ TCTs
TEMPLATE_NAME
tmpfs
TLV
+toolkits
traceback
tunable
tunables
@@ -547,6 +578,8 @@ Zypper
2.x
3.x
4.x
+5.0.x
+5.1.2
cos-2.x
sle15spx
SSHOT1.2.1
@@ -571,6 +604,7 @@ S-9009
S-9010
S-9011
S-9012
+S-9929
#

@@ -0,0 +1,57 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE map PUBLIC "-//OASIS//DTD DITA Map//EN" "map.dtd">
<map id="shs_relnotes">
  <title>HPE Slingshot Host Software User Guide (S-9014) (@product_version@)</title>
  <topicmeta>
    <shortdesc>This publication describes user procedures for SHS.</shortdesc>
    <data name="pubsnumber" value="S-9014"></data>
    <data name="edition" value="SHS Software Release @product_version@"></data>
  </topicmeta>
  <topicref href="VeRsIoN.md" format="mdita" />
  <topicref href="user/about.md" format="mdita" />
  <topichead>
    <topicmeta>
      <navtitle>HPE Slingshot NIC overview</navtitle>
    </topicmeta>
    <topicref href="user/hardware_overview.md" format="mdita" />
    <topicref href="user/libfabric_and_the_hpe_slingshot_nic_offloads.md" format="mdita" />
    <topicref href="user/memory_registration.md" format="mdita" />
    <topicref href="user/hardware_offload_capabilities.md" format="mdita" />
    <topicref href="user/software_architecture.md" format="mdita" />
    <topicref href="user/performance_counters.md" format="mdita" />
    <topicref href="user/ip_networking_considerations.md" format="mdita" />
  </topichead>
  <topichead>
    <topicmeta>
      <navtitle>HPE Slingshot NIC Libfabric</navtitle>
    </topicmeta>
    <topicref href="user/user_configurable_libfabric_environment_variables.md" format="mdita">
      <topicref href="user/rdma_messaging_and_relationship_to_environment_settings.md" format="mdita"/>
      <topicref href="user/memory_cache_monitor_settings.md" format="mdita"/>
      <topicref href="user/endpoint_receive_size_attribute.md" format="mdita"/>
      <topicref href="user/endpoint_transmit_size_attribute.md" format="mdita"/>
      <topicref href="user/completion_queue_size_attribute.md" format="mdita"/>
      <topicref href="user/expected_number_of_ranks_and_peers.md" format="mdita"/>
      <topicref href="user/tag_matching_mode_settings.md" format="mdita"/>
      <topicref href="user/rendezvous_protocol_configuration.md" format="mdita"/>
    </topicref>
    <topicref href="user/debug_performance_and_failure_issues.md" format="mdita"/>
  </topichead>
  <topicref href="user/application_software_overview.md" format="mdita">
    <topicref href="user/hpe_cray_programming_environment.md" format="mdita"/>
    <topicref href="user/nccl.md" format="mdita"/>
    <topicref href="user/rccl.md" format="mdita"/>
    <topicref href="user/intel_mpi.md" format="mdita"/>
    <topicref href="user/daos.md" format="mdita"/>
    <topicref href="user/openmpi.md" format="mdita"/>
  </topicref>
  <topichead>
    <topicmeta>
      <navtitle>Appendix</navtitle>
    </topicmeta>
    <topicref href="user/hpe_slingshot_nic_rdma_protocol_and_traffic_classes.md" format="mdita"/>
    <topicref href="user/ip_performance_and_configuration_settings.md" format="mdita"/>
    <topicref href="user/memory_registration_and_cache_monitors.md" format="mdita"/>
    <topicref href="user/libfabric_runtime_configurable_parameters.md" format="mdita"/>
  </topichead>
</map>

@@ -0,0 +1,17 @@
{
  "content_type": "htmlzip",
  "content_class": "html-default",
  "source_system": "git",
  "source_system_id": "https://github.com/Cray-HPE/docs-shs",
  "source_system_version": "@docs_git_hash@",
  "lifecycle": "DRAFT",
  "products": ["1013247219","1013083813"],
  "product_version": "@product_version@",
  "full_title": "HPE Slingshot Host Software User Guide (S-9014) @product_version@",
  "description": "This publication includes user procedures for SHS.",
  "language_code": "en_US",
  "submitter": "nathan.rockershousen@hpe.com",
  "company_info": "HPE-green",
  "customer_available_date": "",
  "content_org": "CMG708"
}

docs/portal/developer-portal/Makefile

+1

@@ -167,6 +167,7 @@ dir: $(TMPDIR)/VeRsIoN.md
 	cp -r performance $(TMPDIR)
 	cp -r operations $(TMPDIR)
 	cp -r overview $(TMPDIR)
+	cp -r user $(TMPDIR)
 	cp -r images $(TMPDIR)

docs/portal/developer-portal/install/kdreg2_introduction.md

+1 −1

@@ -31,7 +31,7 @@ Each of the traditional monitors has advantages and disadvantages. Memhooks are
 By default, HPE Slingshot uses the memhooks monitor unless set otherwise with the appropriate Libfabric environment variable. Also, HPE guides to select userfaultfd for applications that use NCCL or RCCL collectives libraries as they can hang at scale under memhooks.

 To overcome many of the previously described limitations as well as avoiding the need to configure this per-application, HPE introduced kdreg2 as a third memory cache monitor. kdreg2 is provided as a Linux kernel module and uses an open-source licensing model.
-As of the date of this note, it ships in the HPE Slingshot host software distribution and is optionally installed.
+As of the date of this note, it ships in the HPE Slingshot Host Software distribution and is optionally installed.
 Future releases may install this by default, and eventually HPE expects HPE Slingshot NIC Libfabric provider to select kdreg2 by default instead of memhooks.

 kdreg2 uses kernel mechanisms to monitor mapping changes and provides synchronous notification to the memory registration cache. It can report changes at the byte level to any memory within the application’s virtual address space.

docs/portal/developer-portal/troubleshoot/cxi/Common_CXI_core_error_messages.md

+1 −1

@@ -110,7 +110,7 @@ echo "e4000003,80000000" > /sys/class/cxi/cxi0/device/err_flgs_irqa/hni_pml/no_p

 ## `tct_tbl_dealloc` errors

-This error occurs when, under certain conditions, the HPE Slingshot host software stack does not take proper precautions to prevent the HPE Slingshot 200 GbE NIC from entering an error state. An example of such a condition that may initiate this error - is a fabric event causing packet transfers to be significantly delayed. Normal NIC and fabric operation is not expected to initiate this error.
+This error occurs when, under certain conditions, the HPE Slingshot Host Software stack does not take proper precautions to prevent the HPE Slingshot 200 GbE NIC from entering an error state. An example of such a condition that may initiate this error - is a fabric event causing packet transfers to be significantly delayed. Normal NIC and fabric operation is not expected to initiate this error.

 The following is an example of this error:

@@ -0,0 +1,9 @@
# About this publication

This document provides an overview of the HPE Slingshot NIC software environment for application users. It includes background information on the "theory of operations" to offer context for product configuration and troubleshooting. This document supplements the configuration and troubleshooting information found in the product documentation.

Tuning guidance discussed here is specific to each system or application, so consider your intended application workload and system configuration. For example, the HPE Cray Programming Environment runtime middleware (MPI and SHMEM) sets default values, as detailed in this document and in the Cray PE documentation.
Users may need to adjust settings for non-HPE Cray Software, such as open-source MPI stacks that may not have tuned values, and for specific applications.

Default environment settings are rarely changed to avoid unintended impacts during upgrades. Therefore, users are encouraged to evaluate whether adjusting environment variables will improve performance. Tuning environment settings is also useful when a specific application is failing or running slowly.

@@ -0,0 +1,4 @@
# Configure application software

This section provides guidance on configuring applications for the HPE Slingshot NIC using the environment variables previously described, sharing best-known methods from HPE and other users.
The needs of specific applications with specific data sets may always vary from these guidelines.

@@ -0,0 +1,11 @@
# Completion queue size attribute (`FI_CXI_DEFAULT_CQ_SIZE`)

This variable specifies the maximum number of entries in the CXI provider completion queue, which backs the software and hardware event queues used to generate Libfabric completion events.
While the software queues can grow dynamically, hardware event queue sizes are static. If the hardware event queue is undersized, it fills more quickly than expected, and the next operation targeting a full event queue results in the message operation being dropped and flow control being triggered. Recovering from flow control requires expensive, side-band, CXI-provider-internal messaging, which can appear as a lockup to the user.

The provider default is 1024. Users are encouraged to set the completion queue size attribute based on the expected number of inflight RDMA operations to and from a single endpoint. An application or middleware, such as MPI, can override the provider default value.
The default CXI provider value is sized to handle the sum of the TX and RX default values, and it must not be below the sum of the TX and RX values if they have been changed from the defaults. Cray MPI sets this value to a default size of 131072.
This size is partially an artifact of wanting to prevent a condition in earlier versions of the CXI provider where overflowing the buffer could cause lockups.
This is no longer the case; instead, overflowing the buffer causes slower performance because it triggers flow control.

The impact of sizing this too high is reserving extra host memory that may ultimately be unnecessary.

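As a minimal sketch of how this sizing might be applied per job (the launcher, application name, and values are placeholders, not recommendations):

```bash
# Keep the completion queue at least as large as the sum of the TX and RX
# sizes; Cray MPI uses 131072. Values and the launcher are illustrative only.
export FI_CXI_DEFAULT_TX_SIZE=1024
export FI_CXI_DEFAULT_RX_SIZE=1024
export FI_CXI_DEFAULT_CQ_SIZE=131072   # >= TX + RX, sized for inflight RDMA per endpoint

srun ./my_app
```
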
@@ -0,0 +1,12 @@
# DAOS

Intel DAOS sets this list of environment variables for compatibility with the HPE Slingshot Host Software (SHS) stack.

- `setenv("CRT_MRC_ENABLE","1")`
- `setenv("FI_CXI_OPTIMIZED_MRS","0")`
- `setenv("FI_CXI_RX_MATCH_MODE","hybrid")`
- `setenv("FI_MR_CACHE_MONITOR","memhooks")`
- `setenv("FI_CXI_REQ_BUF_MIN_POSTED","8")`
- `setenv("FI_CXI_REQ_BUF_SIZE","8388608")`
- `setenv("FI_CXI_DEFAULT_CQ_SIZE","131072")`
- `setenv("FI_CXI_OFLOW_BUF_SIZE","8388608")`

@@ -0,0 +1,27 @@
# Debug performance and failure issues

This section describes how to debug applications once a fabric is considered operational.

When a fabric is first being brought up and applications are failing, there can be many issues related to either the network or the host. Transient network failures can impact applications, but debugging whether that is the cause of the application failure is not covered here in depth.
For example, if links are flapping and causing an application to fail, use link debugging procedures.

## Prerequisites

- AMAs must be assigned to every NIC as is done at boot up.
- TCP communication must be working. Even for RDMA communications, the job scheduler and MPI use TCP/IP to set up connections. If a system is being set up, TCP failures can relate to Linux misconfigurations in the ARP cache, static ARP tables, or missing routing rules that should have been set up using `ifroute` during boot up (for nodes with more than one NIC).
- VNI job configuration must be enabled unless the system is running with the “default” `cxi-service`.
- For systems with GPUs, there is a matched set of GPU drivers and programming toolkits for each version of the `cxi` driver as documented in the release notes. Install the GDRCopy library for NVIDIA GPUs.

## Debug steps

The following is a high-level list of actions that can be taken to debug applications:

- Check the _HPE Slingshot Host Software Release Notes_ for known issues or resolved issues. If not running the latest release, check the release notes for the releases that came after the running system.
- Run the application with Libfabric logging enabled, `FI_LOG_LEVEL=warn` and `FI_LOG_PROV=cxi` (see the sketch after this list). The resulting logs provide guidance and will greatly aid the support teams in responding to tickets.
- For memory registration related issues, try running with the `kdreg2` memory monitor to see if the issue relates to the choice of memory cache monitor. It is also possible to disable memory registration caching altogether, which may free up a deadlocking application and allow it to run instead of locking up. If that helps, it points to tuning the memory registration cache settings.
- If failures are being caused by hardware matching resource exhaustion, try setting the matching mode to hybrid.
- For general concern with resource exhaustion when not running Cray MPI, try setting the environment variables to larger sizes. Using the Cray MPI settings described below plus setting the matching mode to hybrid can help detect whether the default settings are too small for the system or application. If so, subsequent testing can help tune the sizes to avoid unneeded memory consumption.
- If the application performed differently after an upgrade to the HPE Slingshot Host Software, it is possible to try running with the previous version of the Libfabric user space libraries, or even a more recent version of the Libfabric libraries. This might be easier for a user to try than building a new host image. (It is possible that this combination will not work; ask the HPE support team whether there are any known incompatibilities.) Today mixing and matching is not always an officially tested or supported combination, but it can be helpful in debugging and sometimes will be perfectly fine in production.
- Try the alternative rendezvous protocol. If the application uses large messages and performance is glacially slow, following the instructions for the alternative rendezvous protocol may be a useful debug step.
- Collect the NIC counters for an application. See the _HPE Cray Cassini Performance Counters User Guide (S-9929)_ on the [HPE Support Center](https://support.hpe.com/connect/s/?language=en_US) for details.
  Counters are collected with Cray MPI, Libfabric, `sysfs`, or LDMS; different deployments use different strategies. Some of these counters are the same as those collected on the switch port but are easier for the user to access. These can reveal issues such as PCIe congestion, network congestion (pause exertions), and other factors. This can also be of great use to the support teams in responding to tickets.

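The following sketch illustrates the logging step above; the launcher, application name, and log file name are placeholders:

```bash
# Capture cxi provider warnings for a single run; the launcher, application
# name, and log file name are placeholders.
export FI_LOG_LEVEL=warn
export FI_LOG_PROV=cxi
srun ./my_app 2> cxi_warnings.log
```
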
@@ -0,0 +1,9 @@
# Endpoint receive size attribute (`FI_CXI_DEFAULT_RX_SIZE`)

This attribute sizes the internal receive command and hardware event queues at job start up. Users are encouraged to set the endpoint receive size attribute based on the number of outstanding receive buffers being posted. The primary benefit of changing the default setting is when running in hybrid match mode, which is more common with HPE Slingshot release 2.1.1 and later.
See [Tag matching mode settings](tag_matching_mode_settings.md#tag-matching-mode-settings-fi_cxi_rx_match_mode) for more information.

The current default is 512 (which is not changed by Cray MPI). Over-specifying can consume more memory, while under-specifying can cause flow control to be exerted, which reduces performance. When running in “hybrid mode” (see [Tag matching mode settings](tag_matching_mode_settings.md#tag-matching-mode-settings-fi_cxi_rx_match_mode)), over-specifying the number of hardware receive buffers will force other processes to use a software endpoint.

Libfabric allows applications to suggest a receive attribute size in the `fi_info` hints specific to an application.
If explicitly set, the `cxi` provider will use the size specified rather than the value of this environment variable.

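As a hedged illustration (the value, launcher, and application name are placeholders), the provider's advertised receive attributes can be inspected with the standard `fi_info` utility, and the default can be overridden per job:

```bash
# Show the cxi provider's advertised receive attributes; the verbose output
# includes the rx_attr block with its default size.
fi_info -p cxi -v | grep -A 8 "rx_attr"

# Override the default receive size for a job, for example when using hybrid
# match mode. The value and launcher are illustrative only.
export FI_CXI_DEFAULT_RX_SIZE=1024
export FI_CXI_RX_MATCH_MODE=hybrid
srun ./my_app
```
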
@@ -0,0 +1,12 @@
# Endpoint transmit size attribute (`FI_CXI_DEFAULT_TX_SIZE`)

The endpoint transmit size attribute sizes the internal command and hardware event queues. This controls how many messages are in flight, so at a minimum, users are encouraged to set the endpoint transmit size attribute based on the expected number of inflight, initiator RDMA operations.

If users are going to issue more messages than the CXI provider rendezvous limit (`FI_CXI_RDZV_THRESHOLD`), the transmit size attribute must also account for the number of outstanding, unexpected rendezvous operations.
That is, the sum of inflight, initiator RDMA operations and outstanding, unexpected rendezvous operations.
See [Rendezvous protocol configuration](rendezvous_protocol_configuration.md#rendezvous-protocol-configuration) for more information.

The current default is 512. Cray MPI sets this to 1024.

If the setting is too high, it can consume more memory than necessary and allow too many messages to be in flight, potentially overwhelming an endpoint. Conversely, if the setting is too low, it can impact performance due to the instantiation of flow control.
In some cases, a low setting may cause a deadlock because an application might post too many transmissions before it can post a receive. These issues are often caused by poorly written applications. This situation typically occurs with the rendezvous protocol, where too many unexpected messages are received.

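A minimal sketch of the sizing arithmetic described above; the counts, launcher, and application name are placeholders chosen for illustration:

```bash
# Size the transmit queue from the expected traffic pattern: inflight
# initiator RDMA operations plus outstanding unexpected rendezvous operations.
INFLIGHT_RDMA=512
UNEXPECTED_RENDEZVOUS=256
export FI_CXI_DEFAULT_TX_SIZE=$((INFLIGHT_RDMA + UNEXPECTED_RENDEZVOUS))

srun ./my_app
```
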
@@ -0,0 +1,9 @@
# Expected number of ranks and peers (`FI_UNIVERSE_SIZE`)

The Libfabric `FI_UNIVERSE_SIZE` environment variable defines the number of ranks/peers an application is expected to communicate with. This value is used in the CXI provider to scale the portals flow-control resources used for side-band communication.

The side-band control event queue size is based on the universe size so that more resources are applied as the job scales.

Libfabric sets this default to 1024. The maximum number of ranks when using Cray MPI would be roughly one per core, so 256 ranks would be 256 cores (per NIC). Platforms that have more than 256 cores per NIC may need to increase this size.

If set too small, performance may be impacted by constraining the number of side-band messages that can be outstanding during portals flow-control recovery. If set too large, more memory may be needlessly consumed.

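A hedged sketch of one way to scale this setting with the job; the node and rank counts, launcher, and application name are placeholders, and the right value depends on how many peers the application actually communicates with:

```bash
# Scale the expected peer count with the job size when it exceeds the
# Libfabric default of 1024. Counts are illustrative only.
NODES=32
RANKS_PER_NODE=64
export FI_UNIVERSE_SIZE=$((NODES * RANKS_PER_NODE))

srun ./my_app
```
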
@@ -0,0 +1,7 @@
# Hardware offload capabilities

Some key features of the HPE Slingshot NICs include offload capabilities for the following:

- **Asynchronous Message Progression Offload:** HPE Slingshot NICs provide offloading and asynchronous progression of both send and receive messages to improve system performance by enabling the overlap of communication and computation and reducing the use of memory (and memory bandwidth) by the MPI stack. When sending a message, commands are added to a command queue and progressed by the NIC without CPU involvement. Large numbers of non-blocking commands can be queued by any process (up to 65,536 if the maximum queue size is used). Small MPI messages (those with up to 192 bytes of payload data) can be written directly to a command queue, avoiding a round trip across the host interface.
- **Tag Matching:** When receiving a message, MPI point-to-point messages are directed to a list processing engine (LPE) that matches sends to receives. The LPE supports both eager and rendezvous protocols. For small messages, the LPE can match a message to a receive request and stream the payload data directly to a user buffer. For large messages, the LPE will match the message to a receive request and then issue a Get request to fetch the payload data directly to a user buffer. Tag-matching offloads are used for both expected and unexpected messages. For unexpected messages, “eager data” that arrives before a receive buffer has been posted is held in overflow buffers, and hardware matches these messages against subsequently posted receive buffers.
- **Completion Events and Triggered Operations:** HPE Slingshot network adapters support counting events and triggered operations that allow complex synchronizations, where completion of one or more operations triggers the issuing of other operations. For example, triggered operations can be used to offload progression of bulk data collectives to the NIC. Counting events and triggered operations are used to implement complex CPU and GPU operations with low overhead using HPE Slingshot-based communications.

@@ -0,0 +1,13 @@
# Hardware overview

The HPE Slingshot NIC is optimized for high performance RDMA communications in HPC and AI applications.
It enables applications to achieve high performance by offloading CPU-intensive activities to the NIC hardware to maximize the overlap of compute and communication.
The NIC simultaneously handles standard Ethernet and the optimized, non-Ethernet HPE Slingshot Transport (ST) protocol used for offloading RDMA operations.

In some cases, the NIC works with HPE Slingshot switches to maximize the unique and powerful performance capabilities of the fabric.
For example, large messages can be offloaded to use a rendezvous protocol that allows for out-of-order packet delivery, which can be adaptively routed on a packet-by-packet basis to overcome link congestion and achieve high utilization of available bandwidth while retaining application ordering constraints.
Another example is the “fine grained” flow control between the NIC and the fabric, which allows the NIC to reduce bandwidth for a specific application instead of completely pausing all flows or an entire traffic class as would happen with standard Ethernet.

The ST protocol is accessed through a connectionless software interface to deliver large scalability with lower memory footprint overhead as compared to connection-oriented protocols. The ST protocol is supported by reliable packet delivery features in the NIC and the fabric.
Link-level reliability capabilities such as link-level retry reduce packet drops that would otherwise add latency.
These hardware mechanisms work with a software-based end-to-end retry mechanism, typically referred to as the retry handler service, that acts as a “last resort” mechanism to retry packets to the destination before packets are dropped, for example when congestion has not been fully mitigated.

@@ -0,0 +1,20 @@
# HPE Cray Programming Environment

The HPE Cray Programming Environment pre-programs many of the environment variables for the HPE Slingshot NIC Libfabric.
These settings have been found to be generally useful for HPE supercomputing customers when using distributed middleware such as Cray MPI.
The settings are a useful starting point for users of other MPI middleware, though each MPI implementation may have unique attributes that could be better optimized through experimentation.

## MPI settings

- `FI_CXI_RDZV_THRESHOLD = 16384`
- `FI_CXI_RDZV_EAGER_SIZE = 2048`
- `FI_CXI_DEFAULT_CQ_SIZE = 131072`
- `FI_CXI_DEFAULT_TX_SIZE = 1024`
- `FI_CXI_OFLOW_BUF_SIZE = 12582912`
- `FI_CXI_OFLOW_BUF_COUNT = 3`
- `FI_CXI_RX_MATCH_MODE = hardware`
- `FI_CXI_REQ_BUF_MIN_POSTED = 6`
- `FI_CXI_REQ_BUF_SIZE = 12582912`
- `FI_CXI_REQ_BUF_MAX_CACHED = 0`
- `FI_MR_CACHE_MAX_SIZE = -1`
- `FI_MR_CACHE_MAX_COUNT = 524288`

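For middleware that does not pre-program these values, the list above can be exported in a job script as a starting point. The sketch below simply mirrors the list; the launcher and application name are placeholders:

```bash
# Starting-point environment for non-Cray MPI middleware, mirroring the
# Cray PE defaults listed above. Launcher and application are placeholders.
export FI_CXI_RDZV_THRESHOLD=16384
export FI_CXI_RDZV_EAGER_SIZE=2048
export FI_CXI_DEFAULT_CQ_SIZE=131072
export FI_CXI_DEFAULT_TX_SIZE=1024
export FI_CXI_OFLOW_BUF_SIZE=12582912
export FI_CXI_OFLOW_BUF_COUNT=3
export FI_CXI_RX_MATCH_MODE=hardware
export FI_CXI_REQ_BUF_MIN_POSTED=6
export FI_CXI_REQ_BUF_SIZE=12582912
export FI_CXI_REQ_BUF_MAX_CACHED=0
export FI_MR_CACHE_MAX_SIZE=-1
export FI_MR_CACHE_MAX_COUNT=524288

srun ./my_app
```
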
@@ -0,0 +1,29 @@
# HPE Slingshot NIC RDMA protocol and traffic classes

The following tables delineate how various operations/traffic types are adaptively routed.

**Table:** Adaptive routing for various operations and traffic types

| Operation / Traffic Type | Ordered – Per Flow Adaptive Routing | Unordered – Per Packet Adaptive Routing |
|-----------------------------|-------------------------------------|-----------------------------------------|
| MPI message headers | Yes | |
| MPI bulk data transfers | | Yes |
| Shmem Put/Get and MPI-3 RMA | | Yes |
| Lustre bulk data transfers | | Yes |
| TCP/UDP | Yes | |

**Table:** Description of adaptive routing for various operations and traffic types

| Operation / Traffic Type | Ordered – Per Flow Adaptive Routing | Unordered – Per Packet Adaptive Routing |
|----------------------------|-------------------------------------|-----------------------------------------|
| MPI message headers | MPI semantics require message ordering between two points. Message headers (and a small amount of data) are sent in order. Many MPI messages will be new flows, for example, whenever there is a gap of more than a few microseconds in messages to the same destination. | |
| MPI bulk data transfers | | Once an MPI message has been matched, the bulk data transfer can be unordered. The threshold for this is typically 8K; it can be tuned on a per-application basis. |
| Shmem and MPI-3 RMA | Some special cases such as put-with-signal and reliable AMOs use the “unrestricted” reliability protocol, which uses ordered delivery. | Put/Get use a sequence of unordered single packet operations. Address ordering is implemented using fence. |
| Lustre bulk data transfers | | Kfabric uses the same bulk-data primitives as MPI. They provide message ordering with unordered delivery of bulk data. |
| TCP/UDP | Each new flow selects a new path. Long-lived flows are rerouted if they encounter congestion. | |

In the absence of contention, all ordered delivery traffic is routed minimally, for example in a Dragonfly configuration, taking at most one switch-to-switch hop in the source group, one global hop, and one hop in the destination group.
In the presence of congestion, the adaptive routing mechanism can cause packets to route non-minimally to avoid the congestion.
In the non-minimal case, the hop count can double, with the packets effectively routing to a randomly chosen intermediate switch and then to the destination.

HPE Slingshot adaptive routing uses both congestion information and traffic class and Quality of Service (QoS) capabilities to prevent traffic from different applications from interfering with each other.

@@ -0,0 +1,22 @@
# Intel MPI and applications compiled with Intel MPI

Intel MPI documentation states that it supports Libfabric version 1.5.0 or later, and it recommends the latest version from the main branch of Libfabric.
Therefore, HPE Slingshot Libfabric works with Intel MPI.

As of HPE Slingshot release 2.1.2, the HPE Slingshot NIC provider is not yet packaged in the upstream OFI release and therefore is not in the version of Libfabric included with Intel MPI.
The environment variables provided here direct the Intel MPI middleware to use the Libfabric that is installed as part of the HPE Slingshot Host Software packages.

HPE expects the HPE Slingshot NIC Libfabric provider to be part of the open-source Libfabric distribution as of Libfabric version 1.21, expected in July of 2024.
When Intel MPI incorporates Libfabric 1.21 or beyond, the following instructions may not be necessary.

To point Intel MPI to the Libfabric installed with the HPE Slingshot NIC host software, the following variables must be set:

- Define the Intel MPI 2021.10 path, then source `${PATH_TO_IMPI}/setvars.sh`
- `export I_MPI_OFI_LIBRARY=/opt/cray/libfabric/1.15.2.0/lib64/libfabric.so.1`
- `export I_MPI_OFI_PROVIDER=cxi`
- `export I_MPI_OFI_LIBRARY_INTERNAL=0`

The `/opt/cray/libfabric/1.15.2.0/lib64/libfabric.so.1` reference in the previous commands may vary by HPE Slingshot release.
As of release 2.1.2, the above is correct, but there may be newer versions in HPE Slingshot release 2.2 and later.

Intel MPI does not set environment variables by default the way Cray MPI does, so users may also need to set the environment variables for particular applications or particular sizes.

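The steps above can be combined into a small launch sketch. `PATH_TO_IMPI`, the Libfabric path, and the `mpirun` invocation are examples only and vary by release and site:

```bash
# Point Intel MPI at the Libfabric installed with SHS (paths are examples).
source "${PATH_TO_IMPI}/setvars.sh"
export I_MPI_OFI_LIBRARY=/opt/cray/libfabric/1.15.2.0/lib64/libfabric.so.1
export I_MPI_OFI_PROVIDER=cxi
export I_MPI_OFI_LIBRARY_INTERNAL=0

mpirun -n 4 ./my_app
```
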
@@ -0,0 +1,14 @@
# IP networking considerations

The HPE Slingshot NIC runs IP stacks and the native `cxi` RDMA Libfabric stacks concurrently.
Libfabric stacks require IP networking even if the application primarily uses RDMA; for example, MPI and AI stacks use IP addresses to locate endpoints, and Slurm job launching subsystems rely on IP networking as well.

IP networking is configured through Linux. Guidance is provided in the HPE Slingshot documentation for how to best configure Linux IP networking for the HPE Slingshot NIC, and systems deployed with HPE Performance Cluster Manager (HPCM) or Cray System Management (CSM) often have pre-built base images that pre-populate some of the Linux settings in `system.d` boot scripts.

Users are encouraged to be familiar with the IP configuration settings of importance, such as the following:

- ARP cache sizes and timeouts
- TCP performance tuning parameters
- IP routing configuration for multi-NIC systems as per the product documentation

See [IP performance and configuration settings](ip_performance_and_configuration_settings.md#ip-performance-and-configuration-settings) for more information.

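As a hedged illustration of the ARP cache settings mentioned above, the Linux neighbor table thresholds can be inspected and raised with `sysctl`; the values shown are placeholders, and the values recommended in the product documentation should be used instead:

```bash
# Inspect the current ARP/neighbor cache thresholds.
sysctl -a 2>/dev/null | grep 'neigh.default.gc_thresh'

# Raise the thresholds (placeholder values; follow the product documentation).
sysctl -w net.ipv4.neigh.default.gc_thresh1=8192
sysctl -w net.ipv4.neigh.default.gc_thresh2=16384
sysctl -w net.ipv4.neigh.default.gc_thresh3=32768
```
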
@@ -0,0 +1,8 @@
# IP performance and configuration settings

There are many recommended settings in the product documentation for configuring TCP/IP performance.
HPE Slingshot Host Software (SHS) does not set up these Linux settings.
Review the documentation to see if these settings are in place if TCP/IP performance is a concern.

When measuring performance with IP, use the `iperf2` benchmark instead of `iperf3`. Because IP protocols run on the host, testing the networking aspect requires driving the high bandwidth of the HPE Slingshot NIC from many cores, which `iperf2` supports; `iperf3` is more of a single-threaded benchmark.
Because TCP is host dependent, when `iperf2` performance is below expectations, there can be many host-based contributing causes that are unrelated to the fabric or the NIC.

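A minimal sketch of a multi-stream `iperf2` test between two nodes; the hostname, stream count, and duration are placeholders:

```bash
# On the server node:
iperf -s

# On the client node, drive several parallel streams so more cores are used:
iperf -c server-hostname -P 16 -t 30
```
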
@@ -0,0 +1,26 @@
# Libfabric and the HPE Slingshot NIC offloads

The HPE Slingshot NIC exposes its RDMA offload capabilities to software (typically middleware such as MPI or GPU communications collective libraries) through the Libfabric APIs.
Libfabric is an industry-standard, open-source library for communications that minimizes the impedance mismatch between applications, middleware, and fabric communication hardware by being independent of the underlying networking protocols and specific network device implementation.
Its APIs are tailored to meet the middleware’s transport use cases and requirements while allowing NIC vendors to unlock hardware innovations.
The HPE Slingshot NIC, for example, delivers low latency, strong progression (overlap of compute and communications), and the ability to scale to tens of thousands of nodes.

Libfabric is a more recent, higher-level interface as compared to the “Verbs” approach used in InfiniBand and its IB-over-Ethernet (RoCE) derivative. Because it is a higher-level abstraction, closer to the application semantics, the application developer is relieved from having to implement network-technology-specific logic for optimized performance.
Libfabric software can run on various hardware fabric types without requiring the developer to rewrite most of the code to switch from one fabric type to another.
Libfabric is widely adopted today by several NIC vendors and will continue to grow as it is the preferred interface for the Ultra Ethernet Consortium’s low-latency transport standardization direction. The Libfabric core provides upward-facing APIs to the applications through network device-specific interfaces called "providers".
The provider for the HPE Slingshot NIC is the `cxi` provider. In addition to NIC-specific providers, Libfabric includes providers for “shared memory” communications (on-node), TCP/IP, and Verbs NICs.

RDMA and OS bypass are fundamental principles for achieving high-performance, low-latency networking as contrasted with the ubiquitous IP-based “sockets” communications APIs. They avoid memory copies, enable asynchronous operations, and allow direct NIC hardware access from the application.
Consider this example: when an application wants to send data, it starts a send command using calls not dissimilar from the IP sockets equivalents. The `cxi` provider then optimally selects the hardware acceleration method to achieve the most performance.

Libfabric does not specify what capabilities the underlying hardware must offload. Applications can even run on a traditional IP protocol Libfabric provider, albeit without RDMA offload benefits.
When hardware offload is provided, the resources are finite, especially as compared to host software and CPU cycles. For example, hardware-based completion queues can implement asynchronous processing. This adds performance by letting processing continue without interrupts to the application or requiring host-based mechanisms to check when communication is complete and memory can be reused.
But these hardware-based completion queues are a limited resource that must be managed properly.

For the HPE Slingshot 200Gbps NIC, resources are allocated using a `cxi` service that is configured by a privileged user that can access the kernel driver for the NIC. Some `cxi` services are created at boot time, like the service for running the Linux Ethernet stack. Other resources are configured by the host-resident job scheduler components, such as PALS (for PBS Pro) or Slurm.

Also, some resources can be managed through user-accessible environment variables. These are used by the NIC provider to configure internal options to help guide how communications can best be optimized for higher performance and lower memory consumption. They are also configurable because there may be different optimization points based on system size, processing type, and specific application attributes such as the number of messages in flight at any one time and how reliably the application’s memory can be cached.

Since not all NICs provide the same (or even any) offload capabilities, the need for and importance of Libfabric environment variables varies between different vendor NICs.
On NICs that provide substantial offloads like the HPE Slingshot NIC, managing and allocating the finite hardware resources is more important than on NICs that do not provide offloads and instead consume what looks like unlimited host CPU and memory resources and/or rely on additional memory copies to bounce buffers (which consume memory and CPU cycles and increase latency).
This can mean that an application that fails on the HPE Slingshot NIC but runs with a different NIC’s Libfabric provider may simply need its environment variables configured; such failures are sometimes misinterpreted as a bug or other fabric issue.

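As a quick, hedged check on a node, the standard Libfabric `fi_info` utility can confirm that the `cxi` provider is present and list the environment variables the installed providers expose:

```bash
# Confirm the cxi provider is available to applications on this node.
fi_info -p cxi

# List the runtime environment variables exposed by the installed providers.
fi_info -e
```
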

docs/portal/developer-portal/user/libfabric_runtime_configurable_parameters.md

+303
Large diffs are not rendered by default.
@@ -0,0 +1,39 @@
# Memory cache monitor settings

NICs that offload RDMA operations (like the HPE Slingshot NIC) enable writing and reading directly from host memory without involving the host CPU and without memory copies. Extensive software interaction in Linux, Libfabric, and the `cxi` provider is needed to make this operation work properly and optimally.
To succeed, the NIC must be able to read and write the current memory location with fresh data that is not changed by other code. To use RDMA, Libfabric provides mechanisms for the application to “register” memory to set up a specific memory location for RDMA operations.
Registration creates a contiguous block of memory to use for communications with the needed protections and mapping to the NIC hardware. The `cxi` provider requires registering memory to use RDMA.

To achieve good RDMA performance, caching memory registrations is important because the act of registering memory is costly in terms of overhead. The memory registration cache reduces the overhead of repeatedly registering and deregistering memory when the cached copy is current.
Libfabric also has extensive support for memory registration caching because it is common for software to register the same memory sections in the course of the application.
Ensuring the data in the MR cache is “fresh” is the role of the memory registration cache monitor. Libfabric supports several methods for monitoring the memory registration cache. These are selectable using the `FI_MR_CACHE_MONITOR` environment variable.

## `FI_MR_CACHE_MONITOR=userfaultfd`

`userfaultfd` (or `uffd`) is delivered as part of Linux operating system distributions. It is a kernel service for tracking memory mapping changes.

## `FI_MR_CACHE_MONITOR=memhooks`

`memhooks` is distributed as part of Libfabric. Unlike `uffd`, `memhooks` is a user-space function that traps library functions for memory allocation and deallocation.
Both `userfaultfd` and `memhooks` have advantages as well as known shortcomings as memory monitors for the MR cache.

- `memhooks` is set up primarily to monitor dynamic memory allocations, such as applications using `mmap` and `brk` memory functions. Downsides are that it cannot monitor stack allocations or static allocations, and the hook instantiation is dependent on load order, linker directives, and so on. It can also deadlock when code frees memory while holding locks, as is observed with GPU-style programming.
- `userfaultfd` uses a file descriptor for communication, which introduces a delay between detection of changes to the memory layout and acknowledgment within Libfabric. This delay can lead to memory corruption errors because scenarios such as allocation, free, and reallocation of the same address in user space are unresolvable. `userfaultfd` is also constrained to operating on page-aligned, full-page regions, making it unsuitable for data layouts commonly found in applications that utilize SHMEM.

Currently the default setting is `memhooks`. Importantly, `userfaultfd` is required when running applications with NCCL or RCCL.

## `FI_MR_CACHE_MONITOR=kdreg2`

In addition to `memhooks` and `uffd`, the HPE Slingshot NIC provides another monitor, `kdreg2`, developed by HPE and shipped in the HPE Slingshot Host Software (SHS) package for optional installation. This will be provided as open source as well. The purpose of `kdreg2` is to overcome situations where `memhooks` and `uffd` both fail, so that the application can still achieve performance by utilizing caching. `kdreg2` is able to monitor static, dynamic, and stack memory. It can support arbitrary alignment. It provides synchronous notification mechanisms. And it can employ extra data to detect allocate/free/reallocate scenarios.
It must be installed into the host OS kernel at image creation time.

`kdreg2` is not installed by default, and HPE encourages system administrators to ensure it is installed so that users can try it and see where it provides benefits and whether it can be a single cache monitor for all applications. Future releases of SHS will change the installation to be done by default, and will likely make this the default memory cache monitor in the future.

There are two Libfabric parameters for the memory registration cache that are of note:

- `FI_MR_CACHE_MAX_SIZE`: This environment variable specifies the maximum size (in bytes) for the cache maintained by the MR cache monitor. A setting of `-1` means that there is no maximum. The default in Libfabric is set to the system memory size divided by `cpu_cnt` (the number of processors) divided by 2. HPE Cray Programming Environment sets this to unlimited.
- `FI_MR_CACHE_MAX_COUNT`: This environment variable controls the maximum number of cached MRs that the MR cache monitor can maintain. The Libfabric default is 1024. In general, 1024 is too small, and the recommendation is to increase this for most applications (for example, HPE Cray MPI). HPE Cray Programming Environment sets this to 524288. The increases by the PE software ensure better reuse of locations for RDMA operations when there is an active MR cache and avoid the performance hit of having a rotating list of locations that exceed the MR cache limits.

One failure mode when the memory registration cache monitor is not working properly is deadlock. If an application has problems, one can reduce `FI_MR_CACHE_MAX_COUNT` to 0, which disables the caching.
This will cause the application to run slower, but if it avoids a problem, most frequently a deadlock, one can try a different monitor.
If performance is worse with the cache enabled, then increase the size of the memory registration cache.

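The following per-job sketch pulls these settings together; the monitor choice and cache limits are illustrative, the launcher and application name are placeholders, and `kdreg2` must already be installed in the host image:

```bash
# Select a memory registration cache monitor and cache limits for one job.
export FI_MR_CACHE_MONITOR=kdreg2        # or: memhooks, userfaultfd
export FI_MR_CACHE_MAX_COUNT=524288      # Libfabric default is 1024
export FI_MR_CACHE_MAX_SIZE=-1           # -1 removes the byte-size limit

# While debugging a suspected cache problem (often a deadlock), caching can
# be disabled entirely:
# export FI_MR_CACHE_MAX_COUNT=0

srun ./my_app
```
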
@@ -0,0 +1,15 @@
# Memory registration

High-performance applications use Remote Direct Memory Access (RDMA), allowing the networking stack to read and write data directly into application memory, bypassing normal operating system protection mechanisms.
To facilitate data transfer from application space to the NIC, memory ranges are "registered" through an operation that translates virtual addresses into physical addresses and pins the relevant pages.

HPE Slingshot leverages Libfabric for its communication libraries, which manage the registration of RDMA memory regions.
Libfabric supports the registration of both CPU and GPU memory.

In addition to RDMA transfer capabilities, Libfabric implements a "memory registration cache."
This feature enhances performance by allowing RDMA hardware to reuse previous registrations, thereby reducing computational overhead. This is particularly effective for transfers that repeatedly use the same memory regions and for applications with a limited number of transfer locations.
However, different applications may require different caching optimizations that can impact performance. Libfabric environment variables can be used to tune caching behavior for specific applications.

The memory registration cache requires a mechanism to maintain accurate data.
The Memory Registration Cache Monitor tracks changes in the memory map and invalidates the appropriate cached memory registration entries as needed. Libfabric supports various memory registration cache monitors (`userfaultfd`, `memhooks`, and `kdreg2`), which use different techniques to detect changes in the memory map, accommodating application-specific memory allocation strategies.
These monitors and their associated environment variables are described in more detail in later sections of this document.

@@ -0,0 +1,52 @@
# Memory registration and cache monitors

High performance applications use Remote Direct Memory Access (RDMA), in which the networking stack reads and writes data directly into application memory, bypassing normal operating system protection mechanisms.
To orchestrate the transfer of data from the application space to the NIC, the memory ranges are “registered” with an operation that translates the virtual addresses into physical addresses and orchestrates the pinning of the relevant pages. HPE Slingshot uses Libfabric as the basis for its communication libraries, and Libfabric manages the registration of RDMA memory regions.

On top of RDMA transfer capabilities, Libfabric implements a “memory registration cache.” This functionality provides an additional layer of performance by allowing the RDMA hardware to re-use previous registrations and avoid the associated computational cost.
This is especially effective for transfers which repeatedly use the same pieces of memory and for applications which have a bounded number of transfer locations.

Caching of memory registrations (and the concomitant lazy de-registration) is not a panacea: applications which do not re-use memory locations for data transfer can needlessly incur cache management overhead and experience degraded performance.
Similarly, applications which attempt to register and use large numbers of memory regions may exceed the (finite) capacity of the cache and inadvertently encounter cache thrashing. In these cases, the appropriate remedy may be to either disable the cache or increase the maximum size to match the I/O patterns. Each of these remedies is available via Libfabric environment variables.

The memory registration cache requires an associated entity to maintain accurate data in the cache. The Memory Registration Cache Monitor watches the memory map for changes that require it to invalidate the appropriate cached memory registration entries. There are several different techniques which can be used to detect changes to the memory map. A given technique may or may not be effective for a specific application and its memory allocation strategies. Libfabric supports the use of various memory registration cache monitors.

Effective monitoring of memory map changes is crucial for proper memory registration cache functionality. Failure to detect changes can result in data transfer to physical memory which is now mapped elsewhere in the process, or even mapped to another process. The result is corruption.
The corruption can result in various execution failures such as hangs due to smashed state, slow or stopped execution from excessive retries because some values being watched are not getting updated, or outright failure of the data transfer.

The traditional memory monitors provided with Libfabric are `userfaultfd` and `memhooks`.

- **userfaultfd:** A Linux kernel service which gives user-space applications notifications about memory mapping changes via a well-known file descriptor. `userfaultfd` operates on the page level and allows applications to monitor changes to all writable pages within the process’s virtual address space. The descriptor is queried, and address ranges which match the `userfaultfd` events are purged from the memory registration cache.
- **memhooks:** A user-space subsystem distributed as part of Libfabric which traps library functions for memory allocation/deallocation calls within an application. When an application allocates memory, `memhooks` tracks that memory, and when deallocation occurs, it informs the memory registration cache to purge any corresponding entries. `memhooks` monitors memory which has been dynamically allocated during the execution of the application.

Each of the traditional monitors has advantages and disadvantages. `memhooks` is synchronous with the application but cannot monitor stack or static allocations, and its ability to monitor effectively can depend on load order, linker directives, and other application-specific attributes which affect the trapping mechanisms.
`userfaultfd` can monitor any page-aligned writable memory but cannot provide synchronous notification of memory changes; this means that allocating, freeing, and then reallocating the same address range is unresolvable and error prone.
It also cannot monitor non-page-aligned memory as is common in some HPC applications (specifically SHMEM).

By default, HPE Slingshot uses the `memhooks` monitor unless set otherwise with the appropriate Libfabric environment variable.
Also, HPE guides users to select `userfaultfd` for applications that use the NCCL or RCCL collectives libraries as they can hang at scale under `memhooks`.

To overcome many of the previously described limitations and to avoid the need to configure this per-application, HPE introduced `kdreg2` as a third memory cache monitor. `kdreg2` is provided as a Linux kernel module and uses an open-source licensing model.
As of the date of this note, it ships in the HPE Slingshot Host Software distribution and is optionally installed. (Future releases may install this by default, and eventually HPE expects the HPE Slingshot NIC Libfabric provider to select `kdreg2` by default instead of `memhooks`.)

`kdreg2` uses kernel mechanisms to monitor mapping changes and provides synchronous notification to the memory registration cache. It can report changes at the byte level to any memory within the application’s virtual address space. Unlike `memhooks` it can monitor stack and heap memory, and unlike `userfaultfd` it provides synchronous notification of changes and can monitor partial pages.

HPE knows of no cases where `kdreg2` fails to detect memory mapping changes and thereby misdirects RDMA transfers.
On the contrary, one of the primary goals of `kdreg2` is to enable the performance advantages of memory registration caching for those applications that fail with both `memhooks` and `userfaultfd`.
`kdreg2` has been successfully deployed with enhanced performance for some weather forecasting codes which would otherwise fail with the traditional monitors.

There can be performance differences between the memory registration cache monitors as described above. In general, HPE has not characterized the range of applications to evaluate the performance enhancement observed using `kdreg2` versus `memhooks` and `userfaultfd`.
But in all cases the performance of successful execution with the memory registration cache is substantially better than execution without caching.
`kdreg2` is HPE’s solution to allow more applications to enjoy the benefits of the memory registration cache.

The size of the memory registration cache is one of the most important parameters affecting performance when caching is employed. Since the primary users of Libfabric are communication collective libraries such as SHMEM and MPI, the user may not be aware of the presence of Libfabric nor of its configuration via environment variables.
The default values are set relatively low to accommodate development systems and non-supercomputer environments.
HPE communication collectives, such as Cray MPI, increase the size of the cache by default.

In summary, `kdreg2` is available as an additional memory cache monitor that can enable applications that otherwise cannot use memory registration caching to achieve a performance advantage. For sites that run a mix of HPC applications under the default `memhooks` while setting NCCL and RCCL applications to `userfaultfd`, setting the default configuration to `kdreg2` may simplify operations by eliminating this per-application setting.

Hewlett Packard Enterprise (HPE) recommends that users do the following:

1. Install `kdreg2` in the host images. See "Install `kdreg2`" in the _HPE Slingshot Host Software Installation and Configuration Guide_.
2. Set the default memory monitor to `kdreg2` using the Libfabric environment variable.
3. Increase the Libfabric environment variables for memory registration cache size if indicated, especially for applications that are not using Cray MPI.

+21
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
2+
# NVIDIA Communications Collectives Libraries (NCCL)
3+
4+
This information is summarized from the “Running NCCL Applications” note that is available on the HPE Support Center (HPESC).
5+
6+
## Prerequisites
7+
8+
- Linux kernel 5.12 or later
9+
- NCCL support for Libfabric: Use the [open-source “OFI Plug-In” in for Libfabric-to-NCCL version 1.6.0](https://github.com/aws/aws-ofi-nccl/releases/tag/v1.6.0)
10+
- HPE Slingshot Host Software (SHS) version 2.1 or greater
11+
- CUDA and NVIDIA GPU Driver supported by the HPE SHS release version
12+
- GDRCopy must be installed
13+
14+
## Environment Settings for Libfabric
15+
16+
- `FI_MR_CACHE_MONITOR=userfaultfd`
17+
- `FI_CXI_DISABLE_HOST_REGISTER=1`
18+
- `FI_CXI_DEFAULT_CQ_SIZE=131072`
19+
- `FI_CXI_DEFAULT_TX_SIZE` must be set at least as large as the number of outstanding unexpected rendezvous messages that must be supported for the endpoint, plus 256; the default of 256 is sufficient for most applications
20+
- `FI_CXI_RDZV_PROTO=alt_read`
21+
- Enable the [Alternative Rendezvous Protocol](./rendezvous_protocol_configuration.md#alternative-rendezvous-configuration-fi_cxi_rdzv_protoalt_read) (either via the `sysfs` variable or per job through the job scheduler); a combined example of these settings appears after this list
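As an illustration, these settings might be applied together in a job or wrapper script as follows. This is a sketch only; any deviation from the listed values (for example, a larger `FI_CXI_DEFAULT_TX_SIZE`) is workload-dependent.

```bash
# Libfabric/CXI environment settings for NCCL jobs (values from the list above)
export FI_MR_CACHE_MONITOR=userfaultfd
export FI_CXI_DISABLE_HOST_REGISTER=1
export FI_CXI_DEFAULT_CQ_SIZE=131072
export FI_CXI_RDZV_PROTO=alt_read

# Raise only if the application keeps more outstanding unexpected rendezvous
# messages than the default of 256 supports.
# export FI_CXI_DEFAULT_TX_SIZE=1024
```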
@@ -0,0 +1,5 @@
1+
# OpenMPI and applications compiled with OpenMPI
2+
3+
OpenMPI supports Libfabric, and the OpenMPI community has been adding optimizations for the HPE Slingshot NIC to the OpenMPI 5.0.x and main branches, as described in the following external paper: [Open MPI for HPE Cray EX Systems](https://www.osti.gov/servlets/purl/1997634).
4+
5+
OpenMPI users need to engage with the community for support on running OpenMPI on the HPE Slingshot NIC.
@@ -0,0 +1,16 @@
1+
2+
# Performance counters
3+
4+
This section describes where to find information on a subset of performance counters that have proven to be useful in analyzing application performance.
5+
6+
## Performance counters for HPE Cray EX systems
7+
8+
See the _HPE Cray Cassini Performance Counters User Guide (S-9929)_ on the [HPE Support Center](https://support.hpe.com/connect/s/?language=en_US) for details on how to use performance counters to collect and analyze performance data.
9+
10+
This guide includes procedures for using HPE Performance Analysis Tools running on HPE Cray EX supercomputer systems.
11+
12+
## Performance counters for HPCM systems
13+
14+
See the "Troubleshooting the HPE Slingshot interconnect" section in the _HPE Performance Cluster Manager Software System Monitoring Guide_ on the [HPE Support Center](https://support.hpe.com/connect/s/?language=en_US) for details on the available performance counters for HPCM systems.
15+
16+
This guide describes the performance counters displayed in the Grafana Performance Dashboard.
+19
@@ -0,0 +1,19 @@
1+
# AMD ROCm Communications Collectives Libraries (RCCL)
2+
3+
This information is summarized from the “Running RCCL Applications” note that is available on the HPE Support Center (HPESC).
4+
5+
## Prerequisites
6+
7+
- Linux kernel 5.12 or later
8+
- RCCL support for Libfabric: Use the [open-source “OFI Plug-In” for Libfabric-to-RCCL version 1.4](https://github.com/ROCmSoftwarePlatform/aws-ofi-rccl/)
9+
- HPE Slingshot Host Software (SHS) version 2.1 or greater
10+
- ROCm GPU driver and user stack supported by the HPE SHS release version
11+
12+
## Environment settings for Libfabric
13+
14+
- `FI_MR_CACHE_MONITOR=userfaultfd`
15+
- `FI_CXI_DISABLE_HOST_REGISTER=1`
16+
- `FI_CXI_DEFAULT_CQ_SIZE=131072`
17+
- `FI_CXI_DEFAULT_TX_SIZE` must be set at least as large as the number of outstanding unexpected rendezvous messages that must be supported for the endpoint, plus 256; the default of 256 is sufficient for most applications
18+
- `FI_CXI_RDZV_PROTO=alt_read`
19+
- Enable the [Alternative Rendezvous Protocol](./rendezvous_protocol_configuration.md#alternative-rendezvous-configuration-fi_cxi_rdzv_protoalt_read) (either via the `sysfs` variable or per job through the job scheduler); the settings can be exported as in the NCCL example, and a quick verification sketch appears after this list
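As a quick sanity check before launching, the presence of the CXI provider and of the exported settings can be confirmed. This is a sketch, assuming the `fi_info` utility shipped with Libfabric is installed on the node.

```bash
# Confirm the CXI provider is available to Libfabric
fi_info -p cxi | head

# Confirm the RCCL-related Libfabric settings are present in the environment
env | grep -E '^FI_(MR_CACHE_MONITOR|CXI_)'
```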
@@ -0,0 +1,22 @@
1+
# RDMA messaging and relationship to environment settings
2+
3+
Libfabric programmers use either tagged or untagged two-sided messaging interfaces. Tagged messaging is used for complex data exchanges and multi-step protocols that require strict message ordering.
4+
Untagged messaging has less overhead and is suitable for simpler RDMA communication patterns, such as bulk data transfers and basic request-response. The HPE Slingshot NIC provides extensive hardware offloads for tagged messaging.
5+
6+
Received data goes into a "receive buffer," a memory area from which the target application reads the data once the transmission is complete, without OS memory copies.
7+
Data to be sent comes from a "transmit buffer," an area of application memory from which the NIC reads directly to send data, without involving the host CPU in copying the data.
8+
9+
The CXI provider selects how to send data from among several message protocols based on payload length.
10+
Short messages are transferred using an eager protocol where the entire message payload is sent along with the message header. Long messages are transferred using a rendezvous protocol.
11+
In the rendezvous protocol, a portion of the message payload is sent along with the message header. Once the header is matched to a receive buffer, the remainder of the payload is pulled from the source and matched and ordered by the receiver.
12+
13+
In both methods, if data has been sent before the receive buffer is set up, as might happen if an asynchronous communications pattern is permitted to maximize performance, the message is “unexpected”.
14+
It will sit in a special section of host memory allocated for unexpected messages until its proper destination is learned, at which point it will be copied into the correct receive buffer location.
15+
The HPE Slingshot NIC offloads tag matching for unexpected messages, but handling unexpected messages still requires host CPU intervention, so performance is optimized by limiting their frequency.
16+
17+
The environment variables configure the various resources and behaviors so that as many of the above operations as possible are offloaded onto the NIC to optimize performance.
18+
The less the host CPU needs to be involved, the more predictably sending, receiving, and matching can happen using hardware offloads; and the more software-based communication is chosen deliberately rather than triggered unintentionally by hardware exhaustion, the greater the performance will be. For example, when hardware message-matching resources become exhausted, messages may be dropped and need to be retransmitted, which impacts performance significantly.
19+
Sometimes hardware resource exhaustion can cause lock-ups, such as when the sender is waiting for receive responses but the receive resources have been exhausted.
20+
21+
The environment variables help ensure that resources are sized appropriately, so that communication performance is optimized and resource allocation is in line with the application’s communication patterns.
22+
Allocating too many resources can consume too much host memory and sometimes hinder performance, while allocating too few risks poor performance and resource-exhaustion lockups.
@@ -0,0 +1,41 @@
1+
# Rendezvous protocol configuration
2+
3+
For larger message sizes, the rendezvous protocol divides the message into multiple packets, which the destination re-assembles in the correct order. This method involves a "handshake" between the source and destination, allowing large amounts of data to be sent as out-of-order packets that the destination host then matches and orders correctly.
4+
5+
On the HPE Slingshot fabric, the rendezvous protocol leverages fine-grained adaptive routing to load balance data transfer on a packet-by-packet basis across multiple network paths, ensuring high bandwidth.
6+
7+
**Note:** The handshake message can also include "eager" data, as mentioned previously.
8+
9+
Rendezvous uses hardware resources in many areas because it must track all the individual pieces across all the messages.
10+
This includes resources for handling unexpected messages and resources for handling tag matching. Therefore, it can be desirable to configure more explicitly how the CXI provider handles different message sizes.
11+
12+
These settings include:
13+
14+
- Message size threshold (`FI_CXI_RDZV_THRESHOLD`)
15+
- Minimum payload size (`FI_CXI_RDZV_GET_MIN`)
16+
- Eager data size for rendezvous protocol (`FI_CXI_RDZV_EAGER_SIZE`)
17+
18+
A Send with length less than or equal to `FI_CXI_RDZV_THRESHOLD` plus `FI_CXI_RDZV_GET_MIN` will always be performed using the eager protocol instead of the non-eager rendezvous.
19+
20+
Larger Sends will be performed using the rendezvous protocol with `FI_CXI_RDZV_EAGER_SIZE` bytes of payload sent using the eager protocol and the remainder of the payload read from the source using a Get.
21+
22+
If using these parameters, `FI_CXI_RDZV_THRESHOLD` plus `FI_CXI_RDZV_GET_MIN` must be less than or equal to `FI_CXI_OFLOW_BUF_SIZE`.
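A minimal sketch of a configuration that satisfies this constraint follows; the byte values are illustrative assumptions rather than recommended settings.

```bash
# Sends of up to (FI_CXI_RDZV_THRESHOLD + FI_CXI_RDZV_GET_MIN) bytes use the
# eager protocol; larger sends use the rendezvous protocol.
export FI_CXI_RDZV_THRESHOLD=16384
export FI_CXI_RDZV_GET_MIN=2048

# Eager portion of each rendezvous message
export FI_CXI_RDZV_EAGER_SIZE=2048

# Constraint check: 16384 + 2048 = 18432 bytes, which must not exceed
# FI_CXI_OFLOW_BUF_SIZE (left at its default here).
```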
23+
24+
For both “hybrid” and “software” tag matching modes, care must be taken to minimize the threshold for rendezvous processing (for instance, `FI_CXI_RDZV_THRESHOLD` and `FI_CXI_RDZV_GET_MIN`).
25+
When running in software endpoint mode, the environment variables `FI_CXI_REQ_BUF_SIZE` and `FI_CXI_REQ_BUF_MIN_POSTED` are used to control the size and number of the eager request buffers posted to handle incoming unmatched messages.
26+
27+
## Alternative rendezvous configuration (`FI_CXI_RDZV_PROTO=alt_read`)
28+
29+
There are two rendezvous protocols in the `cxi` provider. The “alternative read” rendezvous protocol was developed because HPE found that some applications performed poorly with the default protocol. In effect, it is a hybrid form of rendezvous that handles the eager and non-eager portions of the rendezvous data transfer differently.
30+
Note that RDMA is used for both.
31+
This was initially uncovered with AI training applications, for which the alternative protocol is required. Other applications may also benefit from the alternative protocol, but HPE believes that most MPI simulation applications do not require it and may achieve better performance with the default protocol. Hence, both options are supported.
32+
33+
Unlike the other environment variables described in this document, which are handled entirely in user space, enabling the alternative rendezvous protocol requires changing a setting in the privileged kernel portion of the `cxi` driver. This means the alternative protocol cannot be enabled solely through a user-space runtime parameter.
34+
To enable this protocol on a job-by-job basis, the best option is to use recent versions of Slurm and PALS, which can configure this kernel setting as part of job launch on a per-job basis. The `srun` option is `--network=disable_rdzv_get`.
35+
36+
Alternatively, the HPE Slingshot NIC `sysfs` device property `rdzv_get_en` can be set to off, which removes the ability to run the default rendezvous protocol and is appropriate if the system should always run the alternative protocol.
37+
38+
The `alt_read` protocol can also be tested without updating hardware settings. To achieve this, run `FI_CXI_RX_MATCH_MODE=software` along with `FI_CXI_RDZV_PROTO=alt_read`. However, performance may not be optimal.
39+
40+
In addition, the alternate rendezvous protocol must be selected via the runtime environment using the variable `FI_CXI_RDZV_PROTO=alt_read`.
41+
The default rendezvous protocol is defined as `FI_CXI_RDZV_PROTO=default`.
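A sketch of selecting the alternative protocol on a per-job basis follows, assuming a Slurm version recent enough to support the `--network` option described above; the application name is a placeholder.

```bash
# Select the alternative read rendezvous protocol in user space...
export FI_CXI_RDZV_PROTO=alt_read

# ...and have Slurm apply the matching kernel-side setting for this job
srun --network=disable_rdzv_get ./my_app   # ./my_app is a placeholder

# Functional test without kernel or sysfs changes (performance may not be optimal):
# FI_CXI_RX_MATCH_MODE=software FI_CXI_RDZV_PROTO=alt_read srun ./my_app
```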
@@ -0,0 +1,17 @@
1+
# Software architecture
2+
3+
At a high level, the HPE Slingshot Host Software (SHS) NIC stack relies on the following major elements:
4+
5+
- Libfabric user space libraries.
6+
- Kernel drivers for Linux. Conceptually, there is an Ethernet driver that interfaces to Linux IP networking functions, and a driver that provides the kernel-level services for the HPE Slingshot NIC RDMA transport and interfaces to the Libfabric provider.
7+
- Other services that run in user space but are essentially driver functionality, specifically the “retry handler” code.
8+
9+
Each of these has logging that may be useful for fabric debugging.
10+
For user applications, the user space Libfabric logs are the primary useful source of information.
11+
12+
The list in this section is not exhaustive. In addition, there are configuration and diagnostic utilities, and other kernel modules for specific use (software RoCE, Lustre, and an alternative memory cache monitor).
13+
There is also interaction with other parts of the SHS stack, including the job launcher plug-ins (Slurm or PALS) that act as the privileged entity to apply per-job NIC driver settings (such as security isolation for the `cxi` RDMA protocol), GPU drivers, and configuration settings and utilities integrated into boot-up using `systemd`.
14+
15+
The HPE Slingshot NIC currently requires an algorithmic MAC address (AMA) to be configured before it can communicate.
16+
This is now handled by a Linux boot script that queries the switch with the Linux `lldptool` and assigns the proper MAC address using Linux networking commands.
17+
Such scripts are provided and also integrated with HPE Performance Cluster Manager (HPCM) and Cray Systems Management (CSM) image configuration tooling. The AMA is essentially a numerical mapping of the NIC to the specific switch port location on the fabric.
@@ -0,0 +1,37 @@
1+
# Tag matching mode settings (`FI_CXI_RX_MATCH_MODE`)
2+
3+
The CXI provider supports three different operational modes for tag matching: hardware, hybrid, and software.
4+
Hardware tag matching (`FI_CXI_RX_MATCH_MODE=hardware`) offers performance benefits as matching on the host is expensive in terms of CPU and memory bandwidth utilization.
5+
Hardware match mode is appropriate for users who can ensure the sum of unexpected messages and posted receives will not exceed the configured hardware receive resource limit for the application. Hardware matching is the default setting.
6+
7+
Hybrid match mode (`FI_CXI_RX_MATCH_MODE=hybrid`) is appropriate for users who are unsure whether the sum of unexpected messages and posted receives will exceed the configured hardware receive resource limit for the application but want to ensure the application still functions if hardware receive resources are consumed.
8+
Hybrid match mode extends hardware match by allowing for an automated transition into software match mode if resources are consumed.
9+
Hybrid is generally a better mode to run in than hardware matching alone, but the trade-off is that it requires approximately 8 MB of additional host memory per rank/domain, even if the rank never transitions to software match mode.
10+
11+
Software match mode (`FI_CXI_RX_MATCH_MODE=software`) is appropriate for users who know the sum of unexpected messages and posted receives will exceed the configured hardware receive resource limit for the application.
12+
In software match mode, the CXI provider maintains the unexpected message and posted receive lists in software rather than offloading them to hardware.
13+
This avoids having to allocate a hardware receive resource for each unexpected message and posted receive.
14+
This consumes approximately 8 MB of additional host memory per rank/domain.
15+
16+
## Hardware matching
17+
18+
When hardware receive resources are consumed (list/match entries or overflow buffers), receive operations can be disabled to ensure that the match order is maintained as the provider attempts to recover hardware resources.
19+
If resources can be recovered, operation can be resumed. Otherwise a different receive match mode is required (hybrid or software).
20+
During the resource recovery process, side-band communication is required to synchronize re-enablement of the receive function. An improperly sized side-band communication event queue can lengthen the recovery time at scale.
21+
22+
For this reason, with SHS release 2.1.1 and later, hybrid can be set as the global default to avoid this situation, albeit at the cost of additional host memory. See the next section for more information on configuring hybrid match mode.
23+
The current default setting is hardware matching, and Cray MPI uses hardware matching; both are largely a legacy of prior releases in which hybrid matching did not perform optimally.
24+
25+
Running with `FI_LOG_LEVEL=warn` and `FI_LOG_PROV=cxi` will report whether this flow control transition is happening. This can be useful for understanding other application failures, because there are other scenarios where software and hybrid match modes may still enter flow control: if a user is not draining the Libfabric completion queue at a reasonable rate, the corresponding hardware events may fill up, which also triggers flow control. In practice, dependent processes (for example, the ranks of a parallel job) will most likely share a common receive hardware resource pool.
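A sketch of running with provider logging enabled to observe these transitions follows; the launcher and application name are placeholders, and the exact wording of the warnings can vary by release.

```bash
# Report CXI provider warnings, including flow control and match mode transitions
export FI_LOG_LEVEL=warn
export FI_LOG_PROV=cxi
export FI_CXI_RX_MATCH_MODE=hybrid    # or hardware/software, as discussed above

srun ./my_app 2> cxi_warnings.log     # placeholder launcher and application

# Search the captured warnings for flow control or match mode messages
grep -iE 'flow control|match mode' cxi_warnings.log
```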
26+
27+
## Hybrid match mode configuration options
28+
29+
Hybrid match mode has further configurability to ensure that a process requiring more hardware receive resources does not consume them all, which would force all the other processes into software match mode.
30+
For example, consider a parallel application that has multiple processes (ranks) per NIC, all sharing the same hardware receive resource pool. Suppose that the application communication pattern results in an all-to-one communication targeting only a single rank (for example, rank 0) while the other ranks communicate among themselves.
31+
If the width of the all-to-one exceeds the available hardware resources, all ranks on the target NIC will transition to software match mode. The preemptive options help ensure that only rank 0 transitions to software match mode instead of all the ranks on the target NIC.
32+
33+
The `FI_CXI_HYBRID_POSTED_RECV_PREEMPTIVE` and `FI_CXI_HYBRID_UNEXPECTED_MSG_PREEMPTIVE` environment variables enable users to control the transition to software match. One approach is to set the receive size attribute to expected usage, and if this expected usage is exceeded, only the offending endpoints will transition to software match mode.
34+
35+
The `FI_CXI_HYBRID_PREEMPTIVE` and `FI_CXI_HYBRID_RECV_PREEMPTIVE` environment variables force the transition to software match mode when hardware receive resources in the pool are running low. The CXI provider uses a multi-step process to transition the Libfabric endpoint to software match mode.
36+
The benefit of running with these enabled is that the number of endpoints transitioning to software match mode may be smaller when compared to forced software match mode transition due to zero hardware resources available.
37+
These two settings are disabled by default (a value of zero).
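A sketch of one possible hybrid configuration using these options follows; whether to enable each variable is workload-specific, and the combination shown is illustrative only.

```bash
export FI_CXI_RX_MATCH_MODE=hybrid

# Preemptively transition endpoints to software match mode when the shared
# hardware receive resource pool runs low (both disabled by default).
export FI_CXI_HYBRID_PREEMPTIVE=1
export FI_CXI_HYBRID_RECV_PREEMPTIVE=1

# Transition only the endpoints that exceed their expected posted-receive or
# unexpected-message usage, rather than every rank on the NIC.
export FI_CXI_HYBRID_POSTED_RECV_PREEMPTIVE=1
export FI_CXI_HYBRID_UNEXPECTED_MSG_PREEMPTIVE=1
```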
@@ -0,0 +1,31 @@
1+
# User configurable Libfabric environment variables when using the HPE Slingshot NIC
2+
3+
The `cxi` provider uses various environment parameters to size hardware resources according to the needs of the system and, potentially, the application, its message patterns, and the system’s processing type and size. If those patterns are constant, the configuration can be set globally, but for systems with heterogeneous applications and processing, these parameters sometimes need to be set on a per-application basis.
4+
Often tuning of environment variables is more relevant for larger clusters and larger jobs.
5+
6+
Some are specific to the `cxi` provider, while others are general Libfabric object attribute sizes and environment variables that are important when using the `cxi` provider.
7+
8+
The use of these settings can be relevant in several ways:
9+
10+
- Optimizing host memory use: oversizing some parameters can waste host memory.
11+
- Optimizing performance: sizing the resources properly can help performance by maximizing use of the hardware offload capability, minimizing “on-loading” to the host, avoiding the impact of stopping and starting message flow control, and maximizing opportunities to re-use host memory caches.
12+
- Overcoming hangs that may be caused by the complex interaction of any specific application’s memory access and communications patterns with the memory caching and other functions in the communications stack.
13+
14+
Environment variable defaults are usually set by the administrator but can be overridden on a per-application basis, either by middleware such as an MPI library or by the application itself. Sometimes the optimal default settings are site-specific: a function of the type of processors employed (GPU versus CPU), the main types of applications run (for example, GPU-based AI, MPI, or SHMEM), and the scale of the system.
15+
Sometimes, a specific application will need unique settings. These settings can include both general Libfabric parameters as well as HPE Slingshot-specific ones.
16+
17+
Note that HPE Cray MPI provides default settings for environment variables that are not set by other Libfabric stacks (such as open-source MPI or the GPU communications collectives libraries); when using those stacks, the variables might need to be set explicitly, particularly for larger cluster sizes.
18+
While it is difficult to provide general guidance across the breadth of system sizes and types using the HPE Slingshot NIC, sites should weigh the cost of setting a parameter too high (wasted host memory) against the cost of being too constrained. The HPE Cray MPI settings are a good template, though they are sized for a customer base of large systems.
19+
20+
The most detailed reference for the HPE Slingshot provider is the [manpage for the `cxi` provider](https://github.com/ofiwg/libfabric/blob/main/man/fi_cxi.7.md).
21+
Libfabric software developers (for example, those developing MPI middleware) should see the manpages for the most complete and current information.
22+
The intent of the information here is to document the most common settings administrators and specific application users will need to use, partially based on current customer experience.
23+
The table of HPE Slingshot NIC-specific settable parameters in the appendix, [HPE Slingshot NIC RDMA protocol and traffic classes](./hpe_slingshot_nic_rdma_protocol_and_traffic_classes.md#hpe-slingshot-nic-rdma-protocol-and-traffic-classes), is copied from the `cxi` Libfabric provider man page.
24+
The syntax for these variables starts with `FI_CXI_xxx`.
25+
26+
General Libfabric variables use the syntax `FI_xxx` and are explained in the general Libfabric man pages available on `ofiwg.github.io`.
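To see which variables the installed Libfabric build recognizes, together with their descriptions and current defaults, one approach is the `fi_info` utility shipped with Libfabric (a sketch; output formatting can vary between Libfabric versions).

```bash
# List all recognized Libfabric environment variables with their descriptions
fi_info -e | less

# Narrow the listing to the CXI provider's variables
fi_info -e | grep -A1 'FI_CXI_'
```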
27+
Software capabilities have evolved from prior versions of the HPE Slingshot Host Software, and guidance might have been different in the past.
28+
29+
Default settings are also current as of the SHS 2.1.2 release but can change in the future.
30+
31+
Users of the HPE Cray Programming Environment (PE) will find additional information in the PE documentation.

‎docs/portal/scripts/build.sh

+16-1
@@ -59,6 +59,7 @@ mkdir -m777 -p build/install
5959
mkdir -m777 -p build/release_notes
6060
mkdir -m777 -p build/troubleshoot
6161
mkdir -m777 -p build/admin
62+
mkdir -m777 -p build/user
6263
mkdir -m777 -p build/Markdown
6364

6465
# call the script that flattens the dir structure used to build the HPESC bundle
@@ -114,6 +115,17 @@ dita -i HPE_Slingshot_Host_Software_Administration_Guide.ditamap -o build/PDF/ad
114115
echo "Building Combined Markdown File"
115116
dita -i HPE_Slingshot_Host_Software_Administration_Guide.ditamap --root-chunk-override=to-content -o build/Markdown -f markdown_github
116117

118+
echo "Building SHS User Guide";
119+
120+
# This line builds the HPESC HTML bundle for the user guide
121+
dita -i tmp/HPE_Slingshot_Host_Software_User_Guide.ditamap -o build/user -f HPEscHtml5 && cp HPE_Slingshot_Host_Software_User_Guide.json build/user/publication.json && cd build/user/ && zip -r crs9014en_us.zip ./
122+
cd $THIS_DIR
123+
# This builds the PDF using DITA-OT's default PDF transform
124+
echo "Building PDF"
125+
dita -i HPE_Slingshot_Host_Software_User_Guide.ditamap -o build/PDF/user -f pdf
126+
# This builds the single file Markdown version of the guide. This leverages DITA's "chunking"
127+
echo "Building Combined Markdown File"
128+
dita -i HPE_Slingshot_Host_Software_User_Guide.ditamap --root-chunk-override=to-content -o build/Markdown -f markdown_github
117129

118130
# delete the tmp dir created by the flatten script. The bundle is still in the build subdir
119131
rm -rf tmp/
@@ -123,8 +135,11 @@ mv build/Markdown/HPE_Slingshot_Host_Software_Installation_and_Configuration_Gui
123135
mv build/Markdown/HPE_Slingshot_Host_Software_Release_Notes.md build/
124136
mv build/Markdown/HPE_Slingshot_Host_Software_Troubleshooting_Guide.md build/
125137
mv build/Markdown/HPE_Slingshot_Host_Software_Administration_Guide.md build/
138+
mv build/Markdown/HPE_Slingshot_Host_Software_User_Guide.md build/
139+
126140
rm -rf build/Markdown/*
127141
mv build/HPE_Slingshot_Host_Software_Installation_and_Configuration_Guide.md build/Markdown/
128142
mv build/HPE_Slingshot_Host_Software_Release_Notes.md build/Markdown/
129143
mv build/HPE_Slingshot_Host_Software_Troubleshooting_Guide.md build/Markdown/
130-
mv build/HPE_Slingshot_Host_Software_Administration_Guide.md build/Markdown/
144+
mv build/HPE_Slingshot_Host_Software_Administration_Guide.md build/Markdown/
145+
mv build/HPE_Slingshot_Host_Software_User_Guide.md build/Markdown/

‎hpc-shs-version

+1
@@ -0,0 +1 @@
1+
Subproject commit 83adf7ea1e342ac0e8e8e19fc662e795fe778c19
