diff -Nru ucx-1.13.0/configure.ac ucx-1.13.1/configure.ac
--- ucx-1.13.0/configure.ac	2022-07-18 07:17:35.000000000 +0000
+++ ucx-1.13.1/configure.ac	2022-09-12 09:22:28.000000000 +0000
@@ -10,7 +10,7 @@
 define([ucx_ver_major], 1)  # Major version. Usually does not change.
 define([ucx_ver_minor], 13) # Minor version. Increased for each release.
-define([ucx_ver_patch], 0)  # Patch version. Increased for a bugfix release.
+define([ucx_ver_patch], 1)  # Patch version. Increased for a bugfix release.
 define([ucx_ver_extra], )   # Extra version string. Empty for a general release.
 
 define([ts], esyscmd([sh -c "date +%Y%m%d%H%M%S"]))
diff -Nru ucx-1.13.0/contrib/test_jenkins.sh ucx-1.13.1/contrib/test_jenkins.sh
--- ucx-1.13.0/contrib/test_jenkins.sh	2022-07-18 07:17:35.000000000 +0000
+++ ucx-1.13.1/contrib/test_jenkins.sh	2022-09-12 09:22:28.000000000 +0000
@@ -77,7 +77,10 @@
 #
 # Set initial port number for client/server applications
 #
-server_port=$((10000 + (1000 * EXECUTOR_NUMBER)))
+server_port_range=1000
+server_port_min=$((10500 + EXECUTOR_NUMBER * server_port_range))
+server_port_max=$((server_port_min + server_port_range))
+server_port=${server_port_min}
 
 #
 # Override maven repository path, to cache the downloaded packages accross tests
@@ -408,6 +411,11 @@
 	wait ${pid} || true
 }
 
+step_server_port() {
+	# Cycle server_port between (server_port_min)..(server_port_max-1)
+	server_port=$((server_port + 1))
+	server_port=$((server_port >= server_port_max ? server_port_min : server_port))
+}
 
 run_client_server_app() {
 	test_exe=$1
@@ -417,7 +425,7 @@
 	error_emulation=$5
 
 	server_port_arg="-p $server_port"
-	server_port=$((server_port + 1))
+	step_server_port
 
 	affinity_server=$(slice_affinity 0)
 	affinity_client=$(slice_affinity 1)
@@ -1230,7 +1238,7 @@
 	export UCX_ERROR_SIGNALS=SIGILL,SIGSEGV,SIGBUS,SIGFPE,SIGPIPE,SIGABRT
 	export UCX_ERROR_MAIL_TO=$ghprbActualCommitAuthorEmail
 	export UCX_ERROR_MAIL_FOOTER=$JOB_URL/$BUILD_NUMBER/console
-	export UCX_TCP_PORT_RANGE="$((33000 + EXECUTOR_NUMBER * 100))"-"$((34000 + EXECUTOR_NUMBER * 100))"
+	export UCX_TCP_PORT_RANGE="$((33000 + EXECUTOR_NUMBER * 1000))-$((33999 + EXECUTOR_NUMBER * 1000))"
 	export UCX_TCP_CM_REUSEADDR=y
 
 	# Don't cross-connect RoCE devices
diff -Nru ucx-1.13.0/debian/changelog ucx-1.13.1/debian/changelog
--- ucx-1.13.0/debian/changelog	2022-09-15 18:01:47.000000000 +0000
+++ ucx-1.13.1/debian/changelog	2023-02-07 18:37:50.000000000 +0000
@@ -1,9 +1,21 @@
-ucx (1.13.0-1~22.04.sav0) jammy; urgency=medium
+ucx (1.13.1-1~22.04.sav0) jammy; urgency=medium
 
   * Backport to Jammy
   * debian/control: Fix package descriptions typo (spurious "i" after commas)
 
- -- Rob Savoury  Thu, 15 Sep 2022 11:01:47 -0700
+ -- Rob Savoury  Tue, 07 Feb 2023 10:37:50 -0800
+
+ucx (1.13.1-1) unstable; urgency=medium
+
+  * New upstream release
+
+ -- Alastair McKinstry  Mon, 12 Sep 2022 10:22:17 +0100
+
+ucx (1.13.1~rc1-1) experimental; urgency=medium
+
+  * New upstream release
+
+ -- Alastair McKinstry  Thu, 25 Aug 2022 07:24:41 +0100
 
 ucx (1.13.0-1) unstable; urgency=medium
diff -Nru ucx-1.13.0/NEWS ucx-1.13.1/NEWS
--- ucx-1.13.0/NEWS	2022-07-18 07:17:35.000000000 +0000
+++ ucx-1.13.1/NEWS	2022-09-12 09:22:28.000000000 +0000
@@ -11,6 +11,16 @@
 ### Features:
 
 ### Bugfixes:
+## 1.13.1 (September 7, 2022)
+#### Bugfixes
+* Fixed flow control protocol in DC transport
+* Fixed reordering of pending operations in DC transport
+* Fixed relaxed order detection in IB transports
+* Fixed build configuration and IB ops references
+* Fixed bandwidth calculation during wireup phase
+* Fixed TCP transport server port selection
+* Minor fixes in CI testing
+
 ## 1.13.0 (July 7, 2022)
 #### Features
 ##### Core
diff -Nru ucx-1.13.0/src/ucp/wireup/select.c ucx-1.13.1/src/ucp/wireup/select.c
--- ucx-1.13.0/src/ucp/wireup/select.c	2022-07-18 07:17:35.000000000 +0000
+++ ucx-1.13.1/src/ucp/wireup/select.c	2022-09-12 09:22:28.000000000 +0000
@@ -1057,10 +1057,14 @@
     /* highest bandwidth with lowest overhead - test a message size of 256KB,
      * a size which is likely to be used for high-bw memory access protocol, for
      * how long it would take to transfer it with a certain transport. */
-    return 1 / ((UCP_WIREUP_RMA_BW_TEST_MSG_SIZE /
-                 ucs_min(ucp_tl_iface_bandwidth(wiface->worker->context,
-                                                &wiface->attr.bandwidth),
-                         remote_iface_attr->bandwidth)) +
+    double eps       = 1e-3;
+    double local_bw  = ucp_tl_iface_bandwidth(wiface->worker->context,
+                                              &wiface->attr.bandwidth);
+    double remote_bw = remote_iface_attr->bandwidth;
+    double lane_bw   = ucs_min(local_bw, remote_bw) +
+                       (eps * (local_bw + remote_bw));
+
+    return 1 / ((UCP_WIREUP_RMA_BW_TEST_MSG_SIZE / lane_bw) +
                 ucp_wireup_tl_iface_latency(wiface->worker->context,
                                             &wiface->attr,
                                             remote_iface_attr) +
                 wiface->attr.overhead +
diff -Nru ucx-1.13.0/src/uct/ib/base/ib_md.c ucx-1.13.1/src/uct/ib/base/ib_md.c
--- ucx-1.13.0/src/uct/ib/base/ib_md.c	2022-07-18 07:17:35.000000000 +0000
+++ ucx-1.13.1/src/uct/ib/base/ib_md.c	2022-09-12 09:22:28.000000000 +0000
@@ -236,15 +236,17 @@
 static uct_ib_md_ops_entry_t UCT_IB_MD_OPS_NAME(verbs);
 
 static uct_ib_md_ops_entry_t *uct_ib_ops[] = {
-#if defined (HAVE_MLX5_DV) && defined (HAVE_DEVX)
+#if defined (HAVE_MLX5_HW)
+# if defined (HAVE_MLX5_DV) && defined (HAVE_DEVX)
     &UCT_IB_MD_OPS_NAME(devx),
-#endif
-#if defined (HAVE_MLX5_DV)
+# endif
+# if defined (HAVE_MLX5_DV)
     &UCT_IB_MD_OPS_NAME(dv),
-#endif
-#if defined (HAVE_MLX5_HW) && defined (HAVE_VERBS_EXP_H)
+# endif
+# if defined (HAVE_VERBS_EXP_H)
     &UCT_IB_MD_OPS_NAME(exp),
-#endif
+# endif
+#endif /* HAVE_MLX5_HW */
     &UCT_IB_MD_OPS_NAME(verbs)
 };
@@ -1577,17 +1579,15 @@
 void uct_ib_md_parse_relaxed_order(uct_ib_md_t *md,
                                    const uct_ib_md_config_t *md_config)
 {
+#if HAVE_DECL_IBV_ACCESS_RELAXED_ORDERING
+    md->relaxed_order = (md_config->mr_relaxed_order == UCS_CONFIG_ON) ||
+                        ((md_config->mr_relaxed_order == UCS_CONFIG_AUTO) &&
+                         ucs_cpu_prefer_relaxed_order());
+#else
     if (md_config->mr_relaxed_order == UCS_CONFIG_ON) {
-        if (IBV_ACCESS_RELAXED_ORDERING) {
-            md->relaxed_order = 1;
-        } else {
-            ucs_warn("relaxed order memory access requested but not supported");
-        }
-    } else if (md_config->mr_relaxed_order == UCS_CONFIG_AUTO) {
-        if (ucs_cpu_prefer_relaxed_order()) {
-            md->relaxed_order = 1;
-        }
+        ucs_warn("relaxed order memory access requested but not supported");
     }
+#endif
 }
 
 static void uct_ib_check_gpudirect_driver(uct_ib_md_t *md, const char *file,
diff -Nru ucx-1.13.0/src/uct/ib/dc/dc_mlx5_ep.c ucx-1.13.1/src/uct/ib/dc/dc_mlx5_ep.c
--- ucx-1.13.0/src/uct/ib/dc/dc_mlx5_ep.c	2022-07-18 07:17:35.000000000 +0000
+++ ucx-1.13.1/src/uct/ib/dc/dc_mlx5_ep.c	2022-09-12 09:22:28.000000000 +0000
@@ -1516,17 +1516,12 @@
      * resend FC_HARD_REQ packet to make sure a peer will resend FC_PURE_GRANT
      * packet in case of failure on the remote FC endpoint */
     kh_foreach_key(&iface->tx.fc_hash, ep_key, {
-        ep     = (uct_dc_mlx5_ep_t*)ep_key;
-
-        /* Allocate DCI for the endpoint to schedule the endpoint to DCI wait
-         * queue if there is free DCI */
-        status = uct_dc_mlx5_iface_dci_get(iface, ep);
-        ucs_assertv((status == UCS_OK) || (status == UCS_ERR_NO_RESOURCE),
-                    "%s", ucs_status_string(status));
-
-        /* Force DCI scheduling, since FC resources may never become available
-         * unless we send FC_HARD_REQ packet */
-        uct_dc_mlx5_ep_schedule(iface, ep, 1);
+        ep     = (uct_dc_mlx5_ep_t*)ep_key;
+        status = uct_dc_mlx5_ep_check_fc(iface, ep);
+        if ((status != UCS_OK) && (status != UCS_ERR_NO_RESOURCE)) {
+            ucs_warn("ep %p: flow-control check failed: %s", ep,
+                     ucs_status_string(status));
+        }
     })
 
     return 1;
@@ -1646,7 +1641,7 @@
         /* Since DCI isn't assigned for the FC endpoint, schedule DCI
          * allocation for progressing possible FC_PURE_GRANT re-sending
         * operation which are scheduled on the pending queue */
-        uct_dc_mlx5_iface_schedule_dci_alloc(iface, ep, 0);
+        uct_dc_mlx5_iface_schedule_dci_alloc(iface, ep);
     }
 }
 
diff -Nru ucx-1.13.0/src/uct/ib/dc/dc_mlx5_ep.h ucx-1.13.1/src/uct/ib/dc/dc_mlx5_ep.h
--- ucx-1.13.0/src/uct/ib/dc/dc_mlx5_ep.h	2022-07-18 07:17:35.000000000 +0000
+++ ucx-1.13.1/src/uct/ib/dc/dc_mlx5_ep.h	2022-09-12 09:22:28.000000000 +0000
@@ -382,13 +382,12 @@
 
 static UCS_F_ALWAYS_INLINE void
 uct_dc_mlx5_iface_schedule_dci_alloc(uct_dc_mlx5_iface_t *iface,
-                                     uct_dc_mlx5_ep_t *ep, int force)
+                                     uct_dc_mlx5_ep_t *ep)
 {
     ucs_arbiter_t *waitq;
 
-    /* If FC window is empty and force scheduling wasn't requested, the group
-     * will be scheduled when grant is received */
-    if (force || uct_rc_fc_has_resources(&iface->super.super, &ep->fc)) {
+    /* If FC window is empty the group will be scheduled when grant is received */
+    if (uct_rc_fc_has_resources(&iface->super.super, &ep->fc)) {
         waitq = uct_dc_mlx5_iface_dci_waitq(iface,
                                             uct_dc_mlx5_ep_pool_index(ep));
         ucs_arbiter_group_schedule(waitq, &ep->arb_group);
     }
@@ -476,7 +475,7 @@
      * move the group to the 'wait for dci alloc' state */
     ucs_arbiter_group_desched(uct_dc_mlx5_iface_tx_waitq(iface),
                               &ep->arb_group);
-    uct_dc_mlx5_iface_schedule_dci_alloc(iface, ep, 0);
+    uct_dc_mlx5_iface_schedule_dci_alloc(iface, ep);
 }
 
 static inline void uct_dc_mlx5_iface_dci_alloc(uct_dc_mlx5_iface_t *iface,
                                                uct_dc_mlx5_ep_t *ep)
@@ -601,10 +600,8 @@
     }
 
     if (uct_dc_mlx5_iface_dci_can_alloc(iface, pool_index)) {
-        if (!(iface->flags & UCT_DC_MLX5_IFACE_IGNORE_DCI_WAITQ_REORDER)) {
-            waitq = uct_dc_mlx5_iface_dci_waitq(iface, pool_index);
-            ucs_assert(ucs_arbiter_is_empty(waitq));
-        }
+        waitq = uct_dc_mlx5_iface_dci_waitq(iface, pool_index);
+        ucs_assert(ucs_arbiter_is_empty(waitq));
 
         uct_dc_mlx5_iface_dci_alloc(iface, ep);
         return UCS_OK;
diff -Nru ucx-1.13.0/src/uct/ib/dc/dc_mlx5.h ucx-1.13.1/src/uct/ib/dc/dc_mlx5.h
--- ucx-1.13.0/src/uct/ib/dc/dc_mlx5.h	2022-07-18 07:17:35.000000000 +0000
+++ ucx-1.13.1/src/uct/ib/dc/dc_mlx5.h	2022-09-12 09:22:28.000000000 +0000
@@ -93,17 +93,14 @@
     /** Flow control endpoint is using a DCI in error state */
     UCT_DC_MLX5_IFACE_FLAG_FC_EP_FAILED        = UCS_BIT(3),
 
-    /** Ignore DCI allocation reorder */
-    UCT_DC_MLX5_IFACE_IGNORE_DCI_WAITQ_REORDER = UCS_BIT(4),
-
     /** Enable full handshake for DCI */
-    UCT_DC_MLX5_IFACE_FLAG_DCI_FULL_HANDSHAKE  = UCS_BIT(5),
+    UCT_DC_MLX5_IFACE_FLAG_DCI_FULL_HANDSHAKE  = UCS_BIT(4),
 
     /** Enable full handshake for DCT */
-    UCT_DC_MLX5_IFACE_FLAG_DCT_FULL_HANDSHAKE  = UCS_BIT(6),
+    UCT_DC_MLX5_IFACE_FLAG_DCT_FULL_HANDSHAKE  = UCS_BIT(5),
 
     /** Disable PUT capability (RDMA_WRITE) */
-    UCT_DC_MLX5_IFACE_FLAG_DISABLE_PUT         = UCS_BIT(7)
+    UCT_DC_MLX5_IFACE_FLAG_DISABLE_PUT         = UCS_BIT(6)
 } uct_dc_mlx5_iface_flags_t;
diff -Nru ucx-1.13.0/src/uct/ib/dc/dc_mlx5.inl ucx-1.13.1/src/uct/ib/dc/dc_mlx5.inl
--- ucx-1.13.0/src/uct/ib/dc/dc_mlx5.inl	2022-07-18 07:17:35.000000000 +0000
+++ ucx-1.13.1/src/uct/ib/dc/dc_mlx5.inl	2022-09-12 09:22:28.000000000 +0000
@@ -41,8 +41,7 @@
 }
 
 static UCS_F_ALWAYS_INLINE void
-uct_dc_mlx5_ep_schedule(uct_dc_mlx5_iface_t *iface, uct_dc_mlx5_ep_t *ep,
-                        int force)
+uct_dc_mlx5_ep_schedule(uct_dc_mlx5_iface_t *iface, uct_dc_mlx5_ep_t *ep)
 {
     if (ep->dci == UCT_DC_MLX5_EP_NO_DCI) {
         /* no dci:
@@ -50,7 +49,7 @@
          * arbiter. This way we can assure fairness between all eps waiting for
          * dci allocation. Relevant for dcs and dcs_quota policies.
          */
-        uct_dc_mlx5_iface_schedule_dci_alloc(iface, ep, force);
+        uct_dc_mlx5_iface_schedule_dci_alloc(iface, ep);
     } else {
         uct_dc_mlx5_iface_dci_sched_tx(iface, ep);
     }
@@ -84,5 +83,5 @@
         return;
     }
 
-    uct_dc_mlx5_ep_schedule(iface, ep, 0);
+    uct_dc_mlx5_ep_schedule(iface, ep);
 }
diff -Nru ucx-1.13.0/src/uct/tcp/tcp_iface.c ucx-1.13.1/src/uct/tcp/tcp_iface.c
--- ucx-1.13.0/src/uct/tcp/tcp_iface.c	2022-07-18 07:17:35.000000000 +0000
+++ ucx-1.13.1/src/uct/tcp/tcp_iface.c	2022-09-12 09:22:28.000000000 +0000
@@ -543,53 +543,6 @@
     return status;
 }
 
-static ucs_status_t uct_tcp_iface_set_port_range(uct_tcp_iface_t *iface,
-                                                 uct_tcp_iface_config_t *config)
-{
-    ucs_range_spec_t system_port_range;
-    unsigned start_range, end_range;
-    ucs_status_t status;
-
-    if ((config->port_range.first == 0) && (config->port_range.last == 0)) {
-        /* using a random port */
-        goto out_set_config;
-    }
-
-    /* get the system configuration for usable ports range */
-    status = ucs_sockaddr_get_ip_local_port_range(&system_port_range);
-    if (status != UCS_OK) {
-        /* using the user's config */
-        goto out_set_config;
-    }
-
-    /* find a common range between the user's ports range and the one on the system */
-    start_range = ucs_max(system_port_range.first, config->port_range.first);
-    end_range   = ucs_min(system_port_range.last, config->port_range.last);
-
-    if (start_range > end_range) {
-        /* there is no common range */
-        ucs_error("the requested TCP port range (%d-%d) is outside of system's "
-                  "configured port range (%d-%d)",
-                  config->port_range.first, config->port_range.last,
-                  system_port_range.first, system_port_range.last);
-        status = UCS_ERR_INVALID_PARAM;
-        goto out;
-    }
-
-    iface->port_range.first = start_range;
-    iface->port_range.last  = end_range;
-    ucs_debug("using TCP port range: %d-%d", iface->port_range.first, iface->port_range.last);
-    return UCS_OK;
-
-out_set_config:
-    iface->port_range.first = config->port_range.first;
-    iface->port_range.last  = config->port_range.last;
-    ucs_debug("using TCP port range: %d-%d", iface->port_range.first, iface->port_range.last);
-    return UCS_OK;
-out:
-    return status;
-}
-
 static ucs_mpool_ops_t uct_tcp_mpool_ops = {
     .chunk_alloc   = ucs_mpool_chunk_malloc,
     .chunk_release = ucs_mpool_chunk_free,
@@ -681,6 +634,8 @@
     self->sockopt.rcvbuf         = config->sockopt.rcvbuf;
     self->config.keepalive.cnt   = config->keepalive.cnt;
     self->config.keepalive.intvl = config->keepalive.intvl;
+    self->port_range.first       = config->port_range.first;
+    self->port_range.last        = config->port_range.last;
 
     if (config->keepalive.idle != UCS_MEMUNITS_AUTO) {
         /* TCP iface configuration sets the keepalive interval */
@@ -694,15 +649,11 @@
             ucs_time_from_sec(UCT_TCP_EP_DEFAULT_KEEPALIVE_IDLE);
     }
 
-    status = uct_tcp_iface_set_port_range(self, config);
-    if (status != UCS_OK) {
-        goto err;
-    }
-
     if (self->config.tx_seg_size > self->config.rx_seg_size) {
         ucs_error("RX segment size (%zu) must be >= TX segment size (%zu)",
                   self->config.rx_seg_size, self->config.tx_seg_size);
-        return UCS_ERR_INVALID_PARAM;
+        status = UCS_ERR_INVALID_PARAM;
+        goto err;
     }
 
     status = ucs_mpool_init(&self->tx_mpool, 0, self->config.tx_seg_size,
diff -Nru ucx-1.13.0/test/gtest/uct/ib/test_dc.cc ucx-1.13.1/test/gtest/uct/ib/test_dc.cc
--- ucx-1.13.0/test/gtest/uct/ib/test_dc.cc	2022-07-18 07:17:35.000000000 +0000
+++ ucx-1.13.1/test/gtest/uct/ib/test_dc.cc	2022-09-12 09:22:28.000000000 +0000
@@ -461,23 +461,12 @@
     {
         uct_dc_mlx5_iface_t *iface = ucs_derived_of(e->ep(ep_idx)->iface,
                                                     uct_dc_mlx5_iface_t);
+        uct_dc_mlx5_ep_t *ep       = ucs_derived_of(e->ep(ep_idx),
+                                                    uct_dc_mlx5_ep_t);
 
         get_fc_ptr(e, ep_idx)->fc_wnd = fc_wnd;
-
-        if (fc_wnd <= iface->super.super.config.fc_hard_thresh) {
-            int ret;
-            khiter_t it = kh_put(uct_dc_mlx5_fc_hash, &iface->tx.fc_hash,
-                                 (uint64_t)e->ep(ep_idx), &ret);
-            if ((ret == UCS_KH_PUT_FAILED) || (ret == UCS_KH_PUT_KEY_PRESENT)) {
-                return;
-            }
-
-            uct_dc_mlx5_ep_fc_entry_t *fc_entry = &kh_value(&iface->tx.fc_hash,
-                                                            it);
-
-            fc_entry->seq       = iface->tx.fc_seq++;
-            fc_entry->send_time = ucs_get_time();
-        }
+        ucs_status_t status        = uct_dc_mlx5_ep_check_fc(iface, ep);
+        ASSERT_TRUE((status == UCS_OK) || (status == UCS_ERR_NO_RESOURCE));
     }
 
     virtual void disable_entity(entity *e) {
@@ -499,13 +488,18 @@
                                            iface->tx.dcis[i].txwq.bb_max);
         }
         iface->tx.dci_pool[0].stack_top = 0;
+
+        uint8_t pool_index;
+        for (pool_index = 0; pool_index < iface->tx.num_dci_pools;
+             ++pool_index) {
+            uct_dc_mlx5_iface_progress_pending(iface, pool_index);
+        }
     }
 
-    virtual void ignore_dci_waitq_reorder(uct_test::entity *e)
+    virtual void test_pending_grant(int wnd, uint64_t *wait_fc_seq = NULL)
     {
-        uct_dc_mlx5_iface_t *iface = test_dc::dc_iface(e);
-
-        iface->flags |= UCT_DC_MLX5_IFACE_IGNORE_DCI_WAITQ_REORDER;
+        test_rc_flow_control::test_pending_grant(wnd, wait_fc_seq);
+        flush();
     }
 };
@@ -524,12 +518,15 @@
 
 UCS_TEST_P(test_dc_flow_control, pending_grant)
 {
-    /* test uses manipulation with available TX resources which may break
-       DCI allocation ordering. allow out-of-order DCI waitq */
-    ignore_dci_waitq_reorder(m_e2);
-
     test_pending_grant(5);
-    flush();
+}
+
+UCS_TEST_P(test_dc_flow_control, pending_grant_and_resend_hard_req,
+           "DC_FC_HARD_REQ_TIMEOUT=0.1s")
+{
+    uct_dc_mlx5_iface_t *iface = ucs_derived_of(m_e1->iface(),
+                                                uct_dc_mlx5_iface_t);
+    test_pending_grant(5, &iface->tx.fc_seq);
 }
 
 UCS_TEST_P(test_dc_flow_control, fc_disabled_flush)
@@ -593,10 +590,6 @@
     int wnd = 5;
     ucs_status_t status;
 
-    /* test uses manipulation with available TX resources which may break
-       DCI allocation ordering. allow out-of-order DCI waitq */
-    ignore_dci_waitq_reorder(m_e2);
-
     disable_entity(m_e2);
 
     set_fc_attributes(m_e1, true, wnd,
@@ -633,10 +626,6 @@
  * is scheduled for dci allocation. */
 UCS_TEST_P(test_dc_flow_control, dci_leak)
 {
-    /* test uses manipulation with available TX resources which may break
-       DCI allocation ordering. allow out-of-order DCI waitq */
-    ignore_dci_waitq_reorder(m_e2);
-
     disable_entity(m_e2);
     int wnd = 5;
     set_fc_attributes(m_e1, true, wnd,
diff -Nru ucx-1.13.0/test/gtest/uct/ib/test_rc.cc ucx-1.13.1/test/gtest/uct/ib/test_rc.cc
--- ucx-1.13.0/test/gtest/uct/ib/test_rc.cc	2022-07-18 07:17:35.000000000 +0000
+++ ucx-1.13.1/test/gtest/uct/ib/test_rc.cc	2022-09-12 09:22:28.000000000 +0000
@@ -619,7 +619,7 @@
     flush();
 }
 
-void test_rc_flow_control::test_pending_grant(int wnd)
+void test_rc_flow_control::test_pending_grant(int wnd, uint64_t *wait_fc_seq)
 {
     /* Block send capabilities of m_e2 for fc grant to be
      * added to the pending queue. */
@@ -633,6 +633,12 @@
     send_am_messages(m_e1, 1, UCS_ERR_NO_RESOURCE);
     EXPECT_LE(get_fc_ptr(m_e1)->fc_wnd, 0);
 
+    if (wait_fc_seq != NULL) {
+        uint64_t fc_seq_value = *wait_fc_seq;
+        wait_for_value(wait_fc_seq, fc_seq_value + 1, true);
+        EXPECT_GT(*wait_fc_seq, fc_seq_value);
+    }
+
     /* Enable send capabilities of m_e2 and send short put message to force
      * pending queue dispatch. Can't send AM message for that, because it may
      * trigger reordering assert due to disable/enable entity hack. */
diff -Nru ucx-1.13.0/test/gtest/uct/ib/test_rc.h ucx-1.13.1/test/gtest/uct/ib/test_rc.h
--- ucx-1.13.0/test/gtest/uct/ib/test_rc.h	2022-07-18 07:17:35.000000000 +0000
+++ ucx-1.13.1/test/gtest/uct/ib/test_rc.h	2022-09-12 09:22:28.000000000 +0000
@@ -136,7 +136,7 @@
     void test_general(int wnd, int s_thresh, int h_thresh,
                       bool is_fc_enabled);
 
-    void test_pending_grant(int wnd);
+    virtual void test_pending_grant(int wnd, uint64_t *wait_fc_seq = NULL);
 
     void test_pending_purge(int wnd, int num_pend_sends);
diff -Nru ucx-1.13.0/ucx.spec.in ucx-1.13.1/ucx.spec.in
--- ucx-1.13.0/ucx.spec.in	2022-07-18 07:17:35.000000000 +0000
+++ ucx-1.13.1/ucx.spec.in	2022-09-12 09:22:28.000000000 +0000
@@ -349,6 +349,8 @@
 %endif
 
 %changelog
+* Mon Aug 22 2022 Evgeny Leksikov 1.13.1-1
+- Bump version to 1.13.1
 * Wed Nov 10 2021 Yossi Itigin 1.13.0-1
 - Bump version to 1.13.0
 * Wed Jun 9 2021 Yossi Itigin 1.12.0-1