Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index b909211..c64e154 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
config RDS
tristate "The Reliable Datagram Sockets Protocol"
@@ -22,6 +23,6 @@
This transport does not support RDMA operations.
config RDS_DEBUG
- bool "RDS debugging messages"
+ bool "RDS debugging messages"
depends on RDS
- default n
+ default n
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 65387e1..1a5bf3f 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -254,7 +254,40 @@
static int rds_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
- return -ENOIOCTLCMD;
+ struct rds_sock *rs = rds_sk_to_rs(sock->sk);
+ rds_tos_t utos, tos = 0;
+
+ switch (cmd) {
+ case SIOCRDSSETTOS:
+ if (get_user(utos, (rds_tos_t __user *)arg))
+ return -EFAULT;
+
+ if (rs->rs_transport &&
+ rs->rs_transport->get_tos_map)
+ tos = rs->rs_transport->get_tos_map(utos);
+ else
+ return -ENOIOCTLCMD;
+
+ spin_lock_bh(&rds_sock_lock);
+ if (rs->rs_tos || rs->rs_conn) {
+ spin_unlock_bh(&rds_sock_lock);
+ return -EINVAL;
+ }
+ rs->rs_tos = tos;
+ spin_unlock_bh(&rds_sock_lock);
+ break;
+ case SIOCRDSGETTOS:
+ spin_lock_bh(&rds_sock_lock);
+ tos = rs->rs_tos;
+ spin_unlock_bh(&rds_sock_lock);
+ if (put_user(tos, (rds_tos_t __user *)arg))
+ return -EFAULT;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return 0;
}
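
For reference, the new ioctl pair is exercised from userspace roughly as below. A minimal sketch, not part of the patch: it assumes SIOCRDSSETTOS, SIOCRDSGETTOS, SOL_RDS, SO_RDS_TRANSPORT and RDS_TRANS_IB come from the matching uapi <linux/rds.h>, and trims error handling. The transport must be chosen first so get_tos_map() exists, and the TOS must be set before a connection exists.

#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/rds.h>

int main(void)
{
	int fd = socket(AF_RDS, SOCK_SEQPACKET, 0);
	int trans = RDS_TRANS_IB;	/* SETTOS returns -ENOIOCTLCMD without a transport */
	uint8_t tos = 4;

	if (fd < 0)
		return 1;
	setsockopt(fd, SOL_RDS, SO_RDS_TRANSPORT, &trans, sizeof(trans));
	if (ioctl(fd, SIOCRDSSETTOS, &tos) < 0)	/* -EINVAL once rs_tos/rs_conn is set */
		perror("SIOCRDSSETTOS");
	tos = 0;
	if (ioctl(fd, SIOCRDSGETTOS, &tos) == 0)
		printf("tos=%u\n", tos);
	return 0;
}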
static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval,
@@ -348,7 +381,7 @@
}
static int rds_enable_recvtstamp(struct sock *sk, char __user *optval,
- int optlen)
+ int optlen, int optname)
{
int val, valbool;
@@ -360,6 +393,9 @@
valbool = val ? 1 : 0;
+ if (optname == SO_TIMESTAMP_NEW)
+ sock_set_flag(sk, SOCK_TSTAMP_NEW);
+
if (valbool)
sock_set_flag(sk, SOCK_RCVTSTAMP);
else
@@ -430,9 +466,10 @@
ret = rds_set_transport(rs, optval, optlen);
release_sock(sock->sk);
break;
- case SO_TIMESTAMP:
+ case SO_TIMESTAMP_OLD:
+ case SO_TIMESTAMP_NEW:
lock_sock(sock->sk);
- ret = rds_enable_recvtstamp(sock->sk, optval, optlen);
+ ret = rds_enable_recvtstamp(sock->sk, optval, optlen, optname);
release_sock(sock->sk);
break;
case SO_RDS_MSG_RXPATH_LATENCY:
@@ -506,6 +543,9 @@
struct rds_sock *rs = rds_sk_to_rs(sk);
int ret = 0;
+ if (addr_len < offsetofend(struct sockaddr, sa_family))
+ return -EINVAL;
+
lock_sock(sk);
switch (uaddr->sa_family) {
@@ -519,7 +559,7 @@
ret = -EDESTADDRREQ;
break;
}
- if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)) ||
+ if (ipv4_is_multicast(sin->sin_addr.s_addr) ||
sin->sin_addr.s_addr == htonl(INADDR_BROADCAST)) {
ret = -EINVAL;
break;
@@ -553,7 +593,7 @@
addr4 = sin6->sin6_addr.s6_addr32[3];
if (addr4 == htonl(INADDR_ANY) ||
addr4 == htonl(INADDR_BROADCAST) ||
- IN_MULTICAST(ntohl(addr4))) {
+ ipv4_is_multicast(addr4)) {
ret = -EPROTOTYPE;
break;
}
@@ -646,6 +686,8 @@
spin_lock_init(&rs->rs_rdma_lock);
rs->rs_rdma_keys = RB_ROOT;
rs->rs_rx_traces = 0;
+ rs->rs_tos = 0;
+ rs->rs_conn = NULL;
spin_lock_bh(&rds_sock_lock);
list_add_tail(&rs->rs_item, &rds_sock_list);
@@ -663,7 +705,7 @@
if (sock->type != SOCK_SEQPACKET || protocol)
return -ESOCKTNOSUPPORT;
- sk = sk_alloc(net, AF_RDS, GFP_ATOMIC, &rds_proto, kern);
+ sk = sk_alloc(net, AF_RDS, GFP_KERNEL, &rds_proto, kern);
if (!sk)
return -ENOMEM;
@@ -699,6 +741,10 @@
spin_lock_bh(&rds_sock_lock);
list_for_each_entry(rs, &rds_sock_list, rs_item) {
+ /* This option only supports IPv4 sockets. */
+ if (!ipv6_addr_v4mapped(&rs->rs_bound_addr))
+ continue;
+
read_lock(&rs->rs_recv_lock);
/* XXX too lazy to maintain counts.. */
@@ -720,21 +766,60 @@
lens->each = sizeof(struct rds_info_message);
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void rds6_sock_inc_info(struct socket *sock, unsigned int len,
+ struct rds_info_iterator *iter,
+ struct rds_info_lengths *lens)
+{
+ struct rds_incoming *inc;
+ unsigned int total = 0;
+ struct rds_sock *rs;
+
+ len /= sizeof(struct rds6_info_message);
+
+ spin_lock_bh(&rds_sock_lock);
+
+ list_for_each_entry(rs, &rds_sock_list, rs_item) {
+ read_lock(&rs->rs_recv_lock);
+
+ list_for_each_entry(inc, &rs->rs_recv_queue, i_item) {
+ total++;
+ if (total <= len)
+ rds6_inc_info_copy(inc, iter, &inc->i_saddr,
+ &rs->rs_bound_addr, 1);
+ }
+
+ read_unlock(&rs->rs_recv_lock);
+ }
+
+ spin_unlock_bh(&rds_sock_lock);
+
+ lens->nr = total;
+ lens->each = sizeof(struct rds6_info_message);
+}
+#endif
+
static void rds_sock_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
struct rds_info_socket sinfo;
+ unsigned int cnt = 0;
struct rds_sock *rs;
len /= sizeof(struct rds_info_socket);
spin_lock_bh(&rds_sock_lock);
- if (len < rds_sock_count)
+ if (len < rds_sock_count) {
+ cnt = rds_sock_count;
goto out;
+ }
list_for_each_entry(rs, &rds_sock_list, rs_item) {
+ /* This option only supports IPv4 sockets. */
+ if (!ipv6_addr_v4mapped(&rs->rs_bound_addr))
+ continue;
sinfo.sndbuf = rds_sk_sndbuf(rs);
sinfo.rcvbuf = rds_sk_rcvbuf(rs);
sinfo.bound_addr = rs->rs_bound_addr_v4;
@@ -744,15 +829,51 @@
sinfo.inum = sock_i_ino(rds_rs_to_sk(rs));
rds_info_copy(iter, &sinfo, sizeof(sinfo));
+ cnt++;
}
out:
- lens->nr = rds_sock_count;
+ lens->nr = cnt;
lens->each = sizeof(struct rds_info_socket);
spin_unlock_bh(&rds_sock_lock);
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void rds6_sock_info(struct socket *sock, unsigned int len,
+ struct rds_info_iterator *iter,
+ struct rds_info_lengths *lens)
+{
+ struct rds6_info_socket sinfo6;
+ struct rds_sock *rs;
+
+ len /= sizeof(struct rds6_info_socket);
+
+ spin_lock_bh(&rds_sock_lock);
+
+ if (len < rds_sock_count)
+ goto out;
+
+ list_for_each_entry(rs, &rds_sock_list, rs_item) {
+ sinfo6.sndbuf = rds_sk_sndbuf(rs);
+ sinfo6.rcvbuf = rds_sk_rcvbuf(rs);
+ sinfo6.bound_addr = rs->rs_bound_addr;
+ sinfo6.connected_addr = rs->rs_conn_addr;
+ sinfo6.bound_port = rs->rs_bound_port;
+ sinfo6.connected_port = rs->rs_conn_port;
+ sinfo6.inum = sock_i_ino(rds_rs_to_sk(rs));
+
+ rds_info_copy(iter, &sinfo6, sizeof(sinfo6));
+ }
+
+ out:
+ lens->nr = rds_sock_count;
+ lens->each = sizeof(struct rds6_info_socket);
+
+ spin_unlock_bh(&rds_sock_lock);
+}
+#endif
+
static void rds_exit(void)
{
sock_unregister(rds_family_ops.family);
@@ -766,6 +887,10 @@
rds_bind_lock_destroy();
rds_info_deregister_func(RDS_INFO_SOCKETS, rds_sock_info);
rds_info_deregister_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info);
+#if IS_ENABLED(CONFIG_IPV6)
+ rds_info_deregister_func(RDS6_INFO_SOCKETS, rds6_sock_info);
+ rds_info_deregister_func(RDS6_INFO_RECV_MESSAGES, rds6_sock_inc_info);
+#endif
}
module_exit(rds_exit);
@@ -803,6 +928,10 @@
rds_info_register_func(RDS_INFO_SOCKETS, rds_sock_info);
rds_info_register_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info);
+#if IS_ENABLED(CONFIG_IPV6)
+ rds_info_register_func(RDS6_INFO_SOCKETS, rds6_sock_info);
+ rds_info_register_func(RDS6_INFO_RECV_MESSAGES, rds6_sock_inc_info);
+#endif
goto out;
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 762d2c6..5b5fb4c 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -78,10 +78,10 @@
__rds_create_bind_key(key, addr, port, scope_id);
rcu_read_lock();
rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
- if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
- rds_sock_addref(rs);
- else
+ if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) ||
+ !refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt)))
rs = NULL;
+
rcu_read_unlock();
rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
@@ -173,13 +173,15 @@
/* We allow an RDS socket to be bound to either IPv4 or IPv6
* address.
*/
+ if (addr_len < offsetofend(struct sockaddr, sa_family))
+ return -EINVAL;
if (uaddr->sa_family == AF_INET) {
struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
if (addr_len < sizeof(struct sockaddr_in) ||
sin->sin_addr.s_addr == htonl(INADDR_ANY) ||
sin->sin_addr.s_addr == htonl(INADDR_BROADCAST) ||
- IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
+ ipv4_is_multicast(sin->sin_addr.s_addr))
return -EINVAL;
ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &v6addr);
binding_addr = &v6addr;
@@ -204,7 +206,7 @@
addr4 = sin6->sin6_addr.s6_addr32[3];
if (addr4 == htonl(INADDR_ANY) ||
addr4 == htonl(INADDR_BROADCAST) ||
- IN_MULTICAST(ntohl(addr4)))
+ ipv4_is_multicast(addr4))
return -EINVAL;
}
/* The scope ID must be specified for link local address. */
@@ -237,34 +239,33 @@
goto out;
}
+ /* The transport can be set using SO_RDS_TRANSPORT option before the
+ * socket is bound.
+ */
+ if (rs->rs_transport) {
+ trans = rs->rs_transport;
+ if (!trans->laddr_check ||
+ trans->laddr_check(sock_net(sock->sk),
+ binding_addr, scope_id) != 0) {
+ ret = -ENOPROTOOPT;
+ goto out;
+ }
+ } else {
+ trans = rds_trans_get_preferred(sock_net(sock->sk),
+ binding_addr, scope_id);
+ if (!trans) {
+ ret = -EADDRNOTAVAIL;
+ pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
+ __func__, binding_addr);
+ goto out;
+ }
+ rs->rs_transport = trans;
+ }
+
sock_set_flag(sk, SOCK_RCU_FREE);
ret = rds_add_bound(rs, binding_addr, &port, scope_id);
if (ret)
- goto out;
-
- if (rs->rs_transport) { /* previously bound */
- trans = rs->rs_transport;
- if (trans->laddr_check(sock_net(sock->sk),
- binding_addr, scope_id) != 0) {
- ret = -ENOPROTOOPT;
- rds_remove_bound(rs);
- } else {
- ret = 0;
- }
- goto out;
- }
- trans = rds_trans_get_preferred(sock_net(sock->sk), binding_addr,
- scope_id);
- if (!trans) {
- ret = -EADDRNOTAVAIL;
- rds_remove_bound(rs);
- pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
- __func__, binding_addr);
- goto out;
- }
-
- rs->rs_transport = trans;
- ret = 0;
+ rs->rs_transport = NULL;
out:
release_sock(sk);
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 3bd2f4a..ed7f213 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -84,7 +84,7 @@
const struct in6_addr *laddr,
const struct in6_addr *faddr,
struct rds_transport *trans,
- int dev_if)
+ u8 tos, int dev_if)
{
struct rds_connection *conn, *ret = NULL;
@@ -92,6 +92,7 @@
if (ipv6_addr_equal(&conn->c_faddr, faddr) &&
ipv6_addr_equal(&conn->c_laddr, laddr) &&
conn->c_trans == trans &&
+ conn->c_tos == tos &&
net == rds_conn_net(conn) &&
conn->c_dev_if == dev_if) {
ret = conn;
@@ -139,6 +140,7 @@
atomic_set(&cp->cp_state, RDS_CONN_DOWN);
cp->cp_send_gen = 0;
cp->cp_reconnect_jiffies = 0;
+ cp->cp_conn->c_proposed_version = RDS_PROTOCOL_VERSION;
INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker);
@@ -159,7 +161,7 @@
const struct in6_addr *laddr,
const struct in6_addr *faddr,
struct rds_transport *trans,
- gfp_t gfp,
+ gfp_t gfp, u8 tos,
int is_outgoing,
int dev_if)
{
@@ -171,7 +173,7 @@
int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);
rcu_read_lock();
- conn = rds_conn_lookup(net, head, laddr, faddr, trans, dev_if);
+ conn = rds_conn_lookup(net, head, laddr, faddr, trans, tos, dev_if);
if (conn &&
conn->c_loopback &&
conn->c_trans != &rds_loop_transport &&
@@ -205,6 +207,7 @@
conn->c_isv6 = !ipv6_addr_v4mapped(laddr);
conn->c_faddr = *faddr;
conn->c_dev_if = dev_if;
+ conn->c_tos = tos;
#if IS_ENABLED(CONFIG_IPV6)
/* If the local address is link local, set c_bound_if to be the
@@ -297,7 +300,7 @@
struct rds_connection *found;
found = rds_conn_lookup(net, head, laddr, faddr, trans,
- dev_if);
+ tos, dev_if);
if (found) {
struct rds_conn_path *cp;
int i;
@@ -332,10 +335,10 @@
struct rds_connection *rds_conn_create(struct net *net,
const struct in6_addr *laddr,
const struct in6_addr *faddr,
- struct rds_transport *trans, gfp_t gfp,
- int dev_if)
+ struct rds_transport *trans, u8 tos,
+ gfp_t gfp, int dev_if)
{
- return __rds_conn_create(net, laddr, faddr, trans, gfp, 0, dev_if);
+ return __rds_conn_create(net, laddr, faddr, trans, gfp, tos, 0, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create);
@@ -343,9 +346,9 @@
const struct in6_addr *laddr,
const struct in6_addr *faddr,
struct rds_transport *trans,
- gfp_t gfp, int dev_if)
+ u8 tos, gfp_t gfp, int dev_if)
{
- return __rds_conn_create(net, laddr, faddr, trans, gfp, 1, dev_if);
+ return __rds_conn_create(net, laddr, faddr, trans, gfp, tos, 1, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
@@ -733,6 +736,7 @@
cinfo->next_rx_seq = cp->cp_next_rx_seq;
cinfo->laddr = conn->c_laddr.s6_addr32[3];
cinfo->faddr = conn->c_faddr.s6_addr32[3];
+ cinfo->tos = conn->c_tos;
strncpy(cinfo->transport, conn->c_trans->t_name,
sizeof(cinfo->transport));
cinfo->flags = 0;
diff --git a/net/rds/ib.c b/net/rds/ib.c
index eba75c1..9de2ae2 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -87,7 +87,7 @@
spin_lock_irqsave(&rds_ibdev->spinlock, flags);
list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
- rds_conn_drop(ic->conn);
+ rds_conn_path_drop(&ic->conn->c_path[0], true);
spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
}
@@ -143,13 +143,16 @@
refcount_set(&rds_ibdev->refcount, 1);
INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
+ INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
+ INIT_LIST_HEAD(&rds_ibdev->conn_list);
+
rds_ibdev->max_wrs = device->attrs.max_qp_wr;
rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);
has_fr = (device->attrs.device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS);
- has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
- device->map_phys_fmr && device->unmap_fmr);
+ has_fmr = (device->ops.alloc_fmr && device->ops.dealloc_fmr &&
+ device->ops.map_phys_fmr && device->ops.unmap_fmr);
rds_ibdev->use_fastreg = (has_fr && !has_fmr);
rds_ibdev->fmr_max_remaps = device->attrs.max_map_per_fmr?: 32;
@@ -203,9 +206,6 @@
device->name,
rds_ibdev->use_fastreg ? "FRMR" : "FMR");
- INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
- INIT_LIST_HEAD(&rds_ibdev->conn_list);
-
down_write(&rds_ib_devices_lock);
list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
up_write(&rds_ib_devices_lock);
@@ -291,7 +291,7 @@
void *buffer)
{
struct rds_info_rdma_connection *iinfo = buffer;
- struct rds_ib_connection *ic;
+ struct rds_ib_connection *ic = conn->c_transport_data;
/* We will only ever look at IB transports */
if (conn->c_trans != &rds_ib_transport)
@@ -301,14 +301,16 @@
iinfo->src_addr = conn->c_laddr.s6_addr32[3];
iinfo->dst_addr = conn->c_faddr.s6_addr32[3];
+ if (ic) {
+ iinfo->tos = conn->c_tos;
+ iinfo->sl = ic->i_sl;
+ }
memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
if (rds_conn_state(conn) == RDS_CONN_UP) {
struct rds_ib_device *rds_ibdev;
- ic = conn->c_transport_data;
-
rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid,
(union ib_gid *)&iinfo->dst_gid);
@@ -317,6 +319,7 @@
iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
iinfo->max_send_sge = rds_ibdev->max_sge;
rds_ib_get_mr_info(rds_ibdev, iinfo);
+ iinfo->cache_allocs = atomic_read(&ic->i_cache_allocs);
}
return 1;
}
@@ -327,7 +330,7 @@
void *buffer)
{
struct rds6_info_rdma_connection *iinfo6 = buffer;
- struct rds_ib_connection *ic;
+ struct rds_ib_connection *ic = conn->c_transport_data;
/* We will only ever look at IB transports */
if (conn->c_trans != &rds_ib_transport)
@@ -335,6 +338,10 @@
iinfo6->src_addr = conn->c_laddr;
iinfo6->dst_addr = conn->c_faddr;
+ if (ic) {
+ iinfo6->tos = conn->c_tos;
+ iinfo6->sl = ic->i_sl;
+ }
memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid));
memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid));
@@ -342,7 +349,6 @@
if (rds_conn_state(conn) == RDS_CONN_UP) {
struct rds_ib_device *rds_ibdev;
- ic = conn->c_transport_data;
rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
(union ib_gid *)&iinfo6->dst_gid);
rds_ibdev = ic->rds_ibdev;
@@ -350,6 +356,7 @@
iinfo6->max_recv_wr = ic->i_recv_ring.w_nr;
iinfo6->max_send_sge = rds_ibdev->max_sge;
rds6_ib_get_mr_info(rds_ibdev, iinfo6);
+ iinfo6->cache_allocs = atomic_read(&ic->i_cache_allocs);
}
return 1;
}
@@ -514,6 +521,15 @@
rds_ib_mr_exit();
}
+static u8 rds_ib_get_tos_map(u8 tos)
+{
+ /* 1:1 user to transport map for RDMA transport.
+	 * In the future, if a custom map is desired, the hook can export
+	 * a user-configurable map.
+ */
+ return tos;
+}
+
struct rds_transport rds_ib_transport = {
.laddr_check = rds_ib_laddr_check,
.xmit_path_complete = rds_ib_xmit_path_complete,
@@ -536,6 +552,7 @@
.sync_mr = rds_ib_sync_mr,
.free_mr = rds_ib_free_mr,
.flush_mrs = rds_ib_flush_mrs,
+ .get_tos_map = rds_ib_get_tos_map,
.t_owner = THIS_MODULE,
.t_name = "infiniband",
.t_unloading = rds_ib_is_unloading,
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 71ff356..f2b558e 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -15,8 +15,7 @@
#define RDS_IB_DEFAULT_RECV_WR 1024
#define RDS_IB_DEFAULT_SEND_WR 256
-#define RDS_IB_DEFAULT_FR_WR 256
-#define RDS_IB_DEFAULT_FR_INV_WR 256
+#define RDS_IB_DEFAULT_FR_WR 512
#define RDS_IB_DEFAULT_RETRY_COUNT 1
@@ -67,7 +66,9 @@
u8 ricpc_protocol_major;
u8 ricpc_protocol_minor;
__be16 ricpc_protocol_minor_mask; /* bitmask */
- __be32 ricpc_reserved1;
+ u8 ricpc_dp_toss;
+ u8 ripc_reserved1;
+ __be16 ripc_reserved2;
__be64 ricpc_ack_seq;
__be32 ricpc_credit; /* non-zero enables flow ctl */
};
@@ -155,7 +156,7 @@
/* To control the number of wrs from fastreg */
atomic_t i_fastreg_wrs;
- atomic_t i_fastunreg_wrs;
+ atomic_t i_fastreg_inuse_count;
/* interrupt handling */
struct tasklet_struct i_send_tasklet;
@@ -219,6 +220,7 @@
/* Send/Recv vectors */
int i_scq_vector;
int i_rcq_vector;
+ u8 i_sl;
};
/* This assumes that atomic_t is at least 32 bits */
@@ -331,10 +333,8 @@
unsigned int i;
for_each_sg(sglist, sg, sg_dma_len, i) {
- ib_dma_sync_single_for_cpu(dev,
- ib_sg_dma_address(dev, sg),
- ib_sg_dma_len(dev, sg),
- direction);
+ ib_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+ sg_dma_len(sg), direction);
}
}
#define ib_dma_sync_sg_for_cpu rds_ib_dma_sync_sg_for_cpu
@@ -348,10 +348,8 @@
unsigned int i;
for_each_sg(sglist, sg, sg_dma_len, i) {
- ib_dma_sync_single_for_device(dev,
- ib_sg_dma_address(dev, sg),
- ib_sg_dma_len(dev, sg),
- direction);
+ ib_dma_sync_single_for_device(dev, sg_dma_address(sg),
+ sg_dma_len(sg), direction);
}
}
#define ib_dma_sync_sg_for_device rds_ib_dma_sync_sg_for_device
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index bfbb31f..18c6fac 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -40,6 +40,7 @@
#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
+#include "ib_mr.h"
/*
* Set the selected protocol version
@@ -133,23 +134,27 @@
rds_ib_set_flow_control(conn, be32_to_cpu(credit));
}
- if (conn->c_version < RDS_PROTOCOL(3, 1)) {
- pr_notice("RDS/IB: Connection <%pI6c,%pI6c> version %u.%u no longer supported\n",
- &conn->c_laddr, &conn->c_faddr,
- RDS_PROTOCOL_MAJOR(conn->c_version),
- RDS_PROTOCOL_MINOR(conn->c_version));
- set_bit(RDS_DESTROY_PENDING, &conn->c_path[0].cp_flags);
- rds_conn_destroy(conn);
- return;
- } else {
- pr_notice("RDS/IB: %s conn connected <%pI6c,%pI6c> version %u.%u%s\n",
- ic->i_active_side ? "Active" : "Passive",
- &conn->c_laddr, &conn->c_faddr,
- RDS_PROTOCOL_MAJOR(conn->c_version),
- RDS_PROTOCOL_MINOR(conn->c_version),
- ic->i_flowctl ? ", flow control" : "");
+ if (conn->c_version < RDS_PROTOCOL_VERSION) {
+ if (conn->c_version != RDS_PROTOCOL_COMPAT_VERSION) {
+ pr_notice("RDS/IB: Connection <%pI6c,%pI6c> version %u.%u no longer supported\n",
+ &conn->c_laddr, &conn->c_faddr,
+ RDS_PROTOCOL_MAJOR(conn->c_version),
+ RDS_PROTOCOL_MINOR(conn->c_version));
+ rds_conn_destroy(conn);
+ return;
+ }
}
+ pr_notice("RDS/IB: %s conn connected <%pI6c,%pI6c,%d> version %u.%u%s\n",
+ ic->i_active_side ? "Active" : "Passive",
+ &conn->c_laddr, &conn->c_faddr, conn->c_tos,
+ RDS_PROTOCOL_MAJOR(conn->c_version),
+ RDS_PROTOCOL_MINOR(conn->c_version),
+ ic->i_flowctl ? ", flow control" : "");
+
+ /* receive sl from the peer */
+ ic->i_sl = ic->i_cm_id->route.path_rec->sl;
+
atomic_set(&ic->i_cq_quiesce, 0);
/* Init rings and fill recv. this needs to wait until protocol
@@ -184,6 +189,7 @@
NULL);
}
+ conn->c_proposed_version = conn->c_version;
rds_connect_complete(conn);
}
@@ -220,6 +226,7 @@
cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
dp->ricp_v6.dp_ack_seq =
cpu_to_be64(rds_ib_piggyb_ack(ic));
+ dp->ricp_v6.dp_cmn.ricpc_dp_toss = conn->c_tos;
conn_param->private_data = &dp->ricp_v6;
conn_param->private_data_len = sizeof(dp->ricp_v6);
@@ -234,6 +241,7 @@
cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
dp->ricp_v4.dp_ack_seq =
cpu_to_be64(rds_ib_piggyb_ack(ic));
+ dp->ricp_v4.dp_cmn.ricpc_dp_toss = conn->c_tos;
conn_param->private_data = &dp->ricp_v4;
conn_param->private_data_len = sizeof(dp->ricp_v4);
@@ -389,10 +397,9 @@
rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
break;
default:
- rdsdebug("Fatal QP Event %u (%s) "
- "- connection %pI6c->%pI6c, reconnecting\n",
- event->event, ib_event_msg(event->event),
- &conn->c_laddr, &conn->c_faddr);
+ rdsdebug("Fatal QP Event %u (%s) - connection %pI6c->%pI6c, reconnecting\n",
+ event->event, ib_event_msg(event->event),
+ &conn->c_laddr, &conn->c_faddr);
rds_conn_drop(conn);
break;
}
@@ -443,6 +450,7 @@
struct ib_qp_init_attr attr;
struct ib_cq_init_attr cq_attr = {};
struct rds_ib_device *rds_ibdev;
+ unsigned long max_wrs;
int ret, fr_queue_space;
/*
@@ -457,18 +465,20 @@
* completion queue and send queue. This extra space is used for FRMR
* registration and invalidation work requests
*/
- fr_queue_space = rds_ibdev->use_fastreg ?
- (RDS_IB_DEFAULT_FR_WR + 1) +
- (RDS_IB_DEFAULT_FR_INV_WR + 1)
- : 0;
+ fr_queue_space = (rds_ibdev->use_fastreg ? RDS_IB_DEFAULT_FR_WR : 0);
/* add the conn now so that connection establishment has the dev */
rds_ib_add_conn(rds_ibdev, conn);
- if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
- rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
- if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
- rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);
+ max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_send_wr + 1 ?
+ rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_send_wr;
+ if (ic->i_send_ring.w_nr != max_wrs)
+ rds_ib_ring_resize(&ic->i_send_ring, max_wrs);
+
+ max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_recv_wr + 1 ?
+ rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_recv_wr;
+ if (ic->i_recv_ring.w_nr != max_wrs)
+ rds_ib_ring_resize(&ic->i_recv_ring, max_wrs);
/* Protection domain and memory range */
ic->i_pd = rds_ibdev->pd;
@@ -526,8 +536,6 @@
attr.qp_type = IB_QPT_RC;
attr.send_cq = ic->i_send_cq;
attr.recv_cq = ic->i_recv_cq;
- atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
- atomic_set(&ic->i_fastunreg_wrs, RDS_IB_DEFAULT_FR_INV_WR);
/*
* XXX this can fail if max_*_wr is too large? Are we supposed
@@ -608,11 +616,11 @@
qp_out:
rdma_destroy_qp(ic->i_cm_id);
recv_cq_out:
- if (!ib_destroy_cq(ic->i_recv_cq))
- ic->i_recv_cq = NULL;
+ ib_destroy_cq(ic->i_recv_cq);
+ ic->i_recv_cq = NULL;
send_cq_out:
- if (!ib_destroy_cq(ic->i_send_cq))
- ic->i_send_cq = NULL;
+ ib_destroy_cq(ic->i_send_cq);
+ ic->i_send_cq = NULL;
rds_ibdev_out:
rds_ib_remove_conn(rds_ibdev, conn);
out:
@@ -660,13 +668,16 @@
/* Even if len is crap *now* I still want to check it. -ASG */
if (event->param.conn.private_data_len < data_len || major == 0)
- return RDS_PROTOCOL_3_0;
+ return RDS_PROTOCOL_4_0;
common = be16_to_cpu(mask) & RDS_IB_SUPPORTED_PROTOCOLS;
- if (major == 3 && common) {
- version = RDS_PROTOCOL_3_0;
+ if (major == 4 && common) {
+ version = RDS_PROTOCOL_4_0;
while ((common >>= 1) != 0)
version++;
+ } else if (RDS_PROTOCOL_COMPAT_VERSION ==
+ RDS_PROTOCOL(major, minor)) {
+ version = RDS_PROTOCOL_COMPAT_VERSION;
} else {
if (isv6)
printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI6c using incompatible protocol version %u.%u\n",
@@ -729,8 +740,10 @@
/* Check whether the remote protocol version matches ours. */
version = rds_ib_protocol_compatible(event, isv6);
- if (!version)
+ if (!version) {
+ err = RDS_RDMA_REJ_INCOMPAT;
goto out;
+ }
dp = event->param.conn.private_data;
if (isv6) {
@@ -771,15 +784,16 @@
daddr6 = &d_mapped_addr;
}
- rdsdebug("saddr %pI6c daddr %pI6c RDSv%u.%u lguid 0x%llx fguid "
- "0x%llx\n", saddr6, daddr6,
- RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
+ rdsdebug("saddr %pI6c daddr %pI6c RDSv%u.%u lguid 0x%llx fguid 0x%llx, tos:%d\n",
+ saddr6, daddr6, RDS_PROTOCOL_MAJOR(version),
+ RDS_PROTOCOL_MINOR(version),
(unsigned long long)be64_to_cpu(lguid),
- (unsigned long long)be64_to_cpu(fguid));
+ (unsigned long long)be64_to_cpu(fguid), dp_cmn->ricpc_dp_toss);
/* RDS/IB is not currently netns aware, thus init_net */
conn = rds_conn_create(&init_net, daddr6, saddr6,
- &rds_ib_transport, GFP_KERNEL, ifindex);
+ &rds_ib_transport, dp_cmn->ricpc_dp_toss,
+ GFP_KERNEL, ifindex);
if (IS_ERR(conn)) {
rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
conn = NULL;
@@ -846,7 +860,7 @@
if (conn)
mutex_unlock(&conn->c_cm_lock);
if (err)
- rdma_reject(cm_id, NULL, 0);
+ rdma_reject(cm_id, &err, sizeof(int));
return destroy;
}
@@ -861,7 +875,7 @@
/* If the peer doesn't do protocol negotiation, we must
* default to RDSv3.0 */
- rds_ib_set_protocol(conn, RDS_PROTOCOL_3_0);
+ rds_ib_set_protocol(conn, RDS_PROTOCOL_4_1);
ic->i_flowctl = rds_ib_sysctl_flow_control; /* advertise flow control */
ret = rds_ib_setup_qp(conn);
@@ -870,7 +884,8 @@
goto out;
}
- rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
+ rds_ib_cm_fill_conn_param(conn, &conn_param, &dp,
+ conn->c_proposed_version,
UINT_MAX, UINT_MAX, isv6);
ret = rdma_connect(cm_id, &conn_param);
if (ret)
@@ -987,6 +1002,11 @@
ic->i_cm_id, err);
}
+ /* kick off "flush_worker" for all pools in order to reap
+ * all FRMR registrations that are still marked "FRMR_IS_INUSE"
+ */
+ rds_ib_flush_mrs();
+
/*
* We want to wait for tx and rx completion to finish
* before we tear down the connection, but we have to be
@@ -999,8 +1019,8 @@
wait_event(rds_ib_ring_empty_wait,
rds_ib_ring_empty(&ic->i_recv_ring) &&
(atomic_read(&ic->i_signaled_sends) == 0) &&
- (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR) &&
- (atomic_read(&ic->i_fastunreg_wrs) == RDS_IB_DEFAULT_FR_INV_WR));
+ (atomic_read(&ic->i_fastreg_inuse_count) == 0) &&
+ (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR));
tasklet_kill(&ic->i_send_tasklet);
tasklet_kill(&ic->i_recv_tasklet);
@@ -1085,8 +1105,9 @@
ic->i_flowctl = 0;
atomic_set(&ic->i_credits, 0);
- rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
- rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);
+ /* Re-init rings, but retain sizes. */
+ rds_ib_ring_init(&ic->i_send_ring, ic->i_send_ring.w_nr);
+ rds_ib_ring_init(&ic->i_recv_ring, ic->i_recv_ring.w_nr);
if (ic->i_ibinc) {
rds_inc_put(&ic->i_ibinc->ii_inc);
@@ -1127,13 +1148,14 @@
spin_lock_init(&ic->i_ack_lock);
#endif
atomic_set(&ic->i_signaled_sends, 0);
+ atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
/*
* rds_ib_conn_shutdown() waits for these to be emptied so they
* must be initialized before it can be called.
*/
- rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
- rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);
+ rds_ib_ring_init(&ic->i_send_ring, 0);
+ rds_ib_ring_init(&ic->i_recv_ring, 0);
ic->conn = conn;
conn->c_transport_data = ic;
diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
index e0f70c4..93c0437 100644
--- a/net/rds/ib_fmr.c
+++ b/net/rds/ib_fmr.c
@@ -44,6 +44,17 @@
else
pool = rds_ibdev->mr_1m_pool;
+ if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+ queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+
+ /* Switch pools if one of the pool is reaching upper limit */
+ if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
+ if (pool->pool_type == RDS_IB_MR_8K_POOL)
+ pool = rds_ibdev->mr_1m_pool;
+ else
+ pool = rds_ibdev->mr_8k_pool;
+ }
+
ibmr = rds_ib_try_reuse_ibmr(pool);
if (ibmr)
return ibmr;
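
Illustrative arithmetic for the two thresholds above (hypothetical pool size, annotation only):

/* with pool->max_items == 2048:
 *   flush worker queued once dirty_count >= 2048 / 10          == 204
 *   fall over to the sibling pool once dirty_count >= 2048 * 9 / 10 == 1843
 */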
@@ -108,8 +119,8 @@
page_cnt = 0;
for (i = 0; i < sg_dma_len; ++i) {
- unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
- u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
+ unsigned int dma_len = sg_dma_len(&scat[i]);
+ u64 dma_addr = sg_dma_address(&scat[i]);
if (dma_addr & ~PAGE_MASK) {
if (i > 0) {
@@ -148,8 +159,8 @@
page_cnt = 0;
for (i = 0; i < sg_dma_len; ++i) {
- unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
- u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
+ unsigned int dma_len = sg_dma_len(&scat[i]);
+ u64 dma_addr = sg_dma_address(&scat[i]);
for (j = 0; j < dma_len; j += PAGE_SIZE)
dma_pages[page_cnt++] =
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index 6431a02..06ecf9d 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -32,6 +32,24 @@
#include "ib_mr.h"
+static inline void
+rds_transition_frwr_state(struct rds_ib_mr *ibmr,
+ enum rds_ib_fr_state old_state,
+ enum rds_ib_fr_state new_state)
+{
+ if (cmpxchg(&ibmr->u.frmr.fr_state,
+ old_state, new_state) == old_state &&
+ old_state == FRMR_IS_INUSE) {
+ /* enforce order of ibmr->u.frmr.fr_state update
+ * before decrementing i_fastreg_inuse_count
+ */
+ smp_mb__before_atomic();
+ atomic_dec(&ibmr->ic->i_fastreg_inuse_count);
+ if (waitqueue_active(&rds_ib_ring_empty_wait))
+ wake_up(&rds_ib_ring_empty_wait);
+ }
+}
+
static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
int npages)
{
@@ -75,6 +93,8 @@
pool->max_items_soft = pool->max_items;
frmr->fr_state = FRMR_IS_FREE;
+ init_waitqueue_head(&frmr->fr_inv_done);
+ init_waitqueue_head(&frmr->fr_reg_done);
return ibmr;
out_no_cigar:
@@ -116,13 +136,19 @@
if (unlikely(ret != ibmr->sg_len))
return ret < 0 ? ret : -EINVAL;
+ if (cmpxchg(&frmr->fr_state,
+ FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE)
+ return -EBUSY;
+
+ atomic_inc(&ibmr->ic->i_fastreg_inuse_count);
+
/* Perform a WR for the fast_reg_mr. Each individual page
* in the sg list is added to the fast reg page list and placed
* inside the fast_reg_mr WR. The key used is a rolling 8bit
* counter, which should guarantee uniqueness.
*/
ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);
- frmr->fr_state = FRMR_IS_INUSE;
+ frmr->fr_reg = true;
memset(®_wr, 0, sizeof(reg_wr));
reg_wr.wr.wr_id = (unsigned long)(void *)ibmr;
@@ -138,12 +164,23 @@
ret = ib_post_send(ibmr->ic->i_cm_id->qp, ®_wr.wr, NULL);
if (unlikely(ret)) {
/* Failure here can be because of -ENOMEM as well */
- frmr->fr_state = FRMR_IS_STALE;
+ rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
+
atomic_inc(&ibmr->ic->i_fastreg_wrs);
if (printk_ratelimit())
pr_warn("RDS/IB: %s returned error(%d)\n",
__func__, ret);
+ goto out;
}
+
+	/* Wait for the registration to complete in order to prevent an invalid
+	 * access error resulting from the memory region being accessed while
+	 * its registration is still pending.
+	 */
+ wait_event(frmr->fr_reg_done, !frmr->fr_reg);
+
+out:
+
return ret;
}
@@ -181,8 +218,8 @@
ret = -EINVAL;
for (i = 0; i < ibmr->sg_dma_len; ++i) {
- unsigned int dma_len = ib_sg_dma_len(dev, &ibmr->sg[i]);
- u64 dma_addr = ib_sg_dma_address(dev, &ibmr->sg[i]);
+ unsigned int dma_len = sg_dma_len(&ibmr->sg[i]);
+ u64 dma_addr = sg_dma_address(&ibmr->sg[i]);
frmr->sg_byte_len += dma_len;
if (dma_addr & ~PAGE_MASK) {
@@ -239,8 +276,8 @@
if (frmr->fr_state != FRMR_IS_INUSE)
goto out;
- while (atomic_dec_return(&ibmr->ic->i_fastunreg_wrs) <= 0) {
- atomic_inc(&ibmr->ic->i_fastunreg_wrs);
+ while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
+ atomic_inc(&ibmr->ic->i_fastreg_wrs);
cpu_relax();
}
@@ -255,12 +292,29 @@
ret = ib_post_send(i_cm_id->qp, s_wr, NULL);
if (unlikely(ret)) {
- frmr->fr_state = FRMR_IS_STALE;
+ rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
frmr->fr_inv = false;
- atomic_inc(&ibmr->ic->i_fastunreg_wrs);
+ /* enforce order of frmr->fr_inv update
+ * before incrementing i_fastreg_wrs
+ */
+ smp_mb__before_atomic();
+ atomic_inc(&ibmr->ic->i_fastreg_wrs);
pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret);
goto out;
}
+
+	/* Wait for the FRMR_IS_FREE (or FRMR_IS_STALE) transition in order to
+	 * 1) avoid silly bouncing between "clean_list" and "drop_list"
+	 *    triggered by "rds_ib_reg_frmr", as it releases frmr
+	 *    regions whose state is not "FRMR_IS_FREE" right away.
+	 * 2) prevent an invalid access error in a race
+	 *    from a pending "IB_WR_LOCAL_INV" operation
+	 *    with a teardown ("dma_unmap_sg", "put_page")
+	 *    and de-registration ("ib_dereg_mr") of the corresponding
+	 *    memory region.
+	 */
+ wait_event(frmr->fr_inv_done, frmr->fr_state != FRMR_IS_INUSE);
+
out:
return ret;
}
@@ -271,7 +325,7 @@
struct rds_ib_frmr *frmr = &ibmr->u.frmr;
if (wc->status != IB_WC_SUCCESS) {
- frmr->fr_state = FRMR_IS_STALE;
+ rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
if (rds_conn_up(ic->conn))
rds_ib_conn_error(ic->conn,
"frmr completion <%pI4,%pI4> status %u(%s), vendor_err 0x%x, disconnecting and reconnecting\n",
@@ -283,12 +337,21 @@
}
if (frmr->fr_inv) {
- frmr->fr_state = FRMR_IS_FREE;
+ rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_FREE);
frmr->fr_inv = false;
- atomic_inc(&ic->i_fastreg_wrs);
- } else {
- atomic_inc(&ic->i_fastunreg_wrs);
+ wake_up(&frmr->fr_inv_done);
}
+
+ if (frmr->fr_reg) {
+ frmr->fr_reg = false;
+ wake_up(&frmr->fr_reg_done);
+ }
+
+ /* enforce order of frmr->{fr_reg,fr_inv} update
+ * before incrementing i_fastreg_wrs
+ */
+ smp_mb__before_atomic();
+ atomic_inc(&ic->i_fastreg_wrs);
}
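
Taken together with the posting paths above, registration and invalidation now draw from the single i_fastreg_wrs budget and complete synchronously: the poster sleeps and this completion handler wakes it. A rough pairing (annotation, not patch content):

/* poster (rds_ib_post_reg_frmr / rds_ib_post_inv)      completion handler (above)
 *   frmr->fr_reg = true; ib_post_send(...);              frmr->fr_reg = false;
 *   wait_event(frmr->fr_reg_done, !frmr->fr_reg);  <--   wake_up(&frmr->fr_reg_done);
 */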
void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
@@ -296,14 +359,18 @@
{
struct rds_ib_mr *ibmr, *next;
struct rds_ib_frmr *frmr;
- int ret = 0;
+ int ret = 0, ret2;
unsigned int freed = *nfreed;
/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
list_for_each_entry(ibmr, list, unmap_list) {
- if (ibmr->sg_dma_len)
- ret |= rds_ib_post_inv(ibmr);
+ if (ibmr->sg_dma_len) {
+ ret2 = rds_ib_post_inv(ibmr);
+ if (ret2 && !ret)
+ ret = ret2;
+ }
}
+
if (ret)
pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, ret);
diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h
index 5da12c2..9045a8c 100644
--- a/net/rds/ib_mr.h
+++ b/net/rds/ib_mr.h
@@ -57,6 +57,9 @@
struct ib_mr *mr;
enum rds_ib_fr_state fr_state;
bool fr_inv;
+ wait_queue_head_t fr_inv_done;
+ bool fr_reg;
+ wait_queue_head_t fr_reg_done;
struct ib_send_wr fr_wr;
unsigned int dma_npages;
unsigned int sg_byte_len;
@@ -97,6 +100,7 @@
struct llist_head free_list; /* unused MRs */
struct llist_head clean_list; /* unused & unmapped MRs */
wait_queue_head_t flush_wait;
+ spinlock_t clean_lock; /* "clean_list" concurrency */
atomic_t free_pinned; /* memory pinned by free MRs */
unsigned long max_items;
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 63c8d10..c8c1e3a 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -40,9 +40,6 @@
struct workqueue_struct *rds_ib_mr_wq;
-static DEFINE_PER_CPU(unsigned long, clean_list_grace);
-#define CLEAN_LIST_BUSY_BIT 0
-
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
struct rds_ib_device *rds_ibdev;
@@ -195,12 +192,11 @@
{
struct rds_ib_mr *ibmr = NULL;
struct llist_node *ret;
- unsigned long *flag;
+ unsigned long flags;
- preempt_disable();
- flag = this_cpu_ptr(&clean_list_grace);
- set_bit(CLEAN_LIST_BUSY_BIT, flag);
+ spin_lock_irqsave(&pool->clean_lock, flags);
ret = llist_del_first(&pool->clean_list);
+ spin_unlock_irqrestore(&pool->clean_lock, flags);
if (ret) {
ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
if (pool->pool_type == RDS_IB_MR_8K_POOL)
@@ -209,23 +205,9 @@
rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
}
- clear_bit(CLEAN_LIST_BUSY_BIT, flag);
- preempt_enable();
return ibmr;
}
-static inline void wait_clean_list_grace(void)
-{
- int cpu;
- unsigned long *flag;
-
- for_each_online_cpu(cpu) {
- flag = &per_cpu(clean_list_grace, cpu);
- while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
- cpu_relax();
- }
-}
-
void rds_ib_sync_mr(void *trans_private, int direction)
{
struct rds_ib_mr *ibmr = trans_private;
@@ -324,8 +306,7 @@
* of clusters. Each cluster has linked llist nodes of
* MR_CLUSTER_SIZE mrs that are ready for reuse.
*/
-static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
- struct list_head *list,
+static void list_to_llist_nodes(struct list_head *list,
struct llist_node **nodes_head,
struct llist_node **nodes_tail)
{
@@ -402,8 +383,13 @@
*/
dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
- if (free_all)
+ if (free_all) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->clean_lock, flags);
llist_append_to_list(&pool->clean_list, &unmap_list);
+ spin_unlock_irqrestore(&pool->clean_lock, flags);
+ }
free_goal = rds_ib_flush_goal(pool, free_all);
@@ -416,25 +402,20 @@
rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);
if (!list_empty(&unmap_list)) {
- /* we have to make sure that none of the things we're about
- * to put on the clean list would race with other cpus trying
- * to pull items off. The llist would explode if we managed to
- * remove something from the clean list and then add it back again
- * while another CPU was spinning on that same item in llist_del_first.
- *
- * This is pretty unlikely, but just in case wait for an llist grace period
- * here before adding anything back into the clean list.
- */
- wait_clean_list_grace();
+ unsigned long flags;
- list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
- if (ibmr_ret)
+ list_to_llist_nodes(&unmap_list, &clean_nodes, &clean_tail);
+ if (ibmr_ret) {
*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
-
+ clean_nodes = clean_nodes->next;
+ }
/* more than one entry in llist nodes */
- if (clean_nodes->next)
- llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
-
+ if (clean_nodes) {
+ spin_lock_irqsave(&pool->clean_lock, flags);
+ llist_add_batch(clean_nodes, clean_tail,
+ &pool->clean_list);
+ spin_unlock_irqrestore(&pool->clean_lock, flags);
+ }
}
atomic_sub(unpinned, &pool->free_pinned);
@@ -454,9 +435,6 @@
struct rds_ib_mr *ibmr = NULL;
int iter = 0;
- if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
- queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
-
while (1) {
ibmr = rds_ib_reuse_mr(pool);
if (ibmr)
@@ -472,7 +450,7 @@
rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
else
rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
- return ERR_PTR(-EAGAIN);
+ break;
}
/* We do have some empty MRs. Flush them out. */
@@ -486,7 +464,7 @@
return ibmr;
}
- return ibmr;
+ return NULL;
}
static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
@@ -611,6 +589,7 @@
init_llist_head(&pool->free_list);
init_llist_head(&pool->drop_list);
init_llist_head(&pool->clean_list);
+ spin_lock_init(&pool->clean_lock);
mutex_init(&pool->flush_lock);
init_waitqueue_head(&pool->flush_wait);
INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 2f16146..a0f99bb 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -168,6 +168,7 @@
list_del(&inc->ii_cache_entry);
WARN_ON(!list_empty(&inc->ii_frags));
kmem_cache_free(rds_ib_incoming_slab, inc);
+ atomic_dec(&rds_ib_allocation);
}
rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
@@ -346,8 +347,8 @@
sge->length = sizeof(struct rds_header);
sge = &recv->r_sge[1];
- sge->addr = ib_sg_dma_address(ic->i_cm_id->device, &recv->r_frag->f_sg);
- sge->length = ib_sg_dma_len(ic->i_cm_id->device, &recv->r_frag->f_sg);
+ sge->addr = sg_dma_address(&recv->r_frag->f_sg);
+ sge->length = sg_dma_len(&recv->r_frag->f_sg);
ret = 0;
out:
@@ -384,6 +385,7 @@
unsigned int posted = 0;
int ret = 0;
bool can_wait = !!(gfp & __GFP_DIRECT_RECLAIM);
+ bool must_wake = false;
u32 pos;
/* the goal here is to just make sure that someone, somewhere
@@ -404,14 +406,13 @@
recv = &ic->i_recvs[pos];
ret = rds_ib_recv_refill_one(conn, recv, gfp);
if (ret) {
+ must_wake = true;
break;
}
rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv,
recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
- (long) ib_sg_dma_address(
- ic->i_cm_id->device,
- &recv->r_frag->f_sg));
+ (long)sg_dma_address(&recv->r_frag->f_sg));
/* XXX when can this fail? */
ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL);
@@ -424,6 +425,11 @@
}
posted++;
+
+ if ((posted > 128 && need_resched()) || posted > 8192) {
+ must_wake = true;
+ break;
+ }
}
/* We're doing flow control - update the window. */
@@ -446,10 +452,13 @@
* if we should requeue.
*/
if (rds_conn_up(conn) &&
- ((can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
+ (must_wake ||
+ (can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
rds_ib_ring_empty(&ic->i_recv_ring))) {
queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
}
+ if (can_wait)
+ cond_resched();
}
/*
@@ -774,7 +783,7 @@
unsigned long frag_off;
unsigned long to_copy;
unsigned long copied;
- uint64_t uncongested = 0;
+ __le64 uncongested = 0;
void *addr;
/* catch completely corrupt packets */
@@ -791,7 +800,7 @@
copied = 0;
while (copied < RDS_CONG_MAP_BYTES) {
- uint64_t *src, *dst;
+ __le64 *src, *dst;
unsigned int k;
to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
@@ -826,9 +835,7 @@
}
/* the congestion map is in little endian order */
- uncongested = le64_to_cpu(uncongested);
-
- rds_cong_map_updated(map, uncongested);
+ rds_cong_map_updated(map, le64_to_cpu(uncongested));
}
static void rds_ib_process_recv(struct rds_connection *conn,
@@ -986,9 +993,9 @@
} else {
/* We expect errors as the qp is drained during shutdown */
if (rds_conn_up(conn) || rds_conn_connecting(conn))
- rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c> had status %u (%s), disconnecting and reconnecting\n",
+ rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c, %d> had status %u (%s), disconnecting and reconnecting\n",
&conn->c_laddr, &conn->c_faddr,
- wc->status,
+ conn->c_tos, wc->status,
ib_wc_status_msg(wc->status));
}
@@ -1041,9 +1048,14 @@
si_meminfo(&si);
rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;
- rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
- sizeof(struct rds_ib_incoming),
- 0, SLAB_HWCACHE_ALIGN, NULL);
+ rds_ib_incoming_slab =
+ kmem_cache_create_usercopy("rds_ib_incoming",
+ sizeof(struct rds_ib_incoming),
+ 0, SLAB_HWCACHE_ALIGN,
+ offsetof(struct rds_ib_incoming,
+ ii_inc.i_usercopy),
+ sizeof(struct rds_inc_usercopy),
+ NULL);
if (!rds_ib_incoming_slab)
goto out;
@@ -1061,6 +1073,8 @@
void rds_ib_recv_exit(void)
{
+ WARN_ON(atomic_read(&rds_ib_allocation));
+
kmem_cache_destroy(rds_ib_incoming_slab);
kmem_cache_destroy(rds_ib_frag_slab);
}
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 2dcb555..dfe6237 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -69,6 +69,16 @@
complete(rm, notify_status);
}
+static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
+ struct rm_data_op *op,
+ int wc_status)
+{
+ if (op->op_nents)
+ ib_dma_unmap_sg(ic->i_cm_id->device,
+ op->op_sg, op->op_nents,
+ DMA_TO_DEVICE);
+}
+
static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
struct rm_rdma_op *op,
int wc_status)
@@ -129,21 +139,6 @@
rds_ib_stats_inc(s_ib_atomic_fadd);
}
-static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
- struct rm_data_op *op,
- int wc_status)
-{
- struct rds_message *rm = container_of(op, struct rds_message, data);
-
- if (op->op_nents)
- ib_dma_unmap_sg(ic->i_cm_id->device,
- op->op_sg, op->op_nents,
- DMA_TO_DEVICE);
-
- if (rm->rdma.op_active && rm->data.op_notify)
- rds_ib_send_unmap_rdma(ic, &rm->rdma, wc_status);
-}
-
/*
* Unmap the resources associated with a struct send_work.
*
@@ -305,8 +300,9 @@
/* We expect errors as the qp is drained during shutdown */
if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
- rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c> had status %u (%s), disconnecting and reconnecting\n",
- &conn->c_laddr, &conn->c_faddr, wc->status,
+ rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c,%d> had status %u (%s), disconnecting and reconnecting\n",
+ &conn->c_laddr, &conn->c_faddr,
+ conn->c_tos, wc->status,
ib_wc_status_msg(wc->status));
}
}
@@ -522,7 +518,7 @@
if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
i = 1;
else
- i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
+ i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc == 0) {
@@ -645,16 +641,16 @@
if (i < work_alloc
&& scat != &rm->data.op_sg[rm->data.op_count]) {
len = min(RDS_FRAG_SIZE,
- ib_sg_dma_len(dev, scat) - rm->data.op_dmaoff);
+ sg_dma_len(scat) - rm->data.op_dmaoff);
send->s_wr.num_sge = 2;
- send->s_sge[1].addr = ib_sg_dma_address(dev, scat);
+ send->s_sge[1].addr = sg_dma_address(scat);
send->s_sge[1].addr += rm->data.op_dmaoff;
send->s_sge[1].length = len;
bytes_sent += len;
rm->data.op_dmaoff += len;
- if (rm->data.op_dmaoff == ib_sg_dma_len(dev, scat)) {
+ if (rm->data.op_dmaoff == sg_dma_len(scat)) {
scat++;
rm->data.op_dmasg++;
rm->data.op_dmaoff = 0;
@@ -808,8 +804,8 @@
}
/* Convert our struct scatterlist to struct ib_sge */
- send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
- send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
+ send->s_sge[0].addr = sg_dma_address(op->op_sg);
+ send->s_sge[0].length = sg_dma_len(op->op_sg);
send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
@@ -879,7 +875,7 @@
* Instead of knowing how to return a partial rdma read/write we insist that there
* be enough work requests to send the entire message.
*/
- i = ceil(op->op_count, max_sge);
+ i = DIV_ROUND_UP(op->op_count, max_sge);
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc != i) {
@@ -901,7 +897,9 @@
send->s_queued = jiffies;
send->s_op = NULL;
- nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);
+ if (!op->op_notify)
+ nr_sig += rds_ib_set_wr_signal_state(ic, send,
+ op->op_notify);
send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
send->s_rdma_wr.remote_addr = remote_addr;
@@ -921,9 +919,8 @@
for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
scat != &op->op_sg[op->op_count]; j++) {
- len = ib_sg_dma_len(ic->i_cm_id->device, scat);
- send->s_sge[j].addr =
- ib_sg_dma_address(ic->i_cm_id->device, scat);
+ len = sg_dma_len(scat);
+ send->s_sge[j].addr = sg_dma_address(scat);
send->s_sge[j].length = len;
send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
index 9252ad1..ac46d89 100644
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -42,7 +42,7 @@
static const char *const rds_ib_stat_names[] = {
"ib_connect_raced",
"ib_listen_closed_stale",
- "s_ib_evt_handler_call",
+ "ib_evt_handler_call",
"ib_tasklet_call",
"ib_tx_cq_event",
"ib_tx_ring_full",
diff --git a/net/rds/info.c b/net/rds/info.c
index e367a97..03f6fd5 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -193,7 +193,7 @@
ret = -ENOMEM;
goto out;
}
- ret = get_user_pages_fast(start, nr_pages, 1, pages);
+ ret = get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
if (ret != nr_pages) {
if (ret > 0)
nr_pages = ret;
diff --git a/net/rds/message.c b/net/rds/message.c
index 4b00b11..50f13f1 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -308,17 +308,28 @@
/*
* RDS ops use this to grab SG entries from the rm's sg pool.
*/
-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
+ int *ret)
{
struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
struct scatterlist *sg_ret;
- WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
- WARN_ON(!nents);
-
- if (rm->m_used_sgs + nents > rm->m_total_sgs)
+ if (WARN_ON(!ret))
return NULL;
+ if (nents <= 0) {
+ pr_warn("rds: alloc sgs failed! nents <= 0\n");
+ *ret = -EINVAL;
+ return NULL;
+ }
+
+ if (rm->m_used_sgs + nents > rm->m_total_sgs) {
+ pr_warn("rds: alloc sgs failed! total %d used %d nents %d\n",
+ rm->m_total_sgs, rm->m_used_sgs, nents);
+ *ret = -ENOMEM;
+ return NULL;
+ }
+
sg_ret = &sg_first[rm->m_used_sgs];
sg_init_table(sg_ret, nents);
rm->m_used_sgs += nents;
@@ -330,8 +341,9 @@
{
struct rds_message *rm;
unsigned int i;
- int num_sgs = ceil(total_len, PAGE_SIZE);
+ int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE);
int extra_bytes = num_sgs * sizeof(struct scatterlist);
+ int ret;
rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
if (!rm)
@@ -339,11 +351,11 @@
set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
- rm->data.op_nents = ceil(total_len, PAGE_SIZE);
- rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
+ rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
+ rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
if (!rm->data.op_sg) {
rds_message_put(rm);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
}
for (i = 0; i < rm->data.op_nents; ++i) {
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 98237fe..916f5ec 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -158,7 +158,8 @@
{
int ret;
- ret = get_user_pages_fast(user_addr, nr_pages, write, pages);
+ ret = get_user_pages_fast(user_addr, nr_pages, write ? FOLL_WRITE : 0,
+ pages);
if (ret >= 0 && ret < nr_pages) {
while (ret--)
@@ -517,9 +518,10 @@
return tot_pages;
}
-int rds_rdma_extra_size(struct rds_rdma_args *args)
+int rds_rdma_extra_size(struct rds_rdma_args *args,
+ struct rds_iov_vector *iov)
{
- struct rds_iovec vec;
+ struct rds_iovec *vec;
struct rds_iovec __user *local_vec;
int tot_pages = 0;
unsigned int nr_pages;
@@ -530,13 +532,23 @@
if (args->nr_local == 0)
return -EINVAL;
- /* figure out the number of pages in the vector */
- for (i = 0; i < args->nr_local; i++) {
- if (copy_from_user(&vec, &local_vec[i],
- sizeof(struct rds_iovec)))
- return -EFAULT;
+ iov->iov = kcalloc(args->nr_local,
+ sizeof(struct rds_iovec),
+ GFP_KERNEL);
+ if (!iov->iov)
+ return -ENOMEM;
- nr_pages = rds_pages_in_vec(&vec);
+ vec = &iov->iov[0];
+
+ if (copy_from_user(vec, local_vec, args->nr_local *
+ sizeof(struct rds_iovec)))
+ return -EFAULT;
+ iov->len = args->nr_local;
+
+ /* figure out the number of pages in the vector */
+ for (i = 0; i < args->nr_local; i++, vec++) {
+
+ nr_pages = rds_pages_in_vec(vec);
if (nr_pages == 0)
return -EINVAL;
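
Net effect of the rds_iov_vector plumbing here and in the rds_cmsg_rdma_args() hunk below: the user iovec array is copied in exactly once and then reused, instead of being fetched from userspace a second time. A rough flow (annotation only):

/* rds_rdma_extra_size():  kcalloc() iov->iov, one copy_from_user(),
 *                          record iov->len = args->nr_local
 * rds_cmsg_rdma_args():    check vec->len == args->nr_local,
 *                          consume vec->iov directly (no re-copy)
 */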
@@ -558,15 +570,15 @@
* Extract all arguments and set up the rdma_op
*/
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg)
+ struct cmsghdr *cmsg,
+ struct rds_iov_vector *vec)
{
struct rds_rdma_args *args;
struct rm_rdma_op *op = &rm->rdma;
int nr_pages;
unsigned int nr_bytes;
struct page **pages = NULL;
- struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
- int iov_size;
+ struct rds_iovec *iovs;
unsigned int i, j;
int ret = 0;
@@ -586,31 +598,23 @@
goto out_ret;
}
- /* Check whether to allocate the iovec area */
- iov_size = args->nr_local * sizeof(struct rds_iovec);
- if (args->nr_local > UIO_FASTIOV) {
- iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
- if (!iovs) {
- ret = -ENOMEM;
- goto out_ret;
- }
+ if (vec->len != args->nr_local) {
+ ret = -EINVAL;
+ goto out_ret;
}
- if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
- ret = -EFAULT;
- goto out;
- }
+ iovs = vec->iov;
nr_pages = rds_rdma_pages(iovs, args->nr_local);
if (nr_pages < 0) {
ret = -EINVAL;
- goto out;
+ goto out_ret;
}
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
- goto out;
+ goto out_ret;
}
op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
@@ -620,11 +624,9 @@
op->op_active = 1;
op->op_recverr = rs->rs_recverr;
WARN_ON(!nr_pages);
- op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
- if (!op->op_sg) {
- ret = -ENOMEM;
- goto out;
- }
+ op->op_sg = rds_message_alloc_sgs(rm, nr_pages, &ret);
+ if (!op->op_sg)
+ goto out_pages;
if (op->op_notify || op->op_recverr) {
/* We allocate an uninitialized notifier here, because
@@ -635,20 +637,10 @@
op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
if (!op->op_notifier) {
ret = -ENOMEM;
- goto out;
+ goto out_pages;
}
op->op_notifier->n_user_token = args->user_token;
op->op_notifier->n_status = RDS_RDMA_SUCCESS;
-
- /* Enable rmda notification on data operation for composite
- * rds messages and make sure notification is enabled only
- * for the data operation which follows it so that application
- * gets notified only after full message gets delivered.
- */
- if (rm->data.op_sg) {
- rm->rdma.op_notify = 0;
- rm->data.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
- }
}
/* The cookie contains the R_Key of the remote memory region, and
@@ -681,7 +673,7 @@
*/
ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
if (ret < 0)
- goto out;
+ goto out_pages;
else
ret = 0;
@@ -714,13 +706,11 @@
nr_bytes,
(unsigned int) args->remote_vec.bytes);
ret = -EINVAL;
- goto out;
+ goto out_pages;
}
op->op_bytes = nr_bytes;
-out:
- if (iovs != iovstack)
- sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
+out_pages:
kfree(pages);
out_ret:
if (ret)
@@ -838,11 +828,9 @@
rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
rm->atomic.op_active = 1;
rm->atomic.op_recverr = rs->rs_recverr;
- rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
- if (!rm->atomic.op_sg) {
- ret = -ENOMEM;
+ rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1, &ret);
+ if (!rm->atomic.op_sg)
goto err;
- }
/* verify 8 byte-aligned */
if (args->local_addr & 0x7) {
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 6b0f57c..5f741e5 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -43,6 +43,9 @@
static struct rdma_cm_id *rds6_rdma_listen_id;
#endif
+/* Per IB specification 7.7.3, service level is a 4-bit field. */
+#define TOS_TO_SL(tos) ((tos) & 0xF)
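
A quick sanity check of the mask (annotation, not patch content):

/* TOS_TO_SL(0x2A) == 0x0A: only the low nibble survives, so any 8-bit
 * TOS value lands in the 4-bit SL range 0-15 required by the spec.
 */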
+
static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event,
bool isv6)
@@ -51,6 +54,8 @@
struct rds_connection *conn = cm_id->context;
struct rds_transport *trans;
int ret = 0;
+ int *err;
+ u8 len;
rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id,
event->event, rdma_event_msg(event->event));
@@ -81,6 +86,7 @@
break;
case RDMA_CM_EVENT_ADDR_RESOLVED:
+ rdma_set_service_type(cm_id, conn->c_tos);
/* XXX do we need to clean up if this fails? */
ret = rdma_resolve_route(cm_id,
RDS_RDMA_RESOLVE_TIMEOUT_MS);
@@ -94,21 +100,39 @@
struct rds_ib_connection *ibic;
ibic = conn->c_transport_data;
- if (ibic && ibic->i_cm_id == cm_id)
+ if (ibic && ibic->i_cm_id == cm_id) {
+ cm_id->route.path_rec[0].sl =
+ TOS_TO_SL(conn->c_tos);
ret = trans->cm_initiate_connect(cm_id, isv6);
- else
+ } else {
rds_conn_drop(conn);
+ }
}
break;
case RDMA_CM_EVENT_ESTABLISHED:
- trans->cm_connect_complete(conn, event);
+ if (conn)
+ trans->cm_connect_complete(conn, event);
break;
case RDMA_CM_EVENT_REJECTED:
+ if (!conn)
+ break;
+ err = (int *)rdma_consumer_reject_data(cm_id, event, &len);
+ if (!err ||
+ (err && len >= sizeof(*err) &&
+ ((*err) <= RDS_RDMA_REJ_INCOMPAT))) {
+ pr_warn("RDS/RDMA: conn <%pI6c, %pI6c> rejected, dropping connection\n",
+ &conn->c_laddr, &conn->c_faddr);
+
+ if (!conn->c_tos)
+ conn->c_proposed_version = RDS_PROTOCOL_COMPAT_VERSION;
+
+ rds_conn_drop(conn);
+ }
rdsdebug("Connection rejected: %s\n",
rdma_reject_msg(cm_id, event->status));
- /* FALLTHROUGH */
+ break;
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
@@ -120,6 +144,8 @@
break;
case RDMA_CM_EVENT_DISCONNECTED:
+ if (!conn)
+ break;
rdsdebug("DISCONNECT event - dropping connection "
"%pI6c->%pI6c\n", &conn->c_laddr,
&conn->c_faddr);
diff --git a/net/rds/rdma_transport.h b/net/rds/rdma_transport.h
index 200d313..bfafd4a 100644
--- a/net/rds/rdma_transport.h
+++ b/net/rds/rdma_transport.h
@@ -11,6 +11,12 @@
#define RDS_RDMA_RESOLVE_TIMEOUT_MS 5000
+/* The reject reason below covers a legacy interoperability issue with
+ * non-Linux RDS endpoints, where older-version incompatibility is conveyed
+ * via value 1. For future version(s), a properly encoded reject reason
+ * should be used.
+ */
+#define RDS_RDMA_REJ_INCOMPAT 1
+
int rds_rdma_conn_connect(struct rds_connection *conn);
int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event);
diff --git a/net/rds/rds.h b/net/rds/rds.h
index c4dcf65..53e8691 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -19,10 +19,13 @@
*/
#define RDS_PROTOCOL_3_0 0x0300
#define RDS_PROTOCOL_3_1 0x0301
+#define RDS_PROTOCOL_4_0 0x0400
+#define RDS_PROTOCOL_4_1 0x0401
#define RDS_PROTOCOL_VERSION RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v) ((v) >> 8)
#define RDS_PROTOCOL_MINOR(v) ((v) & 255)
#define RDS_PROTOCOL(maj, min) (((maj) << 8) | min)
+#define RDS_PROTOCOL_COMPAT_VERSION RDS_PROTOCOL_3_1
/* The following ports, 16385, 18634, 18635, are registered with IANA as
* the ports to be used for RDS over TCP and UDP. Currently, only RDS over
@@ -48,10 +51,6 @@
}
#endif
-/* XXX is there one of these somewhere? */
-#define ceil(x, y) \
- ({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
-
#define RDS_FRAG_SHIFT 12
#define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT))
@@ -155,9 +154,13 @@
struct rds_cong_map *c_fcong;
/* Protocol version */
+ unsigned int c_proposed_version;
unsigned int c_version;
possible_net_t c_net;
+ /* TOS */
+ u8 c_tos;
+
struct list_head c_map_item;
unsigned long c_map_queued;
@@ -268,6 +271,12 @@
#define RDS_MSG_RX_END 2
#define RDS_MSG_RX_CMSG 3
+/* The following values are whitelisted for usercopy */
+struct rds_inc_usercopy {
+ rds_rdma_cookie_t rdma_cookie;
+ ktime_t rx_tstamp;
+};
+
struct rds_incoming {
refcount_t i_refcount;
struct list_head i_item;
@@ -277,8 +286,7 @@
unsigned long i_rx_jiffies;
struct in6_addr i_saddr;
- rds_rdma_cookie_t i_rdma_cookie;
- struct timeval i_rx_tstamp;
+ struct rds_inc_usercopy i_usercopy;
u64 i_rx_lat_trace[RDS_RX_MAX_TRACES];
};
@@ -386,6 +394,18 @@
INIT_LIST_HEAD(&q->zcookie_head);
}
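+/* One parsed RDMA iovec array, and a growable collection of them:
+ * a sendmsg call gets one rds_iov_vector per RDS_CMSG_RDMA_ARGS.
+ */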
+struct rds_iov_vector {
+ struct rds_iovec *iov;
+ int len;
+};
+
+struct rds_iov_vector_arr {
+ struct rds_iov_vector *vec;
+ int len;
+ int indx;
+ int incr;
+};
+
struct rds_message {
refcount_t m_refcount;
struct list_head m_sock_item;
@@ -461,7 +481,6 @@
} rdma;
struct rm_data_op {
unsigned int op_active:1;
- unsigned int op_notify:1;
unsigned int op_nents;
unsigned int op_count;
unsigned int op_dmasg;
@@ -559,6 +578,7 @@
void (*free_mr)(void *trans_private, int invalidate);
void (*flush_mrs)(void);
bool (*t_unloading)(struct rds_connection *conn);
+ u8 (*get_tos_map)(u8 tos);
};
/* Bind hash table key length. It is the sum of the size of a struct
@@ -640,6 +660,7 @@
u8 rs_rx_traces;
u8 rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
struct rds_msg_zcopy_queue rs_zcookie_queue;
+ u8 rs_tos;
};
static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
@@ -701,7 +722,7 @@
uint64_t s_cong_send_blocked;
uint64_t s_recv_bytes_added_to_socket;
uint64_t s_recv_bytes_removed_from_socket;
-
+ uint64_t s_send_stuck_rm;
};
/* af_rds.c */
@@ -748,13 +769,14 @@
struct rds_connection *rds_conn_create(struct net *net,
const struct in6_addr *laddr,
const struct in6_addr *faddr,
- struct rds_transport *trans, gfp_t gfp,
+ struct rds_transport *trans,
+ u8 tos, gfp_t gfp,
int dev_if);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
const struct in6_addr *laddr,
const struct in6_addr *faddr,
struct rds_transport *trans,
- gfp_t gfp, int dev_if);
+ u8 tos, gfp_t gfp, int dev_if);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
@@ -827,7 +849,8 @@
/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
+ int *ret);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
bool zcopy);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
@@ -904,13 +927,13 @@
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
-int rds_rdma_extra_size(struct rds_rdma_args *args);
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg);
+int rds_rdma_extra_size(struct rds_rdma_args *args,
+ struct rds_iov_vector *iov);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg);
+ struct cmsghdr *cmsg,
+ struct rds_iov_vector *vec);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 504cd6b..c840497 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -43,18 +43,14 @@
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
struct in6_addr *saddr)
{
- int i;
-
refcount_set(&inc->i_refcount, 1);
INIT_LIST_HEAD(&inc->i_item);
inc->i_conn = conn;
inc->i_saddr = *saddr;
- inc->i_rdma_cookie = 0;
- inc->i_rx_tstamp.tv_sec = 0;
- inc->i_rx_tstamp.tv_usec = 0;
+ inc->i_usercopy.rdma_cookie = 0;
+ inc->i_usercopy.rx_tstamp = ktime_set(0, 0);
- for (i = 0; i < RDS_RX_MAX_TRACES; i++)
- inc->i_rx_lat_trace[i] = 0;
+ memset(inc->i_rx_lat_trace, 0, sizeof(inc->i_rx_lat_trace));
}
EXPORT_SYMBOL_GPL(rds_inc_init);
@@ -66,9 +62,8 @@
inc->i_conn = cp->cp_conn;
inc->i_conn_path = cp;
inc->i_saddr = *saddr;
- inc->i_rdma_cookie = 0;
- inc->i_rx_tstamp.tv_sec = 0;
- inc->i_rx_tstamp.tv_usec = 0;
+ inc->i_usercopy.rdma_cookie = 0;
+ inc->i_usercopy.rx_tstamp = ktime_set(0, 0);
}
EXPORT_SYMBOL_GPL(rds_inc_path_init);
@@ -191,7 +186,7 @@
case RDS_EXTHDR_RDMA_DEST:
/* We ignore the size for now. We could stash it
* somewhere and use it for error checking. */
- inc->i_rdma_cookie = rds_rdma_make_cookie(
+ inc->i_usercopy.rdma_cookie = rds_rdma_make_cookie(
be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
be32_to_cpu(buffer.rdma_dest.h_rdma_offset));
@@ -385,7 +380,7 @@
be32_to_cpu(inc->i_hdr.h_len),
inc->i_hdr.h_dport);
if (sock_flag(sk, SOCK_RCVTSTAMP))
- do_gettimeofday(&inc->i_rx_tstamp);
+ inc->i_usercopy.rx_tstamp = ktime_get_real();
rds_inc_addref(inc);
inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock();
list_add_tail(&inc->i_item, &rs->rs_recv_queue);
@@ -545,18 +540,32 @@
{
int ret = 0;
- if (inc->i_rdma_cookie) {
+ if (inc->i_usercopy.rdma_cookie) {
ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
- sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
+ sizeof(inc->i_usercopy.rdma_cookie),
+ &inc->i_usercopy.rdma_cookie);
if (ret)
goto out;
}
- if ((inc->i_rx_tstamp.tv_sec != 0) &&
+ if ((inc->i_usercopy.rx_tstamp != 0) &&
sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
- ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
- sizeof(struct timeval),
- &inc->i_rx_tstamp);
+ struct __kernel_old_timeval tv =
+ ns_to_kernel_old_timeval(inc->i_usercopy.rx_tstamp);
+
+ if (!sock_flag(rds_rs_to_sk(rs), SOCK_TSTAMP_NEW)) {
+ ret = put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
+ sizeof(tv), &tv);
+ } else {
+ struct __kernel_sock_timeval sk_tv;
+
+ sk_tv.tv_sec = tv.tv_sec;
+ sk_tv.tv_usec = tv.tv_usec;
+
+ ret = put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
+ sizeof(sk_tv), &sk_tv);
+ }
+
if (ret)
goto out;
}
@@ -775,6 +784,7 @@
minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
minfo.len = be32_to_cpu(inc->i_hdr.h_len);
+ minfo.tos = inc->i_conn->c_tos;
if (flip) {
minfo.laddr = daddr;
@@ -803,6 +813,7 @@
minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
+ minfo6.tos = inc->i_conn->c_tos;
if (flip) {
minfo6.laddr = *daddr;
@@ -816,6 +827,8 @@
minfo6.fport = inc->i_hdr.h_dport;
}
+ minfo6.flags = 0;
+
rds_info_copy(iter, &minfo6, sizeof(minfo6));
}
#endif
diff --git a/net/rds/send.c b/net/rds/send.c
index fe785ee..82dcd8b 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -145,6 +145,7 @@
LIST_HEAD(to_be_dropped);
int batch_count;
unsigned long send_gen = 0;
+ int same_rm = 0;
restart:
batch_count = 0;
@@ -200,6 +201,17 @@
rm = cp->cp_xmit_rm;
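+ /* Guard against spinning on a message that never completes:
+ * after 4096 consecutive passes over the same rm, count a
+ * stuck rm and bail out with -EAGAIN.
+ */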
+ if (!rm) {
+ same_rm = 0;
+ } else {
+ same_rm++;
+ if (same_rm >= 4096) {
+ rds_stats_inc(s_send_stuck_rm);
+ ret = -EAGAIN;
+ break;
+ }
+ }
+
/*
* If between sending messages, we can send a pending congestion
* map update.
@@ -491,14 +503,12 @@
struct rm_rdma_op *ro;
struct rds_notifier *notifier;
unsigned long flags;
- unsigned int notify = 0;
spin_lock_irqsave(&rm->m_rs_lock, flags);
- notify = rm->rdma.op_notify | rm->data.op_notify;
ro = &rm->rdma;
if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
- ro->op_active && notify && ro->op_notifier) {
+ ro->op_active && ro->op_notify && ro->op_notifier) {
notifier = ro->op_notifier;
rs = rm->m_rs;
sock_hold(rds_rs_to_sk(rs));
@@ -876,13 +886,18 @@
* rds_message is getting to be quite complicated, and we'd like to allocate
* it all in one go. This figures out how big it needs to be up front.
*/
-static int rds_rm_size(struct msghdr *msg, int num_sgs)
+static int rds_rm_size(struct msghdr *msg, int num_sgs,
+ struct rds_iov_vector_arr *vct)
{
struct cmsghdr *cmsg;
int size = 0;
int cmsg_groups = 0;
int retval;
bool zcopy_cookie = false;
+ struct rds_iov_vector *iov, *tmp_iov;
+
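+ /* Reject a negative sg count up front rather than trying to
+ * size an allocation from it.
+ */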
+ if (num_sgs < 0)
+ return -EINVAL;
for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
@@ -893,8 +908,24 @@
switch (cmsg->cmsg_type) {
case RDS_CMSG_RDMA_ARGS:
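+ /* Each RDS_CMSG_RDMA_ARGS gets its own iov vector; grow
+ * the array by vct->incr entries whenever it fills up.
+ */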
+ if (vct->indx >= vct->len) {
+ vct->len += vct->incr;
+ tmp_iov =
+ krealloc(vct->vec,
+ vct->len *
+ sizeof(struct rds_iov_vector),
+ GFP_KERNEL);
+ if (!tmp_iov) {
+ vct->len -= vct->incr;
+ return -ENOMEM;
+ }
+ vct->vec = tmp_iov;
+ }
+ iov = &vct->vec[vct->indx];
+ memset(iov, 0, sizeof(struct rds_iov_vector));
+ vct->indx++;
cmsg_groups |= 1;
- retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
+ retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov);
if (retval < 0)
return retval;
size += retval;
@@ -951,10 +982,11 @@
}
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
- struct msghdr *msg, int *allocated_mr)
+ struct msghdr *msg, int *allocated_mr,
+ struct rds_iov_vector_arr *vct)
{
struct cmsghdr *cmsg;
- int ret = 0;
+ int ret = 0, ind = 0;
for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
@@ -968,7 +1000,10 @@
*/
switch (cmsg->cmsg_type) {
case RDS_CMSG_RDMA_ARGS:
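+ /* Consume the iov vector that rds_rm_size() parsed for this
+ * cmsg; running out means the two passes disagreed.
+ */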
- ret = rds_cmsg_rdma_args(rs, rm, cmsg);
+ if (ind >= vct->indx)
+ return -ENOMEM;
+ ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
+ ind++;
break;
case RDS_CMSG_RDMA_DEST:
@@ -1082,8 +1117,15 @@
size_t total_payload_len = payload_len, rdma_payload_len = 0;
bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) &&
sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
- int num_sgs = ceil(payload_len, PAGE_SIZE);
+ int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE);
int namelen;
+ struct rds_iov_vector_arr vct;
+ int ind;
+
+ memset(&vct, 0, sizeof(vct));
+
+ /* Expect one RDMA cmsg per rds_sendmsg; the vector array can still grow if more are needed. */
+ vct.incr = 1;
/* Mirror Linux UDP mirror of BSD error message compatibility */
/* XXX: Perhaps MSG_MORE someday */
@@ -1102,7 +1144,7 @@
case AF_INET:
if (usin->sin_addr.s_addr == htonl(INADDR_ANY) ||
usin->sin_addr.s_addr == htonl(INADDR_BROADCAST) ||
- IN_MULTICAST(ntohl(usin->sin_addr.s_addr))) {
+ ipv4_is_multicast(usin->sin_addr.s_addr)) {
ret = -EINVAL;
goto out;
}
@@ -1133,7 +1175,7 @@
addr4 = sin6->sin6_addr.s6_addr32[3];
if (addr4 == htonl(INADDR_ANY) ||
addr4 == htonl(INADDR_BROADCAST) ||
- IN_MULTICAST(ntohl(addr4))) {
+ ipv4_is_multicast(addr4)) {
ret = -EINVAL;
goto out;
}
@@ -1220,7 +1262,7 @@
num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
}
/* size of rm including all sgs */
- ret = rds_rm_size(msg, num_sgs);
+ ret = rds_rm_size(msg, num_sgs, &vct);
if (ret < 0)
goto out;
@@ -1232,11 +1274,9 @@
/* Attach data to the rm */
if (payload_len) {
- rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
- if (!rm->data.op_sg) {
- ret = -ENOMEM;
+ rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
+ if (!rm->data.op_sg)
goto out;
- }
ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
if (ret)
goto out;
@@ -1247,12 +1287,13 @@
/* rds_conn_create has a spinlock that runs with IRQ off.
* Caching the conn in the socket helps a lot. */
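+ /* With per-TOS connections, the cached conn is only reusable
+ * when its TOS matches the socket's.
+ */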
- if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr))
+ if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr) &&
+ rs->rs_tos == rs->rs_conn->c_tos) {
conn = rs->rs_conn;
- else {
+ } else {
conn = rds_conn_create_outgoing(sock_net(sock->sk),
&rs->rs_bound_addr, &daddr,
- rs->rs_transport,
+ rs->rs_transport, rs->rs_tos,
sock->sk->sk_allocation,
scope_id);
if (IS_ERR(conn)) {
@@ -1270,7 +1311,7 @@
rm->m_conn_path = cpath;
/* Parse any control messages the user may have included. */
- ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
+ ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
if (ret) {
/* Trigger connection so that its ready for the next retry */
if (ret == -EAGAIN)
@@ -1348,9 +1389,18 @@
if (ret)
goto out;
rds_message_put(rm);
+
+ for (ind = 0; ind < vct.indx; ind++)
+ kfree(vct.vec[ind].iov);
+ kfree(vct.vec);
+
return payload_len;
out:
+ for (ind = 0; ind < vct.indx; ind++)
+ kfree(vct.vec[ind].iov);
+ kfree(vct.vec);
+
/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
* If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
* or in any other way, we need to destroy the MR again */
diff --git a/net/rds/stats.c b/net/rds/stats.c
index 73be187..9e87da4 100644
--- a/net/rds/stats.c
+++ b/net/rds/stats.c
@@ -76,6 +76,9 @@
"cong_update_received",
"cong_send_error",
"cong_send_blocked",
+ "recv_bytes_added_to_sock",
+ "recv_bytes_freed_fromsock",
+ "send_stuck_rm",
};
void rds_stats_info_copy(struct rds_info_iterator *iter,
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index b9bbcf3..66121bc 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -267,6 +267,7 @@
tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
tsinfo.last_expected_una = tc->t_last_expected_una;
tsinfo.last_seen_una = tc->t_last_seen_una;
+ tsinfo.tos = tc->t_cpath->cp_conn->c_tos;
rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
}
@@ -452,6 +453,12 @@
static void rds_tcp_exit(void);
+static u8 rds_tcp_get_tos_map(u8 tos)
+{
+ /* All user TOS values map to the default of 0 for the TCP transport. */
+ return 0;
+}
+
struct rds_transport rds_tcp_transport = {
.laddr_check = rds_tcp_laddr_check,
.xmit_path_prepare = rds_tcp_xmit_path_prepare,
@@ -466,6 +473,7 @@
.inc_free = rds_tcp_inc_free,
.stats_info_copy = rds_tcp_stats_info_copy,
.exit = rds_tcp_exit,
+ .get_tos_map = rds_tcp_get_tos_map,
.t_owner = THIS_MODULE,
.t_name = "tcp",
.t_type = RDS_TRANS_TCP,
@@ -543,7 +551,7 @@
tbl = kmemdup(rds_tcp_sysctl_table,
sizeof(rds_tcp_sysctl_table), GFP_KERNEL);
if (!tbl) {
- pr_warn("could not set allocate syctl table\n");
+ pr_warn("could not allocate sysctl table\n");
return -ENOMEM;
}
rtn->ctl_table = tbl;
@@ -600,7 +608,7 @@
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
- if (net != c_net || !tc->t_sock)
+ if (net != c_net)
continue;
if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
list_move_tail(&tc->t_tcp_node, &tmp_list);
@@ -623,7 +631,7 @@
if (rtn->rds_tcp_sysctl)
unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
- if (net != &init_net && rtn->ctl_table)
+ if (net != &init_net)
kfree(rtn->ctl_table);
}
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index c12203f..810a3a4 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -200,7 +200,7 @@
conn = rds_conn_create(sock_net(sock->sk),
my_addr, peer_addr,
- &rds_tcp_transport, GFP_KERNEL, dev_if);
+ &rds_tcp_transport, 0, GFP_KERNEL, dev_if);
if (IS_ERR(conn)) {
ret = PTR_ERR(conn);
diff --git a/net/rds/threads.c b/net/rds/threads.c
index e64f9e4..32dc50f 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -93,6 +93,7 @@
queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
}
rcu_read_unlock();
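+ /* Each (re)connect proposes the full protocol version; a peer
+ * reject can later knock this down to the compat version.
+ */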
+ cp->cp_conn->c_proposed_version = RDS_PROTOCOL_VERSION;
}
EXPORT_SYMBOL_GPL(rds_connect_path_complete);