Update Linux to v5.4.2

The net/rds/send.c part of this update brings in the following upstream
changes:

 - rds_send_xmit() now tracks how many consecutive iterations see the
   same rds_message and gives up with -EAGAIN (bumping the new
   s_send_stuck_rm counter) once it reaches 4096, rather than spinning
   on a message that cannot make progress.
 - The RDMA completion-notify test checks ro->op_notify directly; the
   local 'notify' variable that also folded in data.op_notify is gone.
 - rds_rm_size() rejects a negative num_sgs and, together with
   rds_cmsg_send(), fills a caller-provided rds_iov_vector_arr, growing
   the array with krealloc() for each RDS_CMSG_RDMA_ARGS cmsg.
 - rds_message_alloc_sgs() now reports its error through an output
   parameter instead of the caller assuming -ENOMEM.
 - ceil() is replaced by DIV_ROUND_UP(), IN_MULTICAST()/ntohl() by
   ipv4_is_multicast(), and the cached connection is only reused when
   its TOS matches rs->rs_tos (rds_conn_create_outgoing() now takes the
   TOS as an argument).
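The per-cmsg vector bookkeeping in rds_rm_size() follows a simple
grow-on-demand pattern: the array starts empty and is enlarged by
vct->incr slots each time another RDS_CMSG_RDMA_ARGS entry needs one.
A minimal userspace sketch of the same pattern (the iov_vector_arr
type and vector_arr_reserve() helper below are hypothetical, and
realloc() stands in for krealloc()):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* Userspace stand-in for the kernel's rds_iov_vector_arr. */
  struct iov_vector {
          void *iov;      /* would hold the copied-in user iovec */
          int nr;
  };

  struct iov_vector_arr {
          struct iov_vector *vec;
          int len;        /* slots allocated */
          int indx;       /* slots in use */
          int incr;       /* growth step; 1 in this patch */
  };

  /* Ensure one more slot is available, growing the array by incr. */
  static int vector_arr_reserve(struct iov_vector_arr *vct)
  {
          struct iov_vector *tmp;

          if (vct->indx < vct->len)
                  return 0;

          tmp = realloc(vct->vec, (vct->len + vct->incr) * sizeof(*tmp));
          if (!tmp)
                  return -1;      /* old array stays valid, as with krealloc() */

          vct->vec = tmp;
          vct->len += vct->incr;
          return 0;
  }

  int main(void)
  {
          struct iov_vector_arr vct = { .incr = 1 };

          for (int i = 0; i < 3; i++) {
                  if (vector_arr_reserve(&vct))
                          return 1;
                  memset(&vct.vec[vct.indx], 0, sizeof(vct.vec[0]));
                  vct.indx++;
          }
          printf("used %d of %d slots\n", vct.indx, vct.len);
          free(vct.vec);
          return 0;
  }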

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/net/rds/send.c b/net/rds/send.c
index fe785ee..82dcd8b 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -145,6 +145,7 @@
 	LIST_HEAD(to_be_dropped);
 	int batch_count;
 	unsigned long send_gen = 0;
+	int same_rm = 0;
 
 restart:
 	batch_count = 0;
@@ -200,6 +201,17 @@
 
 		rm = cp->cp_xmit_rm;
 
+		if (!rm) {
+			same_rm = 0;
+		} else {
+			same_rm++;
+			if (same_rm >= 4096) {
+				rds_stats_inc(s_send_stuck_rm);
+				ret = -EAGAIN;
+				break;
+			}
+		}
+
 		/*
 		 * If between sending messages, we can send a pending congestion
 		 * map update.
@@ -491,14 +503,12 @@
 	struct rm_rdma_op *ro;
 	struct rds_notifier *notifier;
 	unsigned long flags;
-	unsigned int notify = 0;
 
 	spin_lock_irqsave(&rm->m_rs_lock, flags);
 
-	notify =  rm->rdma.op_notify | rm->data.op_notify;
 	ro = &rm->rdma;
 	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
-	    ro->op_active && notify && ro->op_notifier) {
+	    ro->op_active && ro->op_notify && ro->op_notifier) {
 		notifier = ro->op_notifier;
 		rs = rm->m_rs;
 		sock_hold(rds_rs_to_sk(rs));
@@ -876,13 +886,18 @@
  * rds_message is getting to be quite complicated, and we'd like to allocate
  * it all in one go. This figures out how big it needs to be up front.
  */
-static int rds_rm_size(struct msghdr *msg, int num_sgs)
+static int rds_rm_size(struct msghdr *msg, int num_sgs,
+		       struct rds_iov_vector_arr *vct)
 {
 	struct cmsghdr *cmsg;
 	int size = 0;
 	int cmsg_groups = 0;
 	int retval;
 	bool zcopy_cookie = false;
+	struct rds_iov_vector *iov, *tmp_iov;
+
+	if (num_sgs < 0)
+		return -EINVAL;
 
 	for_each_cmsghdr(cmsg, msg) {
 		if (!CMSG_OK(msg, cmsg))
@@ -893,8 +908,24 @@
 
 		switch (cmsg->cmsg_type) {
 		case RDS_CMSG_RDMA_ARGS:
+			if (vct->indx >= vct->len) {
+				vct->len += vct->incr;
+				tmp_iov =
+					krealloc(vct->vec,
+						 vct->len *
+						 sizeof(struct rds_iov_vector),
+						 GFP_KERNEL);
+				if (!tmp_iov) {
+					vct->len -= vct->incr;
+					return -ENOMEM;
+				}
+				vct->vec = tmp_iov;
+			}
+			iov = &vct->vec[vct->indx];
+			memset(iov, 0, sizeof(struct rds_iov_vector));
+			vct->indx++;
 			cmsg_groups |= 1;
-			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
+			retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov);
 			if (retval < 0)
 				return retval;
 			size += retval;
@@ -951,10 +982,11 @@
 }
 
 static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
-			 struct msghdr *msg, int *allocated_mr)
+			 struct msghdr *msg, int *allocated_mr,
+			 struct rds_iov_vector_arr *vct)
 {
 	struct cmsghdr *cmsg;
-	int ret = 0;
+	int ret = 0, ind = 0;
 
 	for_each_cmsghdr(cmsg, msg) {
 		if (!CMSG_OK(msg, cmsg))
@@ -968,7 +1000,10 @@
 		 */
 		switch (cmsg->cmsg_type) {
 		case RDS_CMSG_RDMA_ARGS:
-			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
+			if (ind >= vct->indx)
+				return -ENOMEM;
+			ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
+			ind++;
 			break;
 
 		case RDS_CMSG_RDMA_DEST:
@@ -1082,8 +1117,15 @@
 	size_t total_payload_len = payload_len, rdma_payload_len = 0;
 	bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) &&
 		      sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
-	int num_sgs = ceil(payload_len, PAGE_SIZE);
+	int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE);
 	int namelen;
+	struct rds_iov_vector_arr vct;
+	int ind;
+
+	memset(&vct, 0, sizeof(vct));
+
+	/* expect 1 RDMA CMSG per rds_sendmsg. can still grow if more needed. */
+	vct.incr = 1;
 
 	/* Mirror Linux UDP mirror of BSD error message compatibility */
 	/* XXX: Perhaps MSG_MORE someday */
@@ -1102,7 +1144,7 @@
 		case AF_INET:
 			if (usin->sin_addr.s_addr == htonl(INADDR_ANY) ||
 			    usin->sin_addr.s_addr == htonl(INADDR_BROADCAST) ||
-			    IN_MULTICAST(ntohl(usin->sin_addr.s_addr))) {
+			    ipv4_is_multicast(usin->sin_addr.s_addr)) {
 				ret = -EINVAL;
 				goto out;
 			}
@@ -1133,7 +1175,7 @@
 				addr4 = sin6->sin6_addr.s6_addr32[3];
 				if (addr4 == htonl(INADDR_ANY) ||
 				    addr4 == htonl(INADDR_BROADCAST) ||
-				    IN_MULTICAST(ntohl(addr4))) {
+				    ipv4_is_multicast(addr4)) {
 					ret = -EINVAL;
 					goto out;
 				}
@@ -1220,7 +1262,7 @@
 		num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
 	}
 	/* size of rm including all sgs */
-	ret = rds_rm_size(msg, num_sgs);
+	ret = rds_rm_size(msg, num_sgs, &vct);
 	if (ret < 0)
 		goto out;
 
@@ -1232,11 +1274,9 @@
 
 	/* Attach data to the rm */
 	if (payload_len) {
-		rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
-		if (!rm->data.op_sg) {
-			ret = -ENOMEM;
+		rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
+		if (!rm->data.op_sg)
 			goto out;
-		}
 		ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
 		if (ret)
 			goto out;
@@ -1247,12 +1287,13 @@
 
 	/* rds_conn_create has a spinlock that runs with IRQ off.
 	 * Caching the conn in the socket helps a lot. */
-	if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr))
+	if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr) &&
+	    rs->rs_tos == rs->rs_conn->c_tos) {
 		conn = rs->rs_conn;
-	else {
+	} else {
 		conn = rds_conn_create_outgoing(sock_net(sock->sk),
 						&rs->rs_bound_addr, &daddr,
-						rs->rs_transport,
+						rs->rs_transport, rs->rs_tos,
 						sock->sk->sk_allocation,
 						scope_id);
 		if (IS_ERR(conn)) {
@@ -1270,7 +1311,7 @@
 	rm->m_conn_path = cpath;
 
 	/* Parse any control messages the user may have included. */
-	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
+	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
 	if (ret) {
 		/* Trigger connection so that its ready for the next retry */
 		if (ret ==  -EAGAIN)
@@ -1348,9 +1389,18 @@
 	if (ret)
 		goto out;
 	rds_message_put(rm);
+
+	for (ind = 0; ind < vct.indx; ind++)
+		kfree(vct.vec[ind].iov);
+	kfree(vct.vec);
+
 	return payload_len;
 
 out:
+	for (ind = 0; ind < vct.indx; ind++)
+		kfree(vct.vec[ind].iov);
+	kfree(vct.vec);
+
 	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
 	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
 	 * or in any other way, we need to destroy the MR again */