Diffstat (limited to 'net')
-rw-r--r--  net/atm/common.c               |   2
-rw-r--r--  net/bluetooth/hci_core.c       |  89
-rw-r--r--  net/bluetooth/hci_sock.c       |   2
-rw-r--r--  net/bluetooth/iso.c            |  30
-rw-r--r--  net/bluetooth/l2cap_core.c     |  23
-rw-r--r--  net/bluetooth/sco.c            |  35
-rw-r--r--  net/bluetooth/smp.c            |  31
-rw-r--r--  net/ceph/auth_x.c              |   2
-rw-r--r--  net/ceph/ceph_common.c         |  58
-rw-r--r--  net/ceph/debugfs.c             |  14
-rw-r--r--  net/ceph/messenger_v2.c        |  11
-rw-r--r--  net/ceph/osdmap.c              |  18
-rw-r--r--  net/core/dev_ioctl.c           |   3
-rw-r--r--  net/devlink/rate.c             |   4
-rw-r--r--  net/ipv4/esp4_offload.c        |   6
-rw-r--r--  net/ipv6/esp6_offload.c        |   6
-rw-r--r--  net/l2tp/l2tp_core.c           |   6
-rw-r--r--  net/mctp/route.c               |   1
-rw-r--r--  net/mptcp/options.c            |  54
-rw-r--r--  net/mptcp/pm.c                 |  20
-rw-r--r--  net/mptcp/pm_kernel.c          |   2
-rw-r--r--  net/mptcp/protocol.c           |  97
-rw-r--r--  net/mptcp/protocol.h           |   3
-rw-r--r--  net/openvswitch/actions.c      |  68
-rw-r--r--  net/openvswitch/flow_netlink.c |  64
-rw-r--r--  net/openvswitch/flow_netlink.h |   2
-rw-r--r--  net/sched/em_canid.c           |   3
-rw-r--r--  net/sched/em_cmp.c             |   5
-rw-r--r--  net/sched/em_nbyte.c           |   2
-rw-r--r--  net/sched/em_text.c            |  11
-rw-r--r--  net/unix/af_unix.c             |   3
-rw-r--r--  net/vmw_vsock/af_vsock.c       |  40
-rw-r--r--  net/xdp/xsk.c                  | 143
-rw-r--r--  net/xfrm/xfrm_device.c         |   2
-rw-r--r--  net/xfrm/xfrm_output.c         |   8
-rw-r--r--  net/xfrm/xfrm_state.c          |  30
-rw-r--r--  net/xfrm/xfrm_user.c           |   8
37 files changed, 520 insertions, 386 deletions
diff --git a/net/atm/common.c b/net/atm/common.c
index 881c7f259dbd..c4edc1111bf0 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -881,7 +881,7 @@ out_atmproc_exit:
out_atmsvc_exit:
atmsvc_exit();
out_atmpvc_exit:
- atmsvc_exit();
+ atmpvc_exit();
out_unregister_vcc_proto:
proto_unregister(&vcc_proto);
goto out;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 3418d7b964a1..8ccec73dce45 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -3832,13 +3832,14 @@ static void hci_tx_work(struct work_struct *work)
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_acl_hdr *hdr;
- struct hci_conn *conn;
__u16 handle, flags;
+ int err;
hdr = skb_pull_data(skb, sizeof(*hdr));
if (!hdr) {
bt_dev_err(hdev, "ACL packet too small");
- goto drop;
+ kfree_skb(skb);
+ return;
}
handle = __le16_to_cpu(hdr->handle);
@@ -3850,36 +3851,27 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
hdev->stat.acl_rx++;
- hci_dev_lock(hdev);
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- hci_dev_unlock(hdev);
-
- if (conn) {
- hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
-
- /* Send to upper protocol */
- l2cap_recv_acldata(conn, skb, flags);
- return;
- } else {
+ err = l2cap_recv_acldata(hdev, handle, skb, flags);
+ if (err == -ENOENT)
bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
handle);
- }
-
-drop:
- kfree_skb(skb);
+ else if (err)
+ bt_dev_dbg(hdev, "ACL packet recv for handle %d failed: %d",
+ handle, err);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_sco_hdr *hdr;
- struct hci_conn *conn;
__u16 handle, flags;
+ int err;
hdr = skb_pull_data(skb, sizeof(*hdr));
if (!hdr) {
bt_dev_err(hdev, "SCO packet too small");
- goto drop;
+ kfree_skb(skb);
+ return;
}
handle = __le16_to_cpu(hdr->handle);
@@ -3891,34 +3883,28 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
hdev->stat.sco_rx++;
- hci_dev_lock(hdev);
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- hci_dev_unlock(hdev);
+ hci_skb_pkt_status(skb) = flags & 0x03;
- if (conn) {
- /* Send to upper protocol */
- hci_skb_pkt_status(skb) = flags & 0x03;
- sco_recv_scodata(conn, skb);
- return;
- } else {
+ err = sco_recv_scodata(hdev, handle, skb);
+ if (err == -ENOENT)
bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
handle);
- }
-
-drop:
- kfree_skb(skb);
+ else if (err)
+ bt_dev_dbg(hdev, "SCO packet recv for handle %d failed: %d",
+ handle, err);
}
static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_iso_hdr *hdr;
- struct hci_conn *conn;
__u16 handle, flags;
+ int err;
hdr = skb_pull_data(skb, sizeof(*hdr));
if (!hdr) {
bt_dev_err(hdev, "ISO packet too small");
- goto drop;
+ kfree_skb(skb);
+ return;
}
handle = __le16_to_cpu(hdr->handle);
@@ -3928,22 +3914,13 @@ static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
handle, flags);
- hci_dev_lock(hdev);
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- hci_dev_unlock(hdev);
-
- if (!conn) {
+ err = iso_recv(hdev, handle, skb, flags);
+ if (err == -ENOENT)
bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
handle);
- goto drop;
- }
-
- /* Send to upper protocol */
- iso_recv(conn, skb, flags);
- return;
-
-drop:
- kfree_skb(skb);
+ else if (err)
+ bt_dev_dbg(hdev, "ISO packet recv for handle %d failed: %d",
+ handle, err);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
@@ -4121,7 +4098,7 @@ static void hci_rx_work(struct work_struct *work)
}
}
-static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
+static int hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
{
int err;
@@ -4133,16 +4110,19 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
if (!hdev->sent_cmd) {
skb_queue_head(&hdev->cmd_q, skb);
queue_work(hdev->workqueue, &hdev->cmd_work);
- return;
+ return -EINVAL;
}
if (hci_skb_opcode(skb) != HCI_OP_NOP) {
err = hci_send_frame(hdev, skb);
if (err < 0) {
hci_cmd_sync_cancel_sync(hdev, -err);
- return;
+ return err;
}
atomic_dec(&hdev->cmd_cnt);
+ } else {
+ err = -ENODATA;
+ kfree_skb(skb);
}
if (hdev->req_status == HCI_REQ_PEND &&
@@ -4150,12 +4130,15 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
kfree_skb(hdev->req_skb);
hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
}
+
+ return err;
}
static void hci_cmd_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
struct sk_buff *skb;
+ int err;
BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
@@ -4166,7 +4149,9 @@ static void hci_cmd_work(struct work_struct *work)
if (!skb)
return;
- hci_send_cmd_sync(hdev, skb);
+ err = hci_send_cmd_sync(hdev, skb);
+ if (err)
+ return;
rcu_read_lock();
if (test_bit(HCI_RESET, &hdev->flags) ||
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index fc866759910d..ad19022ae127 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1311,7 +1311,9 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
goto done;
}
+ hci_dev_lock(hdev);
mgmt_index_removed(hdev);
+ hci_dev_unlock(hdev);
err = hci_dev_open(hdev->id);
if (err) {
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 3d98cb6291da..616c2fef91d2 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -2314,14 +2314,31 @@ static void iso_disconn_cfm(struct hci_conn *hcon, __u8 reason)
iso_conn_del(hcon, bt_to_errno(reason));
}
-void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+int iso_recv(struct hci_dev *hdev, u16 handle, struct sk_buff *skb, u16 flags)
{
- struct iso_conn *conn = hcon->iso_data;
+ struct hci_conn *hcon;
+ struct iso_conn *conn;
struct skb_shared_hwtstamps *hwts;
__u16 pb, ts, len, sn;
- if (!conn)
- goto drop;
+ hci_dev_lock(hdev);
+
+ hcon = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!hcon) {
+ hci_dev_unlock(hdev);
+ kfree_skb(skb);
+ return -ENOENT;
+ }
+
+ conn = iso_conn_hold_unless_zero(hcon->iso_data);
+ hcon = NULL;
+
+ hci_dev_unlock(hdev);
+
+ if (!conn) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
pb = hci_iso_flags_pb(flags);
ts = hci_iso_flags_ts(flags);
@@ -2377,7 +2394,7 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
hci_skb_pkt_status(skb) = flags & 0x03;
hci_skb_pkt_seqnum(skb) = sn;
iso_recv_frame(conn, skb);
- return;
+ goto done;
}
if (pb == ISO_SINGLE) {
@@ -2455,6 +2472,9 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
drop:
kfree_skb(skb);
+done:
+ iso_conn_put(conn);
+ return 0;
}
static struct hci_cb iso_cb = {
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 35c57657bcf4..07b493331fd7 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7510,13 +7510,24 @@ struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
return c;
}
-void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+int l2cap_recv_acldata(struct hci_dev *hdev, u16 handle,
+ struct sk_buff *skb, u16 flags)
{
+ struct hci_conn *hcon;
struct l2cap_conn *conn;
int len;
- /* Lock hdev to access l2cap_data to avoid race with l2cap_conn_del */
- hci_dev_lock(hcon->hdev);
+ /* Lock hdev for the hci_conn lookup; also avoids racing l2cap_conn_del on l2cap_data */
+ hci_dev_lock(hdev);
+
+ hcon = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!hcon) {
+ hci_dev_unlock(hdev);
+ kfree_skb(skb);
+ return -ENOENT;
+ }
+
+ hci_conn_enter_active_mode(hcon, BT_POWER_FORCE_ACTIVE_OFF);
conn = hcon->l2cap_data;
@@ -7524,12 +7535,13 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
conn = l2cap_conn_add(hcon);
conn = l2cap_conn_hold_unless_zero(conn);
+ hcon = NULL;
- hci_dev_unlock(hcon->hdev);
+ hci_dev_unlock(hdev);
if (!conn) {
kfree_skb(skb);
- return;
+ return -EINVAL;
}
BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
@@ -7643,6 +7655,7 @@ drop:
unlock:
mutex_unlock(&conn->lock);
l2cap_conn_put(conn);
+ return 0;
}
static struct hci_cb l2cap_cb = {
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index ab0cf442d57b..298c2a9ab4df 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -1458,22 +1458,39 @@ static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
sco_conn_del(hcon, bt_to_errno(reason));
}
-void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+int sco_recv_scodata(struct hci_dev *hdev, u16 handle, struct sk_buff *skb)
{
- struct sco_conn *conn = hcon->sco_data;
+ struct hci_conn *hcon;
+ struct sco_conn *conn;
- if (!conn)
- goto drop;
+ hci_dev_lock(hdev);
+
+ hcon = hci_conn_hash_lookup_handle(hdev, handle);
+ if (!hcon) {
+ hci_dev_unlock(hdev);
+ kfree_skb(skb);
+ return -ENOENT;
+ }
+
+ conn = sco_conn_hold_unless_zero(hcon->sco_data);
+ hcon = NULL;
+
+ hci_dev_unlock(hdev);
+
+ if (!conn) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
BT_DBG("conn %p len %u", conn, skb->len);
- if (skb->len) {
+ if (skb->len)
sco_recv_frame(conn, skb);
- return;
- }
+ else
+ kfree_skb(skb);
-drop:
- kfree_skb(skb);
+ sco_conn_put(conn);
+ return 0;
}
static struct hci_cb sco_cb = {
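
The sco.c and iso.c hunks above make the receive path look up the hci_conn itself and then pin the protocol connection with sco_conn_hold_unless_zero()/iso_conn_hold_unless_zero() before dropping the hdev lock. A minimal sketch of that "hold unless zero" refcount idiom, assuming a plain refcount_t; struct foo_conn and the helper names are illustrative, not from this patch:

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo_conn {
	refcount_t refs;
	/* protocol-private state ... */
};

/* Take a reference only if the object is not already on its way to zero. */
static struct foo_conn *foo_conn_hold_unless_zero(struct foo_conn *c)
{
	if (c && !refcount_inc_not_zero(&c->refs))
		return NULL;
	return c;
}

static void foo_conn_put(struct foo_conn *c)
{
	if (c && refcount_dec_and_test(&c->refs))
		kfree(c);
}

A caller that found the object under a lock can take the reference this way, drop the lock, and keep using the object until foo_conn_put().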
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 45512b2ba951..3a1ce04a7a53 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2136,7 +2136,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
struct smp_chan *smp = chan->data;
struct hci_conn *hcon = conn->hcon;
u8 *pkax, *pkbx, *na, *nb, confirm_hint;
- u32 passkey;
+ u32 passkey = 0;
int err;
bt_dev_dbg(hcon->hdev, "conn %p", conn);
@@ -2188,24 +2188,6 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
smp->prnd);
SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);
-
- /* Only Just-Works pairing requires extra checks */
- if (smp->method != JUST_WORKS)
- goto mackey_and_ltk;
-
- /* If there already exists long term key in local host, leave
- * the decision to user space since the remote device could
- * be legitimate or malicious.
- */
- if (hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
- hcon->role)) {
- /* Set passkey to 0. The value can be any number since
- * it'll be ignored anyway.
- */
- passkey = 0;
- confirm_hint = 1;
- goto confirm;
- }
}
mackey_and_ltk:
@@ -2226,11 +2208,12 @@ mackey_and_ltk:
if (err)
return SMP_UNSPECIFIED;
- confirm_hint = 0;
-
-confirm:
- if (smp->method == JUST_WORKS)
- confirm_hint = 1;
+ /* Always require user confirmation for Just-Works pairing to prevent
+ * impersonation attacks, or in case of a legitimate device that is
+ * repairing use the confirmation as acknowledgment to proceed with the
+ * creation of new keys.
+ */
+ confirm_hint = smp->method == JUST_WORKS ? 1 : 0;
err = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, hcon->type,
hcon->dst_type, passkey, confirm_hint);
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index b71b1635916e..a21c157daf7d 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -631,6 +631,7 @@ static int handle_auth_session_key(struct ceph_auth_client *ac, u64 global_id,
/* connection secret */
ceph_decode_32_safe(p, end, len, e_inval);
+ ceph_decode_need(p, end, len, e_inval);
dout("%s connection secret blob len %d\n", __func__, len);
if (len > 0) {
dp = *p + ceph_x_encrypt_offset();
@@ -648,6 +649,7 @@ static int handle_auth_session_key(struct ceph_auth_client *ac, u64 global_id,
/* service tickets */
ceph_decode_32_safe(p, end, len, e_inval);
+ ceph_decode_need(p, end, len, e_inval);
dout("%s service tickets blob len %d\n", __func__, len);
if (len > 0) {
ret = ceph_x_proc_ticket_reply(ac, &th->session_key,
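
The two ceph_decode_need() calls added above ensure the advertised blob length also fits in the remaining buffer before the payload is touched; ceph_decode_32_safe() alone only guarantees the 4-byte length field itself. A small freestanding sketch of the same length-then-payload check, using illustrative names only:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Decode a 32-bit length followed by that many payload bytes.
 * Returns 0 on success, -1 if either the length field or the payload
 * would run past 'end'. */
static int decode_blob(const uint8_t **p, const uint8_t *end,
		       const uint8_t **blob, uint32_t *blob_len)
{
	uint32_t len;

	if ((size_t)(end - *p) < sizeof(len))
		return -1;
	memcpy(&len, *p, sizeof(len));	/* wire value; Ceph uses little endian */
	*p += sizeof(len);

	if ((size_t)(end - *p) < len)	/* the check the patch adds */
		return -1;
	*blob = *p;
	*blob_len = len;
	*p += len;
	return 0;
}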
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 4c6441536d55..e734e57be083 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -786,41 +786,52 @@ void ceph_reset_client_addr(struct ceph_client *client)
EXPORT_SYMBOL(ceph_reset_client_addr);
/*
- * true if we have the mon map (and have thus joined the cluster)
- */
-static bool have_mon_and_osd_map(struct ceph_client *client)
-{
- return client->monc.monmap && client->monc.monmap->epoch &&
- client->osdc.osdmap && client->osdc.osdmap->epoch;
-}
-
-/*
* mount: join the ceph cluster, and open root directory.
*/
-int __ceph_open_session(struct ceph_client *client, unsigned long started)
+int __ceph_open_session(struct ceph_client *client)
{
- unsigned long timeout = client->options->mount_timeout;
- long err;
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ long timeout = ceph_timeout_jiffies(client->options->mount_timeout);
+ bool have_monmap, have_osdmap;
+ int err;
/* open session, and wait for mon and osd maps */
err = ceph_monc_open_session(&client->monc);
if (err < 0)
return err;
- while (!have_mon_and_osd_map(client)) {
- if (timeout && time_after_eq(jiffies, started + timeout))
- return -ETIMEDOUT;
+ add_wait_queue(&client->auth_wq, &wait);
+ for (;;) {
+ mutex_lock(&client->monc.mutex);
+ err = client->auth_err;
+ have_monmap = client->monc.monmap && client->monc.monmap->epoch;
+ mutex_unlock(&client->monc.mutex);
+
+ down_read(&client->osdc.lock);
+ have_osdmap = client->osdc.osdmap && client->osdc.osdmap->epoch;
+ up_read(&client->osdc.lock);
+
+ if (err || (have_monmap && have_osdmap))
+ break;
+
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+ break;
+ }
+
+ if (!timeout) {
+ err = -ETIMEDOUT;
+ break;
+ }
/* wait */
dout("mount waiting for mon_map\n");
- err = wait_event_interruptible_timeout(client->auth_wq,
- have_mon_and_osd_map(client) || (client->auth_err < 0),
- ceph_timeout_jiffies(timeout));
- if (err < 0)
- return err;
- if (client->auth_err < 0)
- return client->auth_err;
+ timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
}
+ remove_wait_queue(&client->auth_wq, &wait);
+
+ if (err)
+ return err;
pr_info("client%llu fsid %pU\n", ceph_client_gid(client),
&client->fsid);
@@ -833,12 +844,11 @@ EXPORT_SYMBOL(__ceph_open_session);
int ceph_open_session(struct ceph_client *client)
{
int ret;
- unsigned long started = jiffies; /* note the start time */
dout("open_session start\n");
mutex_lock(&client->mount_mutex);
- ret = __ceph_open_session(client, started);
+ ret = __ceph_open_session(client);
mutex_unlock(&client->mount_mutex);
return ret;
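
__ceph_open_session() now hand-rolls the wait with DEFINE_WAIT_FUNC()/wait_woken() instead of wait_event_interruptible_timeout(), so the wake-up condition can be evaluated under the monc mutex and osdc lock. A bare-bones sketch of that pattern; the wait-queue pointer and condition_met() helper are illustrative stand-ins:

#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/jiffies.h>

static int wait_for_condition(wait_queue_head_t *wq, bool (*condition_met)(void))
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	long timeout = msecs_to_jiffies(60 * 1000);	/* illustrative timeout */
	int err = 0;

	add_wait_queue(wq, &wait);
	for (;;) {
		if (condition_met())	/* re-check under whatever locks it needs */
			break;
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}
		if (!timeout) {
			err = -ETIMEDOUT;
			break;
		}
		/* Sleep until woken or until the remaining timeout runs out. */
		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
	}
	remove_wait_queue(wq, &wait);

	return err;
}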
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 2110439f8a24..83c270bce63c 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -36,8 +36,9 @@ static int monmap_show(struct seq_file *s, void *p)
int i;
struct ceph_client *client = s->private;
+ mutex_lock(&client->monc.mutex);
if (client->monc.monmap == NULL)
- return 0;
+ goto out_unlock;
seq_printf(s, "epoch %d\n", client->monc.monmap->epoch);
for (i = 0; i < client->monc.monmap->num_mon; i++) {
@@ -48,6 +49,9 @@ static int monmap_show(struct seq_file *s, void *p)
ENTITY_NAME(inst->name),
ceph_pr_addr(&inst->addr));
}
+
+out_unlock:
+ mutex_unlock(&client->monc.mutex);
return 0;
}
@@ -56,13 +60,14 @@ static int osdmap_show(struct seq_file *s, void *p)
int i;
struct ceph_client *client = s->private;
struct ceph_osd_client *osdc = &client->osdc;
- struct ceph_osdmap *map = osdc->osdmap;
+ struct ceph_osdmap *map;
struct rb_node *n;
+ down_read(&osdc->lock);
+ map = osdc->osdmap;
if (map == NULL)
- return 0;
+ goto out_unlock;
- down_read(&osdc->lock);
seq_printf(s, "epoch %u barrier %u flags 0x%x\n", map->epoch,
osdc->epoch_barrier, map->flags);
@@ -131,6 +136,7 @@ static int osdmap_show(struct seq_file *s, void *p)
seq_printf(s, "]\n");
}
+out_unlock:
up_read(&osdc->lock);
return 0;
}
diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
index 9e39378eda00..9e48623018a3 100644
--- a/net/ceph/messenger_v2.c
+++ b/net/ceph/messenger_v2.c
@@ -1061,13 +1061,16 @@ static int decrypt_control_remainder(struct ceph_connection *con)
static int process_v2_sparse_read(struct ceph_connection *con,
struct page **pages, int spos)
{
- struct ceph_msg_data_cursor *cursor = &con->v2.in_cursor;
+ struct ceph_msg_data_cursor cursor;
int ret;
+ ceph_msg_data_cursor_init(&cursor, con->in_msg,
+ con->in_msg->sparse_read_total);
+
for (;;) {
char *buf = NULL;
- ret = con->ops->sparse_read(con, cursor, &buf);
+ ret = con->ops->sparse_read(con, &cursor, &buf);
if (ret <= 0)
return ret;
@@ -1085,11 +1088,11 @@ static int process_v2_sparse_read(struct ceph_connection *con,
} else {
struct bio_vec bv;
- get_bvec_at(cursor, &bv);
+ get_bvec_at(&cursor, &bv);
len = min_t(int, len, bv.bv_len);
memcpy_page(bv.bv_page, bv.bv_offset,
spage, soff, len);
- ceph_msg_data_advance(cursor, len);
+ ceph_msg_data_advance(&cursor, len);
}
spos += len;
ret -= len;
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 295098873861..d245fa508e1c 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1504,8 +1504,6 @@ static int decode_new_primary_temp(void **p, void *end,
u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
{
- BUG_ON(osd >= map->max_osd);
-
if (!map->osd_primary_affinity)
return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
@@ -1514,8 +1512,6 @@ u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
{
- BUG_ON(osd >= map->max_osd);
-
if (!map->osd_primary_affinity) {
int i;
@@ -1577,6 +1573,8 @@ static int decode_new_primary_affinity(void **p, void *end,
ceph_decode_32_safe(p, end, osd, e_inval);
ceph_decode_32_safe(p, end, aff, e_inval);
+ if (osd >= map->max_osd)
+ goto e_inval;
ret = set_primary_affinity(map, osd, aff);
if (ret)
@@ -1879,7 +1877,9 @@ static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
osd = ceph_decode_32(p);
w = ceph_decode_32(p);
- BUG_ON(osd >= map->max_osd);
+ if (osd >= map->max_osd)
+ goto e_inval;
+
osdmap_info(map, "osd%d weight 0x%x %s\n", osd, w,
w == CEPH_OSD_IN ? "(in)" :
(w == CEPH_OSD_OUT ? "(out)" : ""));
@@ -1905,13 +1905,15 @@ static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
u32 xorstate;
osd = ceph_decode_32(p);
+ if (osd >= map->max_osd)
+ goto e_inval;
+
if (struct_v >= 5)
xorstate = ceph_decode_32(p);
else
xorstate = ceph_decode_8(p);
if (xorstate == 0)
xorstate = CEPH_OSD_UP;
- BUG_ON(osd >= map->max_osd);
if ((map->osd_state[osd] & CEPH_OSD_UP) &&
(xorstate & CEPH_OSD_UP))
osdmap_info(map, "osd%d down\n", osd);
@@ -1937,7 +1939,9 @@ static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
struct ceph_entity_addr addr;
osd = ceph_decode_32(p);
- BUG_ON(osd >= map->max_osd);
+ if (osd >= map->max_osd)
+ goto e_inval;
+
if (struct_v >= 7)
ret = ceph_decode_entity_addrvec(p, end, msgr2, &addr);
else
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index ad54b12d4b4c..8bb71a10dba0 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -443,6 +443,9 @@ static int generic_hwtstamp_ioctl_lower(struct net_device *dev, int cmd,
struct ifreq ifrr;
int err;
+ if (!kernel_cfg->ifr)
+ return -EINVAL;
+
strscpy_pad(ifrr.ifr_name, dev->name, IFNAMSIZ);
ifrr.ifr_ifru = kernel_cfg->ifr->ifr_ifru;
diff --git a/net/devlink/rate.c b/net/devlink/rate.c
index 264fb82cba19..d157a8419bca 100644
--- a/net/devlink/rate.c
+++ b/net/devlink/rate.c
@@ -828,13 +828,15 @@ void devl_rate_nodes_destroy(struct devlink *devlink)
if (!devlink_rate->parent)
continue;
- refcount_dec(&devlink_rate->parent->refcnt);
if (devlink_rate_is_leaf(devlink_rate))
ops->rate_leaf_parent_set(devlink_rate, NULL, devlink_rate->priv,
NULL, NULL);
else if (devlink_rate_is_node(devlink_rate))
ops->rate_node_parent_set(devlink_rate, NULL, devlink_rate->priv,
NULL, NULL);
+
+ refcount_dec(&devlink_rate->parent->refcnt);
+ devlink_rate->parent = NULL;
}
list_for_each_entry_safe(devlink_rate, tmp, &devlink->rate_list, list) {
if (devlink_rate_is_node(devlink_rate)) {
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index e0d94270da28..05828d4cb6cd 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -122,8 +122,10 @@ static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
struct sk_buff *skb,
netdev_features_t features)
{
- __be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6)
- : htons(ETH_P_IP);
+ const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x,
+ XFRM_MODE_SKB_CB(skb)->protocol);
+ __be16 type = inner_mode->family == AF_INET6 ? htons(ETH_P_IPV6)
+ : htons(ETH_P_IP);
return skb_eth_gso_segment(skb, features, type);
}
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 7b41fb4f00b5..22410243ebe8 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -158,8 +158,10 @@ static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
struct sk_buff *skb,
netdev_features_t features)
{
- __be16 type = x->inner_mode.family == AF_INET ? htons(ETH_P_IP)
- : htons(ETH_P_IPV6);
+ const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x,
+ XFRM_MODE_SKB_CB(skb)->protocol);
+ __be16 type = inner_mode->family == AF_INET ? htons(ETH_P_IP)
+ : htons(ETH_P_IPV6);
return skb_eth_gso_segment(skb, features, type);
}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 369a2f2e459c..0710281dd95a 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1246,9 +1246,9 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns
else
l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));
- /* Reset skb netfilter state */
- memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
- IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
+ /* Reset control buffer */
+ memset(skb->cb, 0, sizeof(skb->cb));
+
nf_reset_ct(skb);
/* L2TP uses its own lockdep subclass to avoid lockdep splats caused by
diff --git a/net/mctp/route.c b/net/mctp/route.c
index 4d314e062ba9..2ac4011a953f 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -623,6 +623,7 @@ static int mctp_dst_output(struct mctp_dst *dst, struct sk_buff *skb)
skb->protocol = htons(ETH_P_MCTP);
skb->pkt_type = PACKET_OUTGOING;
+ skb->dev = dst->dev->dev;
if (skb->len > dst->mtu) {
kfree_skb(skb);
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 1103b3341a70..f24ae7d40e88 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -838,8 +838,11 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
opts->suboptions = 0;
+ /* Force later mptcp_write_options(), but do not use any actual
+ * option space.
+ */
if (unlikely(__mptcp_check_fallback(msk) && !mptcp_check_infinite_map(skb)))
- return false;
+ return true;
if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) {
if (mptcp_established_options_fastclose(sk, &opt_size, remaining, opts) ||
@@ -1041,6 +1044,31 @@ static void __mptcp_snd_una_update(struct mptcp_sock *msk, u64 new_snd_una)
WRITE_ONCE(msk->snd_una, new_snd_una);
}
+static void rwin_update(struct mptcp_sock *msk, struct sock *ssk,
+ struct sk_buff *skb)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ struct tcp_sock *tp = tcp_sk(ssk);
+ u64 mptcp_rcv_wnd;
+
+ /* Avoid touching extra cachelines if TCP is going to accept this
+ * skb without filling the TCP-level window even with a possibly
+ * outdated mptcp-level rwin.
+ */
+ if (!skb->len || skb->len < tcp_receive_window(tp))
+ return;
+
+ mptcp_rcv_wnd = atomic64_read(&msk->rcv_wnd_sent);
+ if (!after64(mptcp_rcv_wnd, subflow->rcv_wnd_sent))
+ return;
+
+ /* Some other subflow grew the mptcp-level rwin since rcv_wup,
+ * resync.
+ */
+ tp->rcv_wnd += mptcp_rcv_wnd - subflow->rcv_wnd_sent;
+ subflow->rcv_wnd_sent = mptcp_rcv_wnd;
+}
+
static void ack_update_msk(struct mptcp_sock *msk,
struct sock *ssk,
struct mptcp_options_received *mp_opt)
@@ -1208,6 +1236,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
*/
if (mp_opt.use_ack)
ack_update_msk(msk, sk, &mp_opt);
+ rwin_update(msk, sk, skb);
/* Zero-data-length packets are dropped by the caller and not
* propagated to the MPTCP layer, so the skb extension does not
@@ -1294,6 +1323,10 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th)
if (rcv_wnd_new != rcv_wnd_old) {
raise_win:
+ /* The msk-level rcv wnd is after the tcp level one,
+ * sync the latter.
+ */
+ rcv_wnd_new = rcv_wnd_old;
win = rcv_wnd_old - ack_seq;
tp->rcv_wnd = min_t(u64, win, U32_MAX);
new_win = tp->rcv_wnd;
@@ -1317,6 +1350,21 @@ raise_win:
update_wspace:
WRITE_ONCE(msk->old_wspace, tp->rcv_wnd);
+ subflow->rcv_wnd_sent = rcv_wnd_new;
+}
+
+static void mptcp_track_rwin(struct tcp_sock *tp)
+{
+ const struct sock *ssk = (const struct sock *)tp;
+ struct mptcp_subflow_context *subflow;
+ struct mptcp_sock *msk;
+
+ if (!ssk)
+ return;
+
+ subflow = mptcp_subflow_ctx(ssk);
+ msk = mptcp_sk(subflow->conn);
+ WRITE_ONCE(msk->old_wspace, tp->rcv_wnd);
}
__sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
@@ -1611,6 +1659,10 @@ mp_rst:
opts->reset_transient,
opts->reset_reason);
return;
+ } else if (unlikely(!opts->suboptions)) {
+ /* Fallback to TCP */
+ mptcp_track_rwin(tp);
+ return;
}
if (OPTION_MPTCP_PRIO & opts->suboptions) {
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 2ff1b9499568..9604b91902b8 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -18,6 +18,7 @@ struct mptcp_pm_add_entry {
u8 retrans_times;
struct timer_list add_timer;
struct mptcp_sock *sock;
+ struct rcu_head rcu;
};
static DEFINE_SPINLOCK(mptcp_pm_list_lock);
@@ -155,7 +156,7 @@ bool mptcp_remove_anno_list_by_saddr(struct mptcp_sock *msk,
entry = mptcp_pm_del_add_timer(msk, addr, false);
ret = entry;
- kfree(entry);
+ kfree_rcu(entry, rcu);
return ret;
}
@@ -345,22 +346,27 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
{
struct mptcp_pm_add_entry *entry;
struct sock *sk = (struct sock *)msk;
- struct timer_list *add_timer = NULL;
+ bool stop_timer = false;
+
+ rcu_read_lock();
spin_lock_bh(&msk->pm.lock);
entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
if (entry && (!check_id || entry->addr.id == addr->id)) {
entry->retrans_times = ADD_ADDR_RETRANS_MAX;
- add_timer = &entry->add_timer;
+ stop_timer = true;
}
if (!check_id && entry)
list_del(&entry->list);
spin_unlock_bh(&msk->pm.lock);
- /* no lock, because sk_stop_timer_sync() is calling timer_delete_sync() */
- if (add_timer)
- sk_stop_timer_sync(sk, add_timer);
+ /* Note: entry might have been removed by another thread.
+ * We hold rcu_read_lock() to ensure it is not freed under us.
+ */
+ if (stop_timer)
+ sk_stop_timer_sync(sk, &entry->add_timer);
+ rcu_read_unlock();
return entry;
}
@@ -415,7 +421,7 @@ static void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
list_for_each_entry_safe(entry, tmp, &free_list, list) {
sk_stop_timer_sync(sk, &entry->add_timer);
- kfree(entry);
+ kfree_rcu(entry, rcu);
}
}
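
The pm.c hunks free mptcp_pm_add_entry with kfree_rcu() so the sk_stop_timer_sync() call above, done under rcu_read_lock(), can never touch already-freed memory; that is also why 'struct rcu_head rcu' is added to the entry. A minimal sketch of the kfree_rcu() requirement, with illustrative names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo_entry {
	int value;
	struct rcu_head rcu;	/* needed so kfree_rcu() can queue the free */
};

static void foo_entry_release(struct foo_entry *e)
{
	/* Readers that found the entry under rcu_read_lock() may still be
	 * using it; defer the actual kfree() until a grace period elapses. */
	kfree_rcu(e, rcu);
}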
diff --git a/net/mptcp/pm_kernel.c b/net/mptcp/pm_kernel.c
index 2ae95476dba3..0a50fd5edc06 100644
--- a/net/mptcp/pm_kernel.c
+++ b/net/mptcp/pm_kernel.c
@@ -672,7 +672,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
void mptcp_pm_nl_rm_addr(struct mptcp_sock *msk, u8 rm_id)
{
- if (rm_id && WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) {
+ if (rm_id && !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) {
u8 limit_add_addr_accepted =
mptcp_pm_get_limit_add_addr_accepted(msk);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 90b4aeca2596..1e413426deee 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -78,6 +78,13 @@ bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib)
if (__mptcp_check_fallback(msk))
return true;
+ /* The caller possibly is not holding the msk socket lock, but
+ * in the fallback case only the current subflow is touching
+ * the OoO queue.
+ */
+ if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
+ return false;
+
spin_lock_bh(&msk->fallback_lock);
if (!msk->allow_infinite_fallback) {
spin_unlock_bh(&msk->fallback_lock);
@@ -937,14 +944,19 @@ static void mptcp_reset_rtx_timer(struct sock *sk)
bool mptcp_schedule_work(struct sock *sk)
{
- if (inet_sk_state_load(sk) != TCP_CLOSE &&
- schedule_work(&mptcp_sk(sk)->work)) {
- /* each subflow already holds a reference to the sk, and the
- * workqueue is invoked by a subflow, so sk can't go away here.
- */
- sock_hold(sk);
+ if (inet_sk_state_load(sk) == TCP_CLOSE)
+ return false;
+
+ /* Get a reference on this socket, mptcp_worker() will release it.
+ * As mptcp_worker() might complete before us, we can not avoid
+ * a sock_hold()/sock_put() if schedule_work() returns false.
+ */
+ sock_hold(sk);
+
+ if (schedule_work(&mptcp_sk(sk)->work))
return true;
- }
+
+ sock_put(sk);
return false;
}
@@ -2399,7 +2411,6 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
/* flags for __mptcp_close_ssk() */
#define MPTCP_CF_PUSH BIT(1)
-#define MPTCP_CF_FASTCLOSE BIT(2)
/* be sure to send a reset only if the caller asked for it, also
* clean completely the subflow status when the subflow reaches
@@ -2410,7 +2421,7 @@ static void __mptcp_subflow_disconnect(struct sock *ssk,
unsigned int flags)
{
if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
- (flags & MPTCP_CF_FASTCLOSE)) {
+ subflow->send_fastclose) {
/* The MPTCP code never wait on the subflow sockets, TCP-level
* disconnect should never fail
*/
@@ -2457,14 +2468,8 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
- if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) {
- /* be sure to force the tcp_close path
- * to generate the egress reset
- */
- ssk->sk_lingertime = 0;
- sock_set_flag(ssk, SOCK_LINGER);
- subflow->send_fastclose = 1;
- }
+ if (subflow->send_fastclose && ssk->sk_state != TCP_CLOSE)
+ tcp_set_state(ssk, TCP_CLOSE);
need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
if (!dispose_it) {
@@ -2560,7 +2565,8 @@ static void __mptcp_close_subflow(struct sock *sk)
if (ssk_state != TCP_CLOSE &&
(ssk_state != TCP_CLOSE_WAIT ||
- inet_sk_state_load(sk) != TCP_ESTABLISHED))
+ inet_sk_state_load(sk) != TCP_ESTABLISHED ||
+ __mptcp_check_fallback(msk)))
continue;
/* 'subflow_data_ready' will re-sched once rx queue is empty */
@@ -2659,7 +2665,7 @@ static void __mptcp_retrans(struct sock *sk)
}
if (!mptcp_send_head(sk))
- return;
+ goto clear_scheduled;
goto reset_timer;
}
@@ -2690,7 +2696,7 @@ static void __mptcp_retrans(struct sock *sk)
if (__mptcp_check_fallback(msk)) {
spin_unlock_bh(&msk->fallback_lock);
release_sock(ssk);
- return;
+ goto clear_scheduled;
}
while (info.sent < info.limit) {
@@ -2722,6 +2728,15 @@ reset_timer:
if (!mptcp_rtx_timer_pending(sk))
mptcp_reset_rtx_timer(sk);
+
+clear_scheduled:
+ /* If no rtx data was available or in case of fallback, there
+ * could be left-over scheduled subflows; clear them all
+ * or later xmit could use bad ones
+ */
+ mptcp_for_each_subflow(msk, subflow)
+ if (READ_ONCE(subflow->scheduled))
+ mptcp_subflow_set_scheduled(subflow, false);
}
/* schedule the timeout timer for the relevant event: either close timeout
@@ -2768,9 +2783,32 @@ static void mptcp_do_fastclose(struct sock *sk)
struct mptcp_sock *msk = mptcp_sk(sk);
mptcp_set_state(sk, TCP_CLOSE);
- mptcp_for_each_subflow_safe(msk, subflow, tmp)
- __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow),
- subflow, MPTCP_CF_FASTCLOSE);
+
+ /* Explicitly send the fastclose reset as needed */
+ if (__mptcp_check_fallback(msk))
+ return;
+
+ mptcp_for_each_subflow_safe(msk, subflow, tmp) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ lock_sock(ssk);
+
+ /* Some subflow socket states don't allow/need a reset. */
+ if ((1 << ssk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+ goto unlock;
+
+ subflow->send_fastclose = 1;
+
+ /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
+ * issue in __tcp_select_window(), see tcp_disconnect().
+ */
+ inet_csk(ssk)->icsk_ack.rcv_mss = TCP_MIN_MSS;
+
+ tcp_send_active_reset(ssk, ssk->sk_allocation,
+ SK_RST_REASON_TCP_ABORT_ON_CLOSE);
+unlock:
+ release_sock(ssk);
+ }
}
static void mptcp_worker(struct work_struct *work)
@@ -2797,7 +2835,11 @@ static void mptcp_worker(struct work_struct *work)
__mptcp_close_subflow(sk);
if (mptcp_close_tout_expired(sk)) {
+ struct mptcp_subflow_context *subflow, *tmp;
+
mptcp_do_fastclose(sk);
+ mptcp_for_each_subflow_safe(msk, subflow, tmp)
+ __mptcp_close_ssk(sk, subflow->tcp_sock, subflow, 0);
mptcp_close_wake_up(sk);
}
@@ -3222,7 +3264,8 @@ static int mptcp_disconnect(struct sock *sk, int flags)
/* msk->subflow is still intact, the following will not free the first
* subflow
*/
- mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
+ mptcp_do_fastclose(sk);
+ mptcp_destroy_common(msk);
/* The first subflow is already in TCP_CLOSE status, the following
* can't overlap with a fallback anymore
@@ -3401,7 +3444,7 @@ void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
}
-void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
+void mptcp_destroy_common(struct mptcp_sock *msk)
{
struct mptcp_subflow_context *subflow, *tmp;
struct sock *sk = (struct sock *)msk;
@@ -3410,7 +3453,7 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
/* join list will be eventually flushed (with rst) at sock lock release time */
mptcp_for_each_subflow_safe(msk, subflow, tmp)
- __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags);
+ __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, 0);
__skb_queue_purge(&sk->sk_receive_queue);
skb_rbtree_purge(&msk->out_of_order_queue);
@@ -3428,7 +3471,7 @@ static void mptcp_destroy(struct sock *sk)
/* allow the following to close even the initial subflow */
msk->free_first = 1;
- mptcp_destroy_common(msk, 0);
+ mptcp_destroy_common(msk);
sk_sockets_allocated_dec(sk);
}
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 379a88e14e8d..6ca97096607c 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -509,6 +509,7 @@ struct mptcp_subflow_context {
u64 remote_key;
u64 idsn;
u64 map_seq;
+ u64 rcv_wnd_sent;
u32 snd_isn;
u32 token;
u32 rel_write_seq;
@@ -976,7 +977,7 @@ static inline void mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
local_bh_enable();
}
-void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags);
+void mptcp_destroy_common(struct mptcp_sock *msk);
#define MPTCP_TOKEN_MAX_RETRIES 4
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 2832e0794197..792ca44a461d 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -572,69 +572,6 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
return 0;
}
-static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
- const struct nlattr *a)
-{
- struct nshhdr *nh;
- size_t length;
- int err;
- u8 flags;
- u8 ttl;
- int i;
-
- struct ovs_key_nsh key;
- struct ovs_key_nsh mask;
-
- err = nsh_key_from_nlattr(a, &key, &mask);
- if (err)
- return err;
-
- /* Make sure the NSH base header is there */
- if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
- return -ENOMEM;
-
- nh = nsh_hdr(skb);
- length = nsh_hdr_len(nh);
-
- /* Make sure the whole NSH header is there */
- err = skb_ensure_writable(skb, skb_network_offset(skb) +
- length);
- if (unlikely(err))
- return err;
-
- nh = nsh_hdr(skb);
- skb_postpull_rcsum(skb, nh, length);
- flags = nsh_get_flags(nh);
- flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
- flow_key->nsh.base.flags = flags;
- ttl = nsh_get_ttl(nh);
- ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
- flow_key->nsh.base.ttl = ttl;
- nsh_set_flags_and_ttl(nh, flags, ttl);
- nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
- mask.base.path_hdr);
- flow_key->nsh.base.path_hdr = nh->path_hdr;
- switch (nh->mdtype) {
- case NSH_M_TYPE1:
- for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
- nh->md1.context[i] =
- OVS_MASKED(nh->md1.context[i], key.context[i],
- mask.context[i]);
- }
- memcpy(flow_key->nsh.context, nh->md1.context,
- sizeof(nh->md1.context));
- break;
- case NSH_M_TYPE2:
- memset(flow_key->nsh.context, 0,
- sizeof(flow_key->nsh.context));
- break;
- default:
- return -EINVAL;
- }
- skb_postpush_rcsum(skb, nh, length);
- return 0;
-}
-
/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
__be16 new_port, __sum16 *check)
@@ -1130,10 +1067,6 @@ static int execute_masked_set_action(struct sk_buff *skb,
get_mask(a, struct ovs_key_ethernet *));
break;
- case OVS_KEY_ATTR_NSH:
- err = set_nsh(skb, flow_key, a);
- break;
-
case OVS_KEY_ATTR_IPV4:
err = set_ipv4(skb, flow_key, nla_data(a),
get_mask(a, struct ovs_key_ipv4 *));
@@ -1170,6 +1103,7 @@ static int execute_masked_set_action(struct sk_buff *skb,
case OVS_KEY_ATTR_CT_LABELS:
case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
+ case OVS_KEY_ATTR_NSH:
err = -EINVAL;
break;
}
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index ad64bb9ab5e2..1cb4f97335d8 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1305,6 +1305,11 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
return 0;
}
+/*
+ * Constructs NSH header 'nh' from attributes of OVS_ACTION_ATTR_PUSH_NSH,
+ * where 'nh' points to a memory block of 'size' bytes. It's assumed that
+ * attributes were previously validated with validate_push_nsh().
+ */
int nsh_hdr_from_nlattr(const struct nlattr *attr,
struct nshhdr *nh, size_t size)
{
@@ -1314,8 +1319,6 @@ int nsh_hdr_from_nlattr(const struct nlattr *attr,
u8 ttl = 0;
int mdlen = 0;
- /* validate_nsh has check this, so we needn't do duplicate check here
- */
if (size < NSH_BASE_HDR_LEN)
return -ENOBUFS;
@@ -1359,46 +1362,6 @@ int nsh_hdr_from_nlattr(const struct nlattr *attr,
return 0;
}
-int nsh_key_from_nlattr(const struct nlattr *attr,
- struct ovs_key_nsh *nsh, struct ovs_key_nsh *nsh_mask)
-{
- struct nlattr *a;
- int rem;
-
- /* validate_nsh has check this, so we needn't do duplicate check here
- */
- nla_for_each_nested(a, attr, rem) {
- int type = nla_type(a);
-
- switch (type) {
- case OVS_NSH_KEY_ATTR_BASE: {
- const struct ovs_nsh_key_base *base = nla_data(a);
- const struct ovs_nsh_key_base *base_mask = base + 1;
-
- nsh->base = *base;
- nsh_mask->base = *base_mask;
- break;
- }
- case OVS_NSH_KEY_ATTR_MD1: {
- const struct ovs_nsh_key_md1 *md1 = nla_data(a);
- const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
-
- memcpy(nsh->context, md1->context, sizeof(*md1));
- memcpy(nsh_mask->context, md1_mask->context,
- sizeof(*md1_mask));
- break;
- }
- case OVS_NSH_KEY_ATTR_MD2:
- /* Not supported yet */
- return -ENOTSUPP;
- default:
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static int nsh_key_put_from_nlattr(const struct nlattr *attr,
struct sw_flow_match *match, bool is_mask,
bool is_push_nsh, bool log)
@@ -2839,17 +2802,13 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
return err;
}
-static bool validate_nsh(const struct nlattr *attr, bool is_mask,
- bool is_push_nsh, bool log)
+static bool validate_push_nsh(const struct nlattr *attr, bool log)
{
struct sw_flow_match match;
struct sw_flow_key key;
- int ret = 0;
ovs_match_init(&match, &key, true, NULL);
- ret = nsh_key_put_from_nlattr(attr, &match, is_mask,
- is_push_nsh, log);
- return !ret;
+ return !nsh_key_put_from_nlattr(attr, &match, false, true, log);
}
/* Return false if there are any non-masked bits set.
@@ -2997,13 +2956,6 @@ static int validate_set(const struct nlattr *a,
break;
- case OVS_KEY_ATTR_NSH:
- if (eth_type != htons(ETH_P_NSH))
- return -EINVAL;
- if (!validate_nsh(nla_data(a), masked, false, log))
- return -EINVAL;
- break;
-
default:
return -EINVAL;
}
@@ -3437,7 +3389,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
return -EINVAL;
}
mac_proto = MAC_PROTO_NONE;
- if (!validate_nsh(nla_data(a), false, true, true))
+ if (!validate_push_nsh(nla_data(a), log))
return -EINVAL;
break;
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index fe7f77fc5f18..ff8cdecbe346 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -65,8 +65,6 @@ int ovs_nla_put_actions(const struct nlattr *attr,
void ovs_nla_free_flow_actions(struct sw_flow_actions *);
void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *);
-int nsh_key_from_nlattr(const struct nlattr *attr, struct ovs_key_nsh *nsh,
- struct ovs_key_nsh *nsh_mask);
int nsh_hdr_from_nlattr(const struct nlattr *attr, struct nshhdr *nh,
size_t size);
diff --git a/net/sched/em_canid.c b/net/sched/em_canid.c
index 5337bc462755..2d27f91d8441 100644
--- a/net/sched/em_canid.c
+++ b/net/sched/em_canid.c
@@ -99,6 +99,9 @@ static int em_canid_match(struct sk_buff *skb, struct tcf_ematch *m,
int i;
const struct can_filter *lp;
+ if (!pskb_may_pull(skb, CAN_MTU))
+ return 0;
+
can_id = em_canid_get_id(skb);
if (can_id & CAN_EFF_FLAG) {
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index 64b637f18bc7..48c1bce74f49 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -22,9 +22,12 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
struct tcf_pkt_info *info)
{
struct tcf_em_cmp *cmp = (struct tcf_em_cmp *) em->data;
- unsigned char *ptr = tcf_get_base_ptr(skb, cmp->layer) + cmp->off;
+ unsigned char *ptr = tcf_get_base_ptr(skb, cmp->layer);
u32 val = 0;
+ if (!ptr)
+ return 0;
+ ptr += cmp->off;
if (!tcf_valid_offset(skb, ptr, cmp->align))
return 0;
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index 4f9f21a05d5e..c65ffa5fff94 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -42,6 +42,8 @@ static int em_nbyte_match(struct sk_buff *skb, struct tcf_ematch *em,
struct nbyte_data *nbyte = (struct nbyte_data *) em->data;
unsigned char *ptr = tcf_get_base_ptr(skb, nbyte->hdr.layer);
+ if (!ptr)
+ return 0;
ptr += nbyte->hdr.off;
if (!tcf_valid_offset(skb, ptr, nbyte->hdr.len))
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index 6b3d0af72c39..692e2be1793e 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -29,12 +29,19 @@ static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m,
struct tcf_pkt_info *info)
{
struct text_match *tm = EM_TEXT_PRIV(m);
+ unsigned char *ptr;
int from, to;
- from = tcf_get_base_ptr(skb, tm->from_layer) - skb->data;
+ ptr = tcf_get_base_ptr(skb, tm->from_layer);
+ if (!ptr)
+ return 0;
+ from = ptr - skb->data;
from += tm->from_offset;
- to = tcf_get_base_ptr(skb, tm->to_layer) - skb->data;
+ ptr = tcf_get_base_ptr(skb, tm->to_layer);
+ if (!ptr)
+ return 0;
+ to = ptr - skb->data;
to += tm->to_offset;
return skb_find_text(skb, from, to, tm->config) != UINT_MAX;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 768098dec231..833c3616d2a2 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2954,6 +2954,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
u = unix_sk(sk);
+redo:
/* Lock the socket to prevent queue disordering
* while sleeps in memcpy_tomsg
*/
@@ -2965,7 +2966,6 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
struct sk_buff *skb, *last;
int chunk;
-redo:
unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD)) {
err = -ECONNRESET;
@@ -3015,7 +3015,6 @@ again:
goto out;
}
- mutex_lock(&u->iolock);
goto redo;
unlock:
unix_state_unlock(sk);
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 76763247a377..a9ca9c3b87b3 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1661,18 +1661,40 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
timeout = schedule_timeout(timeout);
lock_sock(sk);
- if (signal_pending(current)) {
- err = sock_intr_errno(timeout);
- sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
- sock->state = SS_UNCONNECTED;
- vsock_transport_cancel_pkt(vsk);
- vsock_remove_connected(vsk);
- goto out_wait;
- } else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) {
- err = -ETIMEDOUT;
+ /* Connection established. Whatever happens to socket once we
+ * release it, that's not connect()'s concern. No need to go
+ * into signal and timeout handling. Call it a day.
+ *
+ * Note that allowing to "reset" an already established socket
+ * here is racy and insecure.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED)
+ break;
+
+ /* If connection was _not_ established and a signal/timeout came
+ * to be, we want the socket's state reset. User space may want
+ * to retry.
+ *
+ * sk_state != TCP_ESTABLISHED implies that socket is not on
+ * vsock_connected_table. We keep the binding and the transport
+ * assigned.
+ */
+ if (signal_pending(current) || timeout == 0) {
+ err = timeout == 0 ? -ETIMEDOUT : sock_intr_errno(timeout);
+
+ /* Listener might have already responded with
+ * VIRTIO_VSOCK_OP_RESPONSE. Its handling expects our
+ * sk_state == TCP_SYN_SENT, which hereby we break.
+ * In such case VIRTIO_VSOCK_OP_RST will follow.
+ */
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
+
+ /* Try to cancel VIRTIO_VSOCK_OP_REQUEST skb sent out by
+ * transport->connect().
+ */
vsock_transport_cancel_pkt(vsk);
+
goto out_wait;
}
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 7b0c68a70888..69bbcca8ac75 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -36,20 +36,13 @@
#define TX_BATCH_SIZE 32
#define MAX_PER_SOCKET_BUDGET 32
-struct xsk_addr_node {
- u64 addr;
- struct list_head addr_node;
-};
-
-struct xsk_addr_head {
+struct xsk_addrs {
u32 num_descs;
- struct list_head addrs_list;
+ u64 addrs[MAX_SKB_FRAGS + 1];
};
static struct kmem_cache *xsk_tx_generic_cache;
-#define XSKCB(skb) ((struct xsk_addr_head *)((skb)->cb))
-
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
@@ -558,29 +551,68 @@ static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
return ret;
}
+static bool xsk_skb_destructor_is_addr(struct sk_buff *skb)
+{
+ return (uintptr_t)skb_shinfo(skb)->destructor_arg & 0x1UL;
+}
+
+static u64 xsk_skb_destructor_get_addr(struct sk_buff *skb)
+{
+ return (u64)((uintptr_t)skb_shinfo(skb)->destructor_arg & ~0x1UL);
+}
+
+static void xsk_skb_destructor_set_addr(struct sk_buff *skb, u64 addr)
+{
+ skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t)addr | 0x1UL);
+}
+
+static void xsk_inc_num_desc(struct sk_buff *skb)
+{
+ struct xsk_addrs *xsk_addr;
+
+ if (!xsk_skb_destructor_is_addr(skb)) {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+ xsk_addr->num_descs++;
+ }
+}
+
+static u32 xsk_get_num_desc(struct sk_buff *skb)
+{
+ struct xsk_addrs *xsk_addr;
+
+ if (xsk_skb_destructor_is_addr(skb))
+ return 1;
+
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+
+ return xsk_addr->num_descs;
+}
+
static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
struct sk_buff *skb)
{
- struct xsk_addr_node *pos, *tmp;
+ u32 num_descs = xsk_get_num_desc(skb);
+ struct xsk_addrs *xsk_addr;
u32 descs_processed = 0;
unsigned long flags;
- u32 idx;
+ u32 idx, i;
spin_lock_irqsave(&pool->cq_lock, flags);
idx = xskq_get_prod(pool->cq);
- xskq_prod_write_addr(pool->cq, idx,
- (u64)(uintptr_t)skb_shinfo(skb)->destructor_arg);
- descs_processed++;
+ if (unlikely(num_descs > 1)) {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
- if (unlikely(XSKCB(skb)->num_descs > 1)) {
- list_for_each_entry_safe(pos, tmp, &XSKCB(skb)->addrs_list, addr_node) {
+ for (i = 0; i < num_descs; i++) {
xskq_prod_write_addr(pool->cq, idx + descs_processed,
- pos->addr);
+ xsk_addr->addrs[i]);
descs_processed++;
- list_del(&pos->addr_node);
- kmem_cache_free(xsk_tx_generic_cache, pos);
}
+ kmem_cache_free(xsk_tx_generic_cache, xsk_addr);
+ } else {
+ xskq_prod_write_addr(pool->cq, idx,
+ xsk_skb_destructor_get_addr(skb));
+ descs_processed++;
}
xskq_prod_submit_n(pool->cq, descs_processed);
spin_unlock_irqrestore(&pool->cq_lock, flags);
@@ -595,16 +627,6 @@ static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
spin_unlock_irqrestore(&pool->cq_lock, flags);
}
-static void xsk_inc_num_desc(struct sk_buff *skb)
-{
- XSKCB(skb)->num_descs++;
-}
-
-static u32 xsk_get_num_desc(struct sk_buff *skb)
-{
- return XSKCB(skb)->num_descs;
-}
-
static void xsk_destruct_skb(struct sk_buff *skb)
{
struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;
@@ -621,27 +643,22 @@ static void xsk_destruct_skb(struct sk_buff *skb)
static void xsk_skb_init_misc(struct sk_buff *skb, struct xdp_sock *xs,
u64 addr)
{
- BUILD_BUG_ON(sizeof(struct xsk_addr_head) > sizeof(skb->cb));
- INIT_LIST_HEAD(&XSKCB(skb)->addrs_list);
skb->dev = xs->dev;
skb->priority = READ_ONCE(xs->sk.sk_priority);
skb->mark = READ_ONCE(xs->sk.sk_mark);
- XSKCB(skb)->num_descs = 0;
skb->destructor = xsk_destruct_skb;
- skb_shinfo(skb)->destructor_arg = (void *)(uintptr_t)addr;
+ xsk_skb_destructor_set_addr(skb, addr);
}
static void xsk_consume_skb(struct sk_buff *skb)
{
struct xdp_sock *xs = xdp_sk(skb->sk);
u32 num_descs = xsk_get_num_desc(skb);
- struct xsk_addr_node *pos, *tmp;
+ struct xsk_addrs *xsk_addr;
if (unlikely(num_descs > 1)) {
- list_for_each_entry_safe(pos, tmp, &XSKCB(skb)->addrs_list, addr_node) {
- list_del(&pos->addr_node);
- kmem_cache_free(xsk_tx_generic_cache, pos);
- }
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+ kmem_cache_free(xsk_tx_generic_cache, xsk_addr);
}
skb->destructor = sock_wfree;
@@ -701,7 +718,6 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
{
struct xsk_buff_pool *pool = xs->pool;
u32 hr, len, ts, offset, copy, copied;
- struct xsk_addr_node *xsk_addr;
struct sk_buff *skb = xs->skb;
struct page *page;
void *buffer;
@@ -727,16 +743,26 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
return ERR_PTR(err);
}
} else {
- xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache, GFP_KERNEL);
- if (!xsk_addr)
- return ERR_PTR(-ENOMEM);
+ struct xsk_addrs *xsk_addr;
+
+ if (xsk_skb_destructor_is_addr(skb)) {
+ xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache,
+ GFP_KERNEL);
+ if (!xsk_addr)
+ return ERR_PTR(-ENOMEM);
+
+ xsk_addr->num_descs = 1;
+ xsk_addr->addrs[0] = xsk_skb_destructor_get_addr(skb);
+ skb_shinfo(skb)->destructor_arg = (void *)xsk_addr;
+ } else {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+ }
/* in case of -EOVERFLOW that could happen below,
* xsk_consume_skb() will release this node as whole skb
* would be dropped, which implies freeing all list elements
*/
- xsk_addr->addr = desc->addr;
- list_add_tail(&xsk_addr->addr_node, &XSKCB(skb)->addrs_list);
+ xsk_addr->addrs[xsk_addr->num_descs] = desc->addr;
}
len = desc->len;
@@ -813,10 +839,25 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
}
} else {
int nr_frags = skb_shinfo(skb)->nr_frags;
- struct xsk_addr_node *xsk_addr;
+ struct xsk_addrs *xsk_addr;
struct page *page;
u8 *vaddr;
+ if (xsk_skb_destructor_is_addr(skb)) {
+ xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache,
+ GFP_KERNEL);
+ if (!xsk_addr) {
+ err = -ENOMEM;
+ goto free_err;
+ }
+
+ xsk_addr->num_descs = 1;
+ xsk_addr->addrs[0] = xsk_skb_destructor_get_addr(skb);
+ skb_shinfo(skb)->destructor_arg = (void *)xsk_addr;
+ } else {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+ }
+
if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
err = -EOVERFLOW;
goto free_err;
@@ -828,13 +869,6 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
goto free_err;
}
- xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache, GFP_KERNEL);
- if (!xsk_addr) {
- __free_page(page);
- err = -ENOMEM;
- goto free_err;
- }
-
vaddr = kmap_local_page(page);
memcpy(vaddr, buffer, len);
kunmap_local(vaddr);
@@ -842,8 +876,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
- xsk_addr->addr = desc->addr;
- list_add_tail(&xsk_addr->addr_node, &XSKCB(skb)->addrs_list);
+ xsk_addr->addrs[xsk_addr->num_descs] = desc->addr;
}
}
@@ -1904,7 +1937,7 @@ static int __init xsk_init(void)
goto out_pernet;
xsk_tx_generic_cache = kmem_cache_create("xsk_generic_xmit_cache",
- sizeof(struct xsk_addr_node),
+ sizeof(struct xsk_addrs),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!xsk_tx_generic_cache) {
err = -ENOMEM;
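
The xsk.c rework stores either a single descriptor address or a pointer to a kmem_cache-allocated struct xsk_addrs in skb_shinfo(skb)->destructor_arg, and uses bit 0 of that slot to tell the two cases apart. A standalone sketch of the pointer-tagging idiom; the names are illustrative and it assumes the inline value has bit 0 clear and the out-of-line pointer is at least 2-byte aligned:

#include <stdint.h>
#include <stdbool.h>

/* One pointer-sized slot holds either a small inline value (bit 0 set)
 * or a pointer to out-of-line storage (bit 0 clear). */

static inline void *slot_from_value(uint64_t val)
{
	return (void *)((uintptr_t)val | 0x1UL);
}

static inline void *slot_from_ptr(void *p)
{
	return p;
}

static inline bool slot_is_value(void *slot)
{
	return (uintptr_t)slot & 0x1UL;
}

static inline uint64_t slot_to_value(void *slot)
{
	return (uintptr_t)slot & ~0x1UL;
}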
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 44b9de6e4e77..52ae0e034d29 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -438,7 +438,7 @@ ok:
check_tunnel_size = x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
x->props.mode == XFRM_MODE_TUNNEL;
- switch (x->inner_mode.family) {
+ switch (skb_dst(skb)->ops->family) {
case AF_INET:
/* Check for IPv4 options */
if (ip_hdr(skb)->ihl != 5)
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 9077730ff7d0..54222fcbd7fd 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -698,7 +698,7 @@ static void xfrm_get_inner_ipproto(struct sk_buff *skb, struct xfrm_state *x)
return;
if (x->outer_mode.encap == XFRM_MODE_TUNNEL) {
- switch (x->outer_mode.family) {
+ switch (skb_dst(skb)->ops->family) {
case AF_INET:
xo->inner_ipproto = ip_hdr(skb)->protocol;
break;
@@ -772,8 +772,12 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
/* Exclusive direct xmit for tunnel mode, as
* some filtering or matching rules may apply
* in transport mode.
+ * Locally generated packets also require
+ * the normal XFRM path for L2 header setup,
+ * as the hardware needs the L2 header to match
+ * for encryption, so skip direct output as well.
*/
- if (x->props.mode == XFRM_MODE_TUNNEL)
+ if (x->props.mode == XFRM_MODE_TUNNEL && !skb->sk)
return xfrm_dev_direct_output(sk, x, skb);
return xfrm_output_resume(sk, skb, 0);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d213ca3653a8..9e14e453b55c 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -592,6 +592,7 @@ void xfrm_state_free(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_free);
+static void xfrm_state_delete_tunnel(struct xfrm_state *x);
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
if (x->mode_cbs && x->mode_cbs->destroy_state)
@@ -607,6 +608,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
kfree(x->replay_esn);
kfree(x->preplay_esn);
xfrm_unset_type_offload(x);
+ xfrm_state_delete_tunnel(x);
if (x->type) {
x->type->destructor(x);
xfrm_put_type(x->type);
@@ -806,7 +808,6 @@ void __xfrm_state_destroy(struct xfrm_state *x)
}
EXPORT_SYMBOL(__xfrm_state_destroy);
-static void xfrm_state_delete_tunnel(struct xfrm_state *x);
int __xfrm_state_delete(struct xfrm_state *x)
{
struct net *net = xs_net(x);
@@ -2073,6 +2074,7 @@ static struct xfrm_state *xfrm_state_clone_and_setup(struct xfrm_state *orig,
return x;
error:
+ x->km.state = XFRM_STATE_DEAD;
xfrm_state_put(x);
out:
return NULL;
@@ -2157,11 +2159,15 @@ struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
xfrm_state_insert(xc);
} else {
if (xfrm_state_add(xc) < 0)
- goto error;
+ goto error_add;
}
return xc;
+error_add:
+ if (xuo)
+ xfrm_dev_state_delete(xc);
error:
+ xc->km.state = XFRM_STATE_DEAD;
xfrm_state_put(xc);
return NULL;
}
@@ -2191,14 +2197,18 @@ int xfrm_state_update(struct xfrm_state *x)
}
if (x1->km.state == XFRM_STATE_ACQ) {
- if (x->dir && x1->dir != x->dir)
+ if (x->dir && x1->dir != x->dir) {
+ to_put = x1;
goto out;
+ }
__xfrm_state_insert(x);
x = NULL;
} else {
- if (x1->dir != x->dir)
+ if (x1->dir != x->dir) {
+ to_put = x1;
goto out;
+ }
}
err = 0;
@@ -3298,6 +3308,7 @@ out_bydst:
void xfrm_state_fini(struct net *net)
{
unsigned int sz;
+ int i;
flush_work(&net->xfrm.state_hash_work);
xfrm_state_flush(net, 0, false);
@@ -3305,14 +3316,17 @@ void xfrm_state_fini(struct net *net)
WARN_ON(!list_empty(&net->xfrm.state_all));
+ for (i = 0; i <= net->xfrm.state_hmask; i++) {
+ WARN_ON(!hlist_empty(net->xfrm.state_byseq + i));
+ WARN_ON(!hlist_empty(net->xfrm.state_byspi + i));
+ WARN_ON(!hlist_empty(net->xfrm.state_bysrc + i));
+ WARN_ON(!hlist_empty(net->xfrm.state_bydst + i));
+ }
+
sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
- WARN_ON(!hlist_empty(net->xfrm.state_byseq));
xfrm_hash_free(net->xfrm.state_byseq, sz);
- WARN_ON(!hlist_empty(net->xfrm.state_byspi));
xfrm_hash_free(net->xfrm.state_byspi, sz);
- WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
xfrm_hash_free(net->xfrm.state_bysrc, sz);
- WARN_ON(!hlist_empty(net->xfrm.state_bydst));
xfrm_hash_free(net->xfrm.state_bydst, sz);
free_percpu(net->xfrm.state_cache_input);
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 010c9e6638c0..403b5ecac2c5 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -947,8 +947,11 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
if (attrs[XFRMA_SA_PCPU]) {
x->pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]);
- if (x->pcpu_num >= num_possible_cpus())
+ if (x->pcpu_num >= num_possible_cpus()) {
+ err = -ERANGE;
+ NL_SET_ERR_MSG(extack, "pCPU number too big");
goto error;
+ }
}
err = __xfrm_init_state(x, extack);
@@ -3035,6 +3038,9 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
}
xfrm_state_free(x);
+ xfrm_dev_policy_delete(xp);
+ xfrm_dev_policy_free(xp);
+ security_xfrm_policy_free(xp->security);
kfree(xp);
return 0;