2013-03-27 22:23:24 +01:00
|
|
|
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*- */
|
|
|
|
|
/* nm-linux-platform.c - Linux kernel & udev network configuration layer
|
|
|
|
|
*
|
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
|
* the Free Software Foundation; either version 2, or (at your option)
|
|
|
|
|
* any later version.
|
|
|
|
|
*
|
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
|
*
|
|
|
|
|
* You should have received a copy of the GNU General Public License along
|
|
|
|
|
* with this program; if not, write to the Free Software Foundation, Inc.,
|
|
|
|
|
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
|
|
|
*
|
2018-06-09 13:56:21 +02:00
|
|
|
* Copyright (C) 2012 - 2018 Red Hat, Inc.
|
2013-03-27 22:23:24 +01:00
|
|
|
*/
|
2016-02-19 14:57:48 +01:00
|
|
|
#include "nm-default.h"
|
2013-03-27 22:23:24 +01:00
|
|
|
|
2016-03-01 09:56:51 +01:00
|
|
|
#include "nm-linux-platform.h"
|
|
|
|
|
|
2019-02-15 12:46:13 +01:00
|
|
|
#include <arpa/inet.h>
|
|
|
|
|
#include <dlfcn.h>
|
2016-06-30 18:20:09 +02:00
|
|
|
#include <endian.h>
|
2013-04-03 16:10:38 +02:00
|
|
|
#include <fcntl.h>
|
2019-02-15 12:46:13 +01:00
|
|
|
#include <libudev.h>
|
2013-05-21 12:49:24 -03:00
|
|
|
#include <linux/ip.h>
|
2013-03-27 22:23:24 +01:00
|
|
|
#include <linux/if_arp.h>
|
2013-05-06 09:16:17 -04:00
|
|
|
#include <linux/if_link.h>
|
2013-04-25 15:46:39 -04:00
|
|
|
#include <linux/if_tun.h>
|
2013-05-21 12:49:24 -03:00
|
|
|
#include <linux/if_tunnel.h>
|
2017-12-22 10:24:25 +01:00
|
|
|
#include <linux/ip6_tunnel.h>
|
2019-02-15 12:46:13 +01:00
|
|
|
#include <netinet/icmp6.h>
|
|
|
|
|
#include <netinet/in.h>
|
|
|
|
|
#include <poll.h>
|
|
|
|
|
#include <sys/ioctl.h>
|
|
|
|
|
#include <sys/socket.h>
|
|
|
|
|
#include <unistd.h>
|
2013-03-27 22:23:24 +01:00
|
|
|
|
2016-03-01 09:56:51 +01:00
|
|
|
#include "nm-utils.h"
|
2015-02-22 11:54:03 +01:00
|
|
|
#include "nm-core-internal.h"
|
2016-03-01 09:56:51 +01:00
|
|
|
#include "nm-setting-vlan.h"
|
|
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combination of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
#include "nm-utils/nm-secret-utils.h"
|
2019-02-12 11:05:43 +01:00
|
|
|
#include "nm-utils/nm-c-list.h"
|
2018-01-14 15:07:46 +01:00
|
|
|
#include "nm-netlink.h"
|
2016-03-01 09:56:51 +01:00
|
|
|
#include "nm-core-utils.h"
|
|
|
|
|
#include "nmp-object.h"
|
2016-02-19 01:06:28 +01:00
|
|
|
#include "nmp-netns.h"
|
2015-05-02 07:59:59 +02:00
|
|
|
#include "nm-platform-utils.h"
|
2017-06-29 11:18:10 +02:00
|
|
|
#include "nm-platform-private.h"
|
2018-06-09 13:56:21 +02:00
|
|
|
#include "wifi/nm-wifi-utils.h"
|
|
|
|
|
#include "wifi/nm-wifi-utils-wext.h"
|
2018-06-05 15:20:54 +02:00
|
|
|
#include "wpan/nm-wpan-utils.h"
|
2016-10-08 14:39:19 +02:00
|
|
|
#include "nm-utils/unaligned.h"
|
2018-08-30 15:13:52 +02:00
|
|
|
#include "nm-utils/nm-io-utils.h"
|
2017-03-12 15:54:02 +01:00
|
|
|
#include "nm-utils/nm-udev-utils.h"
|
2013-03-27 22:23:24 +01:00
|
|
|
|
2017-12-11 09:42:14 +01:00
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
|
|
|
|
/* re-implement <linux/tc_act/tc_defact.h> to build against kernel
 * headers that lack this. */

#include <linux/pkt_cls.h>

/* Parameters of the tc "simple" (defact) action. "tc_gen" is a macro
 * from <linux/pkt_cls.h> that expands to the generic action fields
 * (index, capab, action, refcnt, bindcnt). */
struct tc_defact {
	tc_gen;
};

/* Netlink attribute types for the "simple" action; values must match
 * the kernel's <linux/tc_act/tc_defact.h>. */
enum {
	TCA_DEF_UNSPEC,
	TCA_DEF_TM,
	TCA_DEF_PARMS,
	TCA_DEF_DATA,
	TCA_DEF_PAD,
	__TCA_DEF_MAX
};
#define TCA_DEF_MAX (__TCA_DEF_MAX - 1)
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2015-10-27 17:24:11 +01:00
|
|
|
/* MVRP flag for VLAN links; redefined here for kernel headers that
 * lack it (must match <linux/if_vlan.h>). */
#define VLAN_FLAG_MVRP 0x8

/*****************************************************************************/

/* Buffer size used for qdisc kind names (matches kernel's IFQDISCSIZ). */
#define IFQDISCSIZ 32
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2015-10-12 16:07:01 +02:00
|
|
|
|
|
|
|
|
/* Compat: IFLA_*, IFLA_INET6_*, IFLA_VLAN_* and IFA_* attribute values
 * that may be missing from older kernel headers. Values must match
 * <linux/if_link.h> and <linux/if_addr.h>.
 * NOTE(review): most of these are presumably unconditional (no #ifndef
 * guard) because kernel headers declare them as enum values, which the
 * preprocessor cannot detect — confirm against the uapi headers. */
#ifndef IFLA_PROMISCUITY
#define IFLA_PROMISCUITY 30
#endif
#define IFLA_NUM_TX_QUEUES 31
#define IFLA_NUM_RX_QUEUES 32
#define IFLA_CARRIER 33
#define IFLA_PHYS_PORT_ID 34
#define IFLA_LINK_NETNSID 37
#define __IFLA_MAX 39

/* IFLA_INET6_* (nested under IFLA_AF_SPEC / AF_INET6). */
#define IFLA_INET6_TOKEN 7
#define IFLA_INET6_ADDR_GEN_MODE 8
#define __IFLA_INET6_MAX 9

/* IFLA_VLAN_* (nested link-info data for VLAN links). */
#define IFLA_VLAN_PROTOCOL 5
#define __IFLA_VLAN_MAX 6

/* IFA_* (address attributes). */
#define IFA_FLAGS 8
#define __IFA_MAX 9
|
|
|
|
|
|
2015-10-12 15:15:21 +02:00
|
|
|
/* Compat: macvlan link-info attributes possibly missing from older headers. */
#define IFLA_MACVLAN_FLAGS 2
#define __IFLA_MACVLAN_MAX 3

/* Compat: IP tunnel (IFLA_IPTUN_*) link-info attributes; values must
 * match <linux/if_tunnel.h>. */
#define IFLA_IPTUN_LINK 1
#define IFLA_IPTUN_LOCAL 2
#define IFLA_IPTUN_REMOTE 3
#define IFLA_IPTUN_TTL 4
#define IFLA_IPTUN_TOS 5
#define IFLA_IPTUN_ENCAP_LIMIT 6
#define IFLA_IPTUN_FLOWINFO 7
#define IFLA_IPTUN_FLAGS 8
#define IFLA_IPTUN_PROTO 9
#define IFLA_IPTUN_PMTUDISC 10
#define __IFLA_IPTUN_MAX 19
#ifndef IFLA_IPTUN_MAX
#define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1)
#endif
|
|
|
|
|
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel versions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
/* Compat: IFLA_TUN_* link-info attributes, with which newer kernels
 * expose TUN/TAP device properties via rtnetlink (see the commit
 * message above); values must match <linux/if_link.h>. */
#define IFLA_TUN_UNSPEC 0
#define IFLA_TUN_OWNER 1
#define IFLA_TUN_GROUP 2
#define IFLA_TUN_TYPE 3
#define IFLA_TUN_PI 4
#define IFLA_TUN_VNET_HDR 5
#define IFLA_TUN_PERSIST 6
#define IFLA_TUN_MULTI_QUEUE 7
#define IFLA_TUN_NUM_QUEUES 8
#define IFLA_TUN_NUM_DISABLED_QUEUES 9
#define __IFLA_TUN_MAX 10
#define IFLA_TUN_MAX (__IFLA_TUN_MAX - 1)
|
2017-10-11 09:15:04 +02:00
|
|
|
|
|
|
|
|
/* TRUE when the kernel headers we compile against already know
 * RTA_PREF. Must be evaluated *before* RTA_MAX is redefined below, so
 * it reflects the headers' original value. */
static const gboolean RTA_PREF_SUPPORTED_AT_COMPILETIME = (RTA_MAX >= 20 /* RTA_PREF */);

G_STATIC_ASSERT (RTA_MAX == (__RTA_MAX - 1));
/* Compat: define RTA_PREF and extend RTA_MAX accordingly, so RTA_PREF
 * can be used even when building against older headers. */
#define RTA_PREF 20
#undef RTA_MAX
#define RTA_MAX (MAX ((__RTA_MAX - 1), RTA_PREF))
|
|
|
|
|
|
2015-10-12 15:15:21 +02:00
|
|
|
/* Compat: macvlan no-promisc flag for headers that lack it. */
#ifndef MACVLAN_FLAG_NOPROMISC
#define MACVLAN_FLAG_NOPROMISC 1
#endif

/* Masks/shift for the traffic-class and flow-label fields packed into
 * the 32-bit IPv6 flowinfo word. */
#define IP6_FLOWINFO_TCLASS_MASK 0x0FF00000
#define IP6_FLOWINFO_TCLASS_SHIFT 20
#define IP6_FLOWINFO_FLOWLABEL_MASK 0x000FFFFF
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2015-10-12 16:07:01 +02:00
|
|
|
|
2018-03-09 15:48:24 +01:00
|
|
|
/* Appeared in kernel prior to 3.13 dated 19 January, 2014 */
#ifndef ARPHRD_6LOWPAN
#define ARPHRD_6LOWPAN 825
#endif
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2016-06-30 18:20:09 +02:00
|
|
|
/* Compat: IFLA_MACSEC_* link-info attributes; values must match
 * <linux/if_link.h>. */
#define IFLA_MACSEC_UNSPEC 0
#define IFLA_MACSEC_SCI 1
#define IFLA_MACSEC_PORT 2
#define IFLA_MACSEC_ICV_LEN 3
#define IFLA_MACSEC_CIPHER_SUITE 4
#define IFLA_MACSEC_WINDOW 5
#define IFLA_MACSEC_ENCODING_SA 6
#define IFLA_MACSEC_ENCRYPT 7
#define IFLA_MACSEC_PROTECT 8
#define IFLA_MACSEC_INC_SCI 9
#define IFLA_MACSEC_ES 10
#define IFLA_MACSEC_SCB 11
#define IFLA_MACSEC_REPLAY_PROTECT 12
#define IFLA_MACSEC_VALIDATION 13
#define IFLA_MACSEC_PAD 14
#define __IFLA_MACSEC_MAX 15
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2018-03-13 13:35:35 +00:00
|
|
|
/* Compat: WireGuard generic-netlink API — commands, flags and
 * attributes. Values must match WireGuard's uapi (wireguard.h). */

/* genl commands. */
#define WG_CMD_GET_DEVICE 0
#define WG_CMD_SET_DEVICE 1

/* WGDEVICE_A_FLAGS bits. */
#define WGDEVICE_F_REPLACE_PEERS ((guint32) (1U << 0))

/* WGPEER_A_FLAGS bits. */
#define WGPEER_F_REMOVE_ME ((guint32) (1U << 0))
#define WGPEER_F_REPLACE_ALLOWEDIPS ((guint32) (1U << 1))

/* Device-level attributes. */
#define WGDEVICE_A_UNSPEC 0
#define WGDEVICE_A_IFINDEX 1
#define WGDEVICE_A_IFNAME 2
#define WGDEVICE_A_PRIVATE_KEY 3
#define WGDEVICE_A_PUBLIC_KEY 4
#define WGDEVICE_A_FLAGS 5
#define WGDEVICE_A_LISTEN_PORT 6
#define WGDEVICE_A_FWMARK 7
#define WGDEVICE_A_PEERS 8
#define WGDEVICE_A_MAX 8

/* Per-peer attributes (nested in WGDEVICE_A_PEERS). */
#define WGPEER_A_UNSPEC 0
#define WGPEER_A_PUBLIC_KEY 1
#define WGPEER_A_PRESHARED_KEY 2
#define WGPEER_A_FLAGS 3
#define WGPEER_A_ENDPOINT 4
#define WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL 5
#define WGPEER_A_LAST_HANDSHAKE_TIME 6
#define WGPEER_A_RX_BYTES 7
#define WGPEER_A_TX_BYTES 8
#define WGPEER_A_ALLOWEDIPS 9
#define WGPEER_A_MAX 9

/* Per-allowed-ip attributes (nested in WGPEER_A_ALLOWEDIPS). */
#define WGALLOWEDIP_A_UNSPEC 0
#define WGALLOWEDIP_A_FAMILY 1
#define WGALLOWEDIP_A_IPADDR 2
#define WGALLOWEDIP_A_CIDR_MASK 3
#define WGALLOWEDIP_A_MAX 3
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2018-05-23 14:33:24 +02:00
|
|
|
/* Redefine VF enums and structures that are not available on older
 * kernels. Attribute values must match <linux/if_link.h>; the structs
 * mirror the kernel's ifla_vf_* layouts (leading underscore avoids a
 * clash with headers that do provide them). */

#define IFLA_VF_UNSPEC 0
#define IFLA_VF_MAC 1
#define IFLA_VF_VLAN 2
#define IFLA_VF_TX_RATE 3
#define IFLA_VF_SPOOFCHK 4
#define IFLA_VF_LINK_STATE 5
#define IFLA_VF_RATE 6
#define IFLA_VF_RSS_QUERY_EN 7
#define IFLA_VF_STATS 8
#define IFLA_VF_TRUST 9
#define IFLA_VF_IB_NODE_GUID 10
#define IFLA_VF_IB_PORT_GUID 11
#define IFLA_VF_VLAN_LIST 12

/* Nested in IFLA_VF_VLAN_LIST. */
#define IFLA_VF_VLAN_INFO_UNSPEC 0
#define IFLA_VF_VLAN_INFO 1

/* valid for TRUST, SPOOFCHK, LINK_STATE, RSS_QUERY_EN */
struct _ifla_vf_setting {
	guint32 vf;
	guint32 setting;
};

/* Payload of IFLA_VF_RATE (min/max TX rate for one VF). */
struct _ifla_vf_rate {
	guint32 vf;
	guint32 min_tx_rate;
	guint32 max_tx_rate;
};

/* Payload of IFLA_VF_VLAN_INFO (one VLAN filter entry for one VF). */
struct _ifla_vf_vlan_info {
	guint32 vf;
	guint32 vlan; /* 0 - 4095, 0 disables VLAN filter */
	guint32 qos;
	guint16 vlan_proto; /* VLAN protocol, either 802.1Q or 802.1ad */
};
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2016-04-20 12:06:43 +02:00
|
|
|
/* Operation to perform on an InfiniBand partition (child) device. */
typedef enum {
	INFINIBAND_ACTION_CREATE_CHILD,
	INFINIBAND_ACTION_DELETE_CHILD,
} InfinibandAction;
|
|
|
|
|
|
2017-10-23 12:48:49 +02:00
|
|
|
/* Which property a change-link request modifies; selects how the
 * accompanying ChangeLinkData is interpreted. */
typedef enum {
	CHANGE_LINK_TYPE_UNSPEC,
	CHANGE_LINK_TYPE_SET_MTU,
	CHANGE_LINK_TYPE_SET_ADDRESS,
} ChangeLinkType;
|
|
|
|
|
|
2017-10-23 13:17:21 +02:00
|
|
|
/* Extra arguments for a change-link request; which union member is
 * valid depends on the ChangeLinkType. */
typedef struct {
	union {
		struct {
			gconstpointer address; /* hardware address to set (CHANGE_LINK_TYPE_SET_ADDRESS) */
			gsize length;          /* length of @address in bytes */
		} set_address;
	};
} ChangeLinkData;
|
|
|
|
|
|
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that multiple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle of processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
|
|
|
/* Indexes for the refresh-all delayed actions. Used both to derive the
 * bit values of DELAYED_ACTION_TYPE_REFRESH_ALL_* and as array indexes
 * (e.g. for the refresh_all_in_progress counters). */
enum {
	DELAYED_ACTION_IDX_REFRESH_ALL_LINKS,
	DELAYED_ACTION_IDX_REFRESH_ALL_IP4_ADDRESSES,
	DELAYED_ACTION_IDX_REFRESH_ALL_IP6_ADDRESSES,
	DELAYED_ACTION_IDX_REFRESH_ALL_IP4_ROUTES,
	DELAYED_ACTION_IDX_REFRESH_ALL_IP6_ROUTES,
	DELAYED_ACTION_IDX_REFRESH_ALL_QDISCS,
	DELAYED_ACTION_IDX_REFRESH_ALL_TFILTERS,
	_DELAYED_ACTION_IDX_REFRESH_ALL_NUM,
};
|
|
|
|
|
|
2015-04-06 18:29:36 +02:00
|
|
|
/* Bitmask of delayed actions that can be scheduled on the platform
 * instance. The refresh-all bits are derived from the
 * DELAYED_ACTION_IDX_REFRESH_ALL_* indexes above. */
typedef enum {
	DELAYED_ACTION_TYPE_NONE = 0,

	DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS = (1LL << /* 0 */ DELAYED_ACTION_IDX_REFRESH_ALL_LINKS),
	DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES = (1LL << /* 1 */ DELAYED_ACTION_IDX_REFRESH_ALL_IP4_ADDRESSES),
	DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES = (1LL << /* 2 */ DELAYED_ACTION_IDX_REFRESH_ALL_IP6_ADDRESSES),
	DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES = (1LL << /* 3 */ DELAYED_ACTION_IDX_REFRESH_ALL_IP4_ROUTES),
	DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES = (1LL << /* 4 */ DELAYED_ACTION_IDX_REFRESH_ALL_IP6_ROUTES),
	DELAYED_ACTION_TYPE_REFRESH_ALL_QDISCS = (1LL << /* 5 */ DELAYED_ACTION_IDX_REFRESH_ALL_QDISCS),
	DELAYED_ACTION_TYPE_REFRESH_ALL_TFILTERS = (1LL << /* 6 */ DELAYED_ACTION_IDX_REFRESH_ALL_TFILTERS),

	DELAYED_ACTION_TYPE_REFRESH_LINK = (1LL << 7),
	DELAYED_ACTION_TYPE_MASTER_CONNECTED = (1LL << 11),
	DELAYED_ACTION_TYPE_READ_NETLINK = (1LL << 12),
	DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE = (1LL << 13),

	__DELAYED_ACTION_TYPE_MAX,

	/* Convenience mask covering all refresh-all bits. */
	DELAYED_ACTION_TYPE_REFRESH_ALL = DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS |
	                                  DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES |
	                                  DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES |
	                                  DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES |
	                                  DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES |
	                                  DELAYED_ACTION_TYPE_REFRESH_ALL_QDISCS |
	                                  DELAYED_ACTION_TYPE_REFRESH_ALL_TFILTERS,

	DELAYED_ACTION_TYPE_MAX = __DELAYED_ACTION_TYPE_MAX -1,
} DelayedActionType;
|
|
|
|
|
|
2016-04-07 17:14:03 +02:00
|
|
|
/* Iterate @iflags over every single-bit flag that is set in
 * @flags_all. Deliberately expands to "for (...) if (...)" so the
 * macro is followed by one statement or block like a regular loop. */
#define FOR_EACH_DELAYED_ACTION(iflags, flags_all) \
	for ((iflags) = (DelayedActionType) 0x1LL; (iflags) <= DELAYED_ACTION_TYPE_MAX; (iflags) <<= 1) \
		if (NM_FLAGS_ANY (flags_all, iflags))
|
2016-04-07 17:14:03 +02:00
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
/* Outcome of waiting for a netlink response to an outstanding request. */
typedef enum {
	/* Negative values are errors from kernel. Add dummy member to
	 * make enum signed. */
	_WAIT_FOR_NL_RESPONSE_RESULT_SYSTEM_ERROR = G_MININT,

	WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN = 0,
	WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK,
	WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_UNKNOWN,
	WAIT_FOR_NL_RESPONSE_RESULT_FAILED_RESYNC,
	WAIT_FOR_NL_RESPONSE_RESULT_FAILED_POLL,
	WAIT_FOR_NL_RESPONSE_RESULT_FAILED_TIMEOUT,
	WAIT_FOR_NL_RESPONSE_RESULT_FAILED_DISPOSING,
	WAIT_FOR_NL_RESPONSE_RESULT_FAILED_SETNS,
} WaitForNlResponseResult;
|
|
|
|
|
|
2018-09-04 16:43:44 +02:00
|
|
|
/* Selects which member of DelayedActionWaitForNlResponseData.response
 * receives the result of the request. */
typedef enum {
	DELAYED_ACTION_RESPONSE_TYPE_VOID = 0,
	DELAYED_ACTION_RESPONSE_TYPE_REFRESH_ALL_IN_PROGRESS = 1,
	DELAYED_ACTION_RESPONSE_TYPE_ROUTE_GET = 2,
} DelayedActionWaitForNlResponseType;
|
|
|
|
|
|
|
|
|
|
/* Bookkeeping for one outstanding netlink request whose response we
 * are waiting for. */
typedef struct {
	guint32 seq_number;                        /* netlink sequence number of the request */
	WaitForNlResponseResult seq_result;        /* result seen so far (UNKNOWN until answered) */
	DelayedActionWaitForNlResponseType response_type; /* which response member is valid */
	gint64 timeout_abs_ns;                     /* absolute deadline in nanoseconds */
	WaitForNlResponseResult *out_seq_result;   /* optional: where to report the final result */
	char **out_errmsg;                         /* optional: where to report an error message */
	union {
		int *out_refresh_all_in_progress;  /* DELAYED_ACTION_RESPONSE_TYPE_REFRESH_ALL_IN_PROGRESS */
		NMPObject **out_route_get;         /* DELAYED_ACTION_RESPONSE_TYPE_ROUTE_GET */
		gpointer out_data;                 /* generic view of the output pointer */
	} response;
} DelayedActionWaitForNlResponseData;
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
|
|
|
|
/* Private instance data of NMLinuxPlatform. */
typedef struct {
	/* generic-netlink socket (e.g. for the WireGuard genl API). */
	struct nl_sock *genl;

	/* rtnetlink socket used for events and requests. */
	struct nl_sock *nlh;
	/* sequence number to use for the next outgoing request. */
	guint32 nlh_seq_next;
#if NM_MORE_LOGGING
	/* last handled sequence number; tracked for logging only. */
	guint32 nlh_seq_last_handled;
#endif
	guint32 nlh_seq_last_seen;
	GIOChannel *event_channel;
	guint event_id;

	/* per-refresh-all-type flag whether a cache prune is pending. */
	bool pruning[_DELAYED_ACTION_IDX_REFRESH_ALL_NUM];

	/* previously read sysctl values, to log only changes. */
	GHashTable *sysctl_get_prev_values;
	CList sysctl_list;

	NMUdevClient *udev_client;

	struct {
		/* which delayed actions are scheduled, as marked in @flags.
		 * Some types have additional arguments in the fields below. */
		DelayedActionType flags;

		/* counter that a refresh all action is in progress, separated
		 * by type. */
		int refresh_all_in_progress[_DELAYED_ACTION_IDX_REFRESH_ALL_NUM];

		GPtrArray *list_master_connected;
		GPtrArray *list_refresh_link;
		GArray *list_wait_for_nl_response;

		/* re-entrancy guard/counter while handling delayed actions. */
		int is_handling;
	} delayed_action;
} NMLinuxPlatformPrivate;
|
|
|
|
|
|
|
|
|
|
/* Instance struct: NMPlatform base with embedded private data. */
struct _NMLinuxPlatform {
	NMPlatform parent;
	NMLinuxPlatformPrivate _priv;
};

/* Class struct: no additional virtual methods over NMPlatform. */
struct _NMLinuxPlatformClass {
	NMPlatformClass parent;
};
|
|
|
|
|
|
|
|
|
|
G_DEFINE_TYPE (NMLinuxPlatform, nm_linux_platform, NM_TYPE_PLATFORM)

/* Fetch the embedded private struct from an NMLinuxPlatform pointer
 * (with type checking). */
#define NM_LINUX_PLATFORM_GET_PRIVATE(self) _NM_GET_PRIVATE (self, NMLinuxPlatform, NM_IS_LINUX_PLATFORM, NMPlatform)
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
|
|
|
|
/* Logging setup. _NMLOG*() log in the context of a platform instance
 * (prefix may include the self pointer); _NMLOG2*() log without one. */
#define _NMLOG_PREFIX_NAME                "platform-linux"
#define _NMLOG_DOMAIN                     LOGD_PLATFORM
#define _NMLOG2_DOMAIN                    LOGD_PLATFORM
#define _NMLOG(level, ...)                _LOG     (       level, _NMLOG_DOMAIN,  platform, __VA_ARGS__)
#define _NMLOG_err(errsv, level, ...)     _LOG_err (errsv, level, _NMLOG_DOMAIN,  platform, __VA_ARGS__)
#define _NMLOG2(level, ...)               _LOG     (       level, _NMLOG2_DOMAIN, NULL,     __VA_ARGS__)
#define _NMLOG2_err(errsv, level, ...)    _LOG_err (errsv, level, _NMLOG2_DOMAIN, NULL,     __VA_ARGS__)
|
|
|
|
|
|
|
|
|
|
/* Emit one log line via _nm_log(), prefixing it with the module name
 * and — when enabled for @self — the instance pointer. */
#define _LOG_print(__level, __domain, __errsv, self, ...) \
    G_STMT_START { \
        char __prefix[32]; \
        const char *__p_prefix = _NMLOG_PREFIX_NAME; \
        NMPlatform *const __self = (self); \
        \
        if (__self && nm_platform_get_log_with_ptr (__self)) { \
            g_snprintf (__prefix, sizeof (__prefix), "%s[%p]", _NMLOG_PREFIX_NAME, __self); \
            __p_prefix = __prefix; \
        } \
        _nm_log (__level, __domain, __errsv, NULL, NULL, \
                 "%s: " _NM_UTILS_MACRO_FIRST (__VA_ARGS__), \
                 __p_prefix _NM_UTILS_MACRO_REST (__VA_ARGS__)); \
    } G_STMT_END
|
|
|
|
|
|
|
|
|
|
/* Log a message, short-circuiting when the level/domain is disabled so
 * the arguments are not evaluated. */
#define _LOG(level, domain, self, ...) \
    G_STMT_START { \
        const NMLogLevel __level = (level); \
        const NMLogDomain __domain = (domain); \
        \
        if (nm_logging_enabled (__level, __domain)) { \
            _LOG_print (__level, __domain, 0, self, __VA_ARGS__); \
        } \
    } G_STMT_END
|
|
|
|
|
|
|
|
|
|
/* Like _LOG(), but additionally appends the description of @errsv
 * (an errno-style error code) to the message. */
#define _LOG_err(errsv, level, domain, self, ...) \
    G_STMT_START { \
        const NMLogLevel __level = (level); \
        const NMLogDomain __domain = (domain); \
        \
        if (nm_logging_enabled (__level, __domain)) { \
            int __errsv = (errsv); \
            \
            /* The %m format specifier (GNU extension) would already allow you to specify the error
             * message conveniently (and nm_log would get that right too). But we don't want to depend
             * on that, so instead append the message at the end.
             * Currently users are expected not to use %m in the format string. */ \
            _LOG_print (__level, __domain, __errsv, self, \
                        _NM_UTILS_MACRO_FIRST (__VA_ARGS__) ": %s (%d)" \
                        _NM_UTILS_MACRO_REST (__VA_ARGS__), \
                        nm_strerror_native (__errsv), __errsv); \
        } \
    } G_STMT_END
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
static void delayed_action_schedule (NMPlatform *platform, DelayedActionType action_type, gpointer user_data);
|
2015-06-19 15:38:41 +02:00
|
|
|
static gboolean delayed_action_handle_all (NMPlatform *platform, gboolean read_netlink);
|
2015-12-14 14:47:41 +01:00
|
|
|
static void do_request_link_no_delayed_actions (NMPlatform *platform, int ifindex, const char *name);
|
|
|
|
|
static void do_request_all_no_delayed_actions (NMPlatform *platform, DelayedActionType action_type);
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
static void cache_on_change (NMPlatform *platform,
|
|
|
|
|
NMPCacheOpsType cache_op,
|
|
|
|
|
const NMPObject *obj_old,
|
|
|
|
|
const NMPObject *obj_new);
|
|
|
|
|
static void cache_prune_all (NMPlatform *platform);
|
2015-12-15 10:40:41 +01:00
|
|
|
static gboolean event_handler_read_netlink (NMPlatform *platform, gboolean wait_for_acks);
|
2018-05-15 20:29:30 +02:00
|
|
|
static struct nl_sock *_genl_sock (NMLinuxPlatform *platform);
|
2015-04-23 23:16:00 +02:00
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
static int
|
|
|
|
|
wait_for_nl_response_to_nmerr (WaitForNlResponseResult seq_result)
|
2017-08-21 15:33:57 +02:00
|
|
|
{
|
|
|
|
|
if (seq_result == WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK)
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return 0;
|
2017-08-21 15:33:57 +02:00
|
|
|
if (seq_result < 0)
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return (int) seq_result;
|
|
|
|
|
return -NME_PL_NETLINK;
|
2017-08-21 15:33:57 +02:00
|
|
|
}
|
|
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
static const char *
|
2018-03-09 15:50:16 +01:00
|
|
|
wait_for_nl_response_to_string (WaitForNlResponseResult seq_result,
|
|
|
|
|
const char *errmsg,
|
|
|
|
|
char *buf, gsize buf_size)
|
2015-12-14 14:47:41 +01:00
|
|
|
{
|
|
|
|
|
char *buf0 = buf;
|
|
|
|
|
|
|
|
|
|
switch (seq_result) {
|
|
|
|
|
case WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN:
|
|
|
|
|
nm_utils_strbuf_append_str (&buf, &buf_size, "unknown");
|
|
|
|
|
break;
|
|
|
|
|
case WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK:
|
|
|
|
|
nm_utils_strbuf_append_str (&buf, &buf_size, "success");
|
|
|
|
|
break;
|
|
|
|
|
case WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_UNKNOWN:
|
|
|
|
|
nm_utils_strbuf_append_str (&buf, &buf_size, "failure");
|
|
|
|
|
break;
|
|
|
|
|
default:
|
2018-03-09 15:50:16 +01:00
|
|
|
if (seq_result < 0) {
|
|
|
|
|
nm_utils_strbuf_append (&buf, &buf_size, "failure %d (%s%s%s)",
|
|
|
|
|
-((int) seq_result),
|
2019-01-31 17:08:03 +01:00
|
|
|
nm_strerror_native (-((int) seq_result)),
|
2018-03-09 15:50:16 +01:00
|
|
|
errmsg ? " - " : "",
|
|
|
|
|
errmsg ?: "");
|
|
|
|
|
}
|
2015-12-14 14:47:41 +01:00
|
|
|
else
|
|
|
|
|
nm_utils_strbuf_append (&buf, &buf_size, "internal failure %d", (int) seq_result);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
return buf0;
|
|
|
|
|
}
|
|
|
|
|
|
2017-08-16 11:58:40 +02:00
|
|
|
/*****************************************************************************
|
2015-10-13 17:53:23 +02:00
|
|
|
* Support IFLA_INET6_ADDR_GEN_MODE
|
2017-08-16 11:58:40 +02:00
|
|
|
*****************************************************************************/
|
2015-10-13 17:53:23 +02:00
|
|
|
|
|
|
|
|
static int _support_user_ipv6ll = 0;
|
|
|
|
|
#define _support_user_ipv6ll_still_undecided() (G_UNLIKELY (_support_user_ipv6ll == 0))
|
|
|
|
|
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
static void
|
|
|
|
|
_support_user_ipv6ll_detect (struct nlattr **tb)
|
2015-10-13 17:53:23 +02:00
|
|
|
{
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
gboolean supported;
|
2015-10-13 17:53:23 +02:00
|
|
|
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
nm_assert (_support_user_ipv6ll_still_undecided ());
|
|
|
|
|
|
2017-08-17 15:16:14 +02:00
|
|
|
/* IFLA_INET6_ADDR_GEN_MODE was added in kernel 3.17, dated 5 October, 2014. */
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
supported = !!tb[IFLA_INET6_ADDR_GEN_MODE];
|
|
|
|
|
_support_user_ipv6ll = supported ? 1 : -1;
|
|
|
|
|
_LOG2D ("kernel-support: IFLA_INET6_ADDR_GEN_MODE: %s",
|
|
|
|
|
supported ? "detected" : "not detected");
|
2015-10-13 17:53:23 +02:00
|
|
|
}
|
|
|
|
|
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
static gboolean
|
|
|
|
|
_support_user_ipv6ll_get (void)
|
2015-10-13 17:53:23 +02:00
|
|
|
{
|
|
|
|
|
if (_support_user_ipv6ll_still_undecided ()) {
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
_support_user_ipv6ll = 1;
|
|
|
|
|
_LOG2D ("kernel-support: IFLA_INET6_ADDR_GEN_MODE: %s", "failed to detect; assume support");
|
2015-10-13 17:53:23 +02:00
|
|
|
}
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
return _support_user_ipv6ll >= 0;
|
2015-10-13 17:53:23 +02:00
|
|
|
}
|
|
|
|
|
|
2017-08-16 11:58:40 +02:00
|
|
|
/*****************************************************************************
|
|
|
|
|
* extended IFA_FLAGS support
|
|
|
|
|
*****************************************************************************/
|
|
|
|
|
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
static int _support_kernel_extended_ifa_flags = 0;
|
2017-08-16 11:58:40 +02:00
|
|
|
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
#define _support_kernel_extended_ifa_flags_still_undecided() (G_UNLIKELY (_support_kernel_extended_ifa_flags == 0))
|
2017-08-16 11:58:40 +02:00
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
_support_kernel_extended_ifa_flags_detect (struct nl_msg *msg)
|
|
|
|
|
{
|
|
|
|
|
struct nlmsghdr *msg_hdr;
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
gboolean support;
|
2017-08-16 11:58:40 +02:00
|
|
|
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
nm_assert (_support_kernel_extended_ifa_flags_still_undecided ());
|
|
|
|
|
nm_assert (msg);
|
2017-08-16 11:58:40 +02:00
|
|
|
|
|
|
|
|
msg_hdr = nlmsg_hdr (msg);
|
|
|
|
|
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
nm_assert (msg_hdr && msg_hdr->nlmsg_type == RTM_NEWADDR);
|
|
|
|
|
|
|
|
|
|
/* IFA_FLAGS is set for IPv4 and IPv6 addresses. It was added first to IPv6,
|
|
|
|
|
* but if we encounter an IPv4 address with IFA_FLAGS, we surely have support. */
|
|
|
|
|
if (NM_IN_SET (((struct ifaddrmsg *) nlmsg_data (msg_hdr))->ifa_family, AF_INET, AF_INET6))
|
2017-08-16 11:58:40 +02:00
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
/* see if the nl_msg contains the IFA_FLAGS attribute. If it does,
|
|
|
|
|
* we assume, that the kernel supports extended flags, IFA_F_MANAGETEMPADDR
|
2017-08-17 15:16:14 +02:00
|
|
|
* and IFA_F_NOPREFIXROUTE for IPv6. They were added together in kernel 3.14,
|
|
|
|
|
* dated 30 March, 2014.
|
|
|
|
|
*
|
|
|
|
|
* For IPv4, IFA_F_NOPREFIXROUTE was added later, but there is no easy
|
|
|
|
|
* way to detect kernel support. */
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
support = !!nlmsg_find_attr (msg_hdr, sizeof (struct ifaddrmsg), IFA_FLAGS);
|
|
|
|
|
_support_kernel_extended_ifa_flags = support ? 1 : -1;
|
|
|
|
|
_LOG2D ("kernel-support: extended-ifa-flags: %s", support ? "detected" : "not detected");
|
2017-08-16 11:58:40 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
_support_kernel_extended_ifa_flags_get (void)
|
|
|
|
|
{
|
|
|
|
|
if (_support_kernel_extended_ifa_flags_still_undecided ()) {
|
|
|
|
|
_LOG2D ("kernel-support: extended-ifa-flags: %s", "unable to detect kernel support for handling IPv6 temporary addresses. Assume support");
|
|
|
|
|
_support_kernel_extended_ifa_flags = 1;
|
|
|
|
|
}
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
return _support_kernel_extended_ifa_flags >= 0;
|
2017-08-16 11:58:40 +02:00
|
|
|
}
|
|
|
|
|
|
2017-10-11 09:15:04 +02:00
|
|
|
/*****************************************************************************
|
|
|
|
|
* Support RTA_PREF
|
|
|
|
|
*****************************************************************************/
|
|
|
|
|
|
|
|
|
|
/* Tri-state detection result for kernel RTA_PREF support:
 * 0 = not yet detected, 1 = supported, -1 = not supported. */
static int _support_rta_pref = 0;

#define _support_rta_pref_still_undecided() (G_UNLIKELY (_support_rta_pref == 0))
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
_support_rta_pref_detect (struct nlattr **tb)
|
|
|
|
|
{
|
|
|
|
|
gboolean supported;
|
|
|
|
|
|
|
|
|
|
nm_assert (_support_rta_pref_still_undecided ());
|
|
|
|
|
|
|
|
|
|
/* RTA_PREF was added in kernel 4.1, dated 21 June, 2015. */
|
|
|
|
|
supported = !!tb[RTA_PREF];
|
|
|
|
|
_support_rta_pref = supported ? 1 : -1;
|
|
|
|
|
_LOG2D ("kernel-support: RTA_PREF: ability to set router preference for IPv6 routes: %s",
|
|
|
|
|
supported ? "detected" : "not detected");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
_support_rta_pref_get (void)
|
|
|
|
|
{
|
|
|
|
|
if (_support_rta_pref_still_undecided ()) {
|
|
|
|
|
/* if we couldn't detect support, we fallback on compile-time check, whether
|
|
|
|
|
* RTA_PREF is present in the kernel headers. */
|
|
|
|
|
_support_rta_pref = RTA_PREF_SUPPORTED_AT_COMPILETIME ? 1 : -1;
|
|
|
|
|
_LOG2D ("kernel-support: RTA_PREF: ability to set router preference for IPv6 routes: %s",
|
|
|
|
|
RTA_PREF_SUPPORTED_AT_COMPILETIME ? "assume support" : "assume no support");
|
|
|
|
|
}
|
|
|
|
|
return _support_rta_pref >= 0;
|
|
|
|
|
}
|
|
|
|
|
|
2015-10-13 17:53:23 +02:00
|
|
|
/******************************************************************
|
|
|
|
|
* Various utilities
|
|
|
|
|
******************************************************************/
|
|
|
|
|
|
2015-10-27 16:14:54 +01:00
|
|
|
static int
|
|
|
|
|
_vlan_qos_mapping_cmp_from (gconstpointer a, gconstpointer b, gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
const NMVlanQosMapping *map_a = a;
|
|
|
|
|
const NMVlanQosMapping *map_b = b;
|
|
|
|
|
|
|
|
|
|
if (map_a->from != map_b->from)
|
|
|
|
|
return map_a->from < map_b->from ? -1 : 1;
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
_vlan_qos_mapping_cmp_from_ptr (gconstpointer a, gconstpointer b, gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
return _vlan_qos_mapping_cmp_from (*((const NMVlanQosMapping **) a),
|
|
|
|
|
*((const NMVlanQosMapping **) b),
|
|
|
|
|
NULL);
|
|
|
|
|
}
|
|
|
|
|
|
2015-10-13 17:53:23 +02:00
|
|
|
/******************************************************************
|
|
|
|
|
* NMLinkType functions
|
|
|
|
|
******************************************************************/
|
|
|
|
|
|
|
|
|
|
/* Descriptor tying an NMLinkType to the identifiers kernel/udev expose
 * for that link type. Used by the lookup helpers below. */
typedef struct {
	const NMLinkType nm_type;

	/* human-readable name for the NMLinkType (see nm_link_type_to_string()). */
	const char *type_string;

	/* IFLA_INFO_KIND / rtnl_link_get_type() where applicable; the rtnl type
	 * should only be specified if the device type can be created without
	 * additional parameters, and if the device type can be determined from
	 * the rtnl_type. eg, tun/tap should not be specified since both
	 * tun and tap devices use "tun", and InfiniBand should not be
	 * specified because a PKey is required at creation. Drivers set this
	 * value from their 'struct rtnl_link_ops' structure.
	 */
	const char *rtnl_type;

	/* uevent DEVTYPE where applicable, from /sys/class/net/<ifname>/uevent;
	 * drivers set this value from their SET_NETDEV_DEV() call and the
	 * 'struct device_type' name member.
	 */
	const char *devtype;
} LinkDesc;
|
|
|
|
|
|
|
|
|
|
/* Table of known link types with their string name, IFLA_INFO_KIND
 * ("rtnl_type") and uevent DEVTYPE, where applicable. Order groups:
 * sentinels, hardware types, software types, master/controller types. */
static const LinkDesc linktypes[] = {
	{ NM_LINK_TYPE_NONE,          "none",        NULL,          NULL },
	{ NM_LINK_TYPE_UNKNOWN,       "unknown",     NULL,          NULL },

	{ NM_LINK_TYPE_ETHERNET,      "ethernet",    NULL,          NULL },
	{ NM_LINK_TYPE_INFINIBAND,    "infiniband",  NULL,          NULL },
	{ NM_LINK_TYPE_OLPC_MESH,     "olpc-mesh",   NULL,          NULL },
	{ NM_LINK_TYPE_WIFI,          "wifi",        NULL,          "wlan" },
	{ NM_LINK_TYPE_WWAN_NET,      "wwan",        NULL,          "wwan" },
	{ NM_LINK_TYPE_WIMAX,         "wimax",       "wimax",       "wimax" },
	{ NM_LINK_TYPE_WPAN,          "wpan",        NULL,          NULL },
	{ NM_LINK_TYPE_6LOWPAN,       "6lowpan",     NULL,          NULL },

	{ NM_LINK_TYPE_BNEP,          "bluetooth",   NULL,          "bluetooth" },
	{ NM_LINK_TYPE_DUMMY,         "dummy",       "dummy",       NULL },
	{ NM_LINK_TYPE_GRE,           "gre",         "gre",         NULL },
	{ NM_LINK_TYPE_GRETAP,        "gretap",      "gretap",      NULL },
	{ NM_LINK_TYPE_IFB,           "ifb",         "ifb",         NULL },
	{ NM_LINK_TYPE_IP6TNL,        "ip6tnl",      "ip6tnl",      NULL },
	{ NM_LINK_TYPE_IP6GRE,        "ip6gre",      "ip6gre",      NULL },
	{ NM_LINK_TYPE_IP6GRETAP,     "ip6gretap",   "ip6gretap",   NULL },
	{ NM_LINK_TYPE_IPIP,          "ipip",        "ipip",        NULL },
	{ NM_LINK_TYPE_LOOPBACK,      "loopback",    NULL,          NULL },
	{ NM_LINK_TYPE_MACSEC,        "macsec",      "macsec",      NULL },
	{ NM_LINK_TYPE_MACVLAN,       "macvlan",     "macvlan",     NULL },
	{ NM_LINK_TYPE_MACVTAP,       "macvtap",     "macvtap",     NULL },
	{ NM_LINK_TYPE_OPENVSWITCH,   "openvswitch", "openvswitch", NULL },
	{ NM_LINK_TYPE_PPP,           "ppp",         NULL,          "ppp" },
	{ NM_LINK_TYPE_SIT,           "sit",         "sit",         NULL },
	{ NM_LINK_TYPE_TUN,           "tun",         "tun",         NULL },
	{ NM_LINK_TYPE_VETH,          "veth",        "veth",        NULL },
	{ NM_LINK_TYPE_VLAN,          "vlan",        "vlan",        "vlan" },
	{ NM_LINK_TYPE_VXLAN,         "vxlan",       "vxlan",       "vxlan" },
	{ NM_LINK_TYPE_WIREGUARD,     "wireguard",   "wireguard",   "wireguard" },

	{ NM_LINK_TYPE_BRIDGE,        "bridge",      "bridge",      "bridge" },
	{ NM_LINK_TYPE_BOND,          "bond",        "bond",        "bond" },
	{ NM_LINK_TYPE_TEAM,          "team",        "team",        NULL },
};
|
|
|
|
|
|
|
|
|
|
static const char *
|
|
|
|
|
nm_link_type_to_rtnl_type_string (NMLinkType type)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < G_N_ELEMENTS (linktypes); i++) {
|
|
|
|
|
if (type == linktypes[i].nm_type)
|
|
|
|
|
return linktypes[i].rtnl_type;
|
|
|
|
|
}
|
|
|
|
|
g_return_val_if_reached (NULL);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const char *
|
|
|
|
|
nm_link_type_to_string (NMLinkType type)
|
|
|
|
|
{
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < G_N_ELEMENTS (linktypes); i++) {
|
|
|
|
|
if (type == linktypes[i].nm_type)
|
|
|
|
|
return linktypes[i].type_string;
|
|
|
|
|
}
|
|
|
|
|
g_return_val_if_reached (NULL);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/******************************************************************
|
|
|
|
|
* Utilities
|
|
|
|
|
******************************************************************/
|
|
|
|
|
|
|
|
|
|
/* _timestamp_nl_to_ms:
 * @timestamp_nl: a timestamp from ifa_cacheinfo.
 * @monotonic_ms: *now* in CLOCK_MONOTONIC. Needed to estimate the current
 *   uptime and how often timestamp_nl wrapped.
 *
 * Convert the timestamp from ifa_cacheinfo to CLOCK_MONOTONIC milliseconds.
 * The ifa_cacheinfo fields tstamp and cstamp contain timestamps that count
 * in 1/100th of a second of clock_gettime(CLOCK_MONOTONIC). However,
 * the uint32 counter wraps every 497 days of uptime, so we have to compensate
 * for that.
 *
 * Returns: the timestamp in milliseconds, never later than @monotonic_ms. */
static gint64
_timestamp_nl_to_ms (guint32 timestamp_nl, gint64 monotonic_ms)
{
	/* one full wrap of the 1/100th-second uint32 counter, in msec (~497 days). */
	const gint64 WRAP_INTERVAL = (((gint64) G_MAXUINT32) + 1) * (1000 / 100);
	gint64 timestamp_nl_ms;

	/* convert timestamp from 1/100th of a second to msec. */
	timestamp_nl_ms = ((gint64) timestamp_nl) * (1000 / 100);

	/* timestamp wraps every 497 days. Try to compensate for that. */
	if (timestamp_nl_ms > monotonic_ms) {
		/* timestamp_nl_ms is in the future. Truncate it to *now* */
		timestamp_nl_ms = monotonic_ms;
	} else if (monotonic_ms >= WRAP_INTERVAL) {
		/* add back whole wrap periods contained in the uptime; if that
		 * overshoots *now*, the timestamp belongs to the previous period. */
		timestamp_nl_ms += (monotonic_ms / WRAP_INTERVAL) * WRAP_INTERVAL;
		if (timestamp_nl_ms > monotonic_ms)
			timestamp_nl_ms -= WRAP_INTERVAL;
	}

	return timestamp_nl_ms;
}
|
|
|
|
|
|
|
|
|
|
/* Convert a kernel address timestamp (1/100th-second CLOCK_MONOTONIC scale,
 * see _timestamp_nl_to_ms()) to seconds in nm_utils_get_monotonic_timestamp_s()
 * scale. Returns at least 1 for a set timestamp. When @out_now_nm is given,
 * it receives *now* in seconds (NM scale), or 0 if @timestamp was unset. */
static guint32
_addrtime_timestamp_to_nm (guint32 timestamp, gint32 *out_now_nm)
{
	struct timespec tp;
	gint64 now_nl, now_nm, result;
	int err;

	/* timestamp is unset. Default to 1. */
	if (!timestamp) {
		if (out_now_nm)
			*out_now_nm = 0;
		return 1;
	}

	/* do all the calculations in milliseconds scale */

	err = clock_gettime (CLOCK_MONOTONIC, &tp);
	g_assert (err == 0);
	now_nm = nm_utils_get_monotonic_timestamp_ms ();
	now_nl = (((gint64) tp.tv_sec) * ((gint64) 1000)) +
	         (tp.tv_nsec / (NM_UTILS_NS_PER_SECOND/1000));

	/* shift the kernel timestamp by the offset between the two clock scales. */
	result = now_nm - (now_nl - _timestamp_nl_to_ms (timestamp, now_nl));

	if (out_now_nm)
		*out_now_nm = now_nm / 1000;

	/* converting the timestamp into nm_utils_get_monotonic_timestamp_ms() scale is
	 * a good guess but fails in the following situations:
	 *
	 * - If the address existed before start of the process, the timestamp in nm scale would
	 *   be negative or zero. In this case we default to 1.
	 * - during hibernation, the CLOCK_MONOTONIC/timestamp drifts from
	 *   nm_utils_get_monotonic_timestamp_ms() scale.
	 */
	if (result <= 1000)
		return 1;

	if (result > now_nm)
		return now_nm / 1000;

	return result / 1000;
}
|
|
|
|
|
|
|
|
|
|
static guint32
|
2015-10-12 16:07:01 +02:00
|
|
|
_addrtime_extend_lifetime (guint32 lifetime, guint32 seconds)
|
2015-10-13 17:53:23 +02:00
|
|
|
{
|
|
|
|
|
guint64 v;
|
|
|
|
|
|
|
|
|
|
if ( lifetime == NM_PLATFORM_LIFETIME_PERMANENT
|
|
|
|
|
|| seconds == 0)
|
|
|
|
|
return lifetime;
|
|
|
|
|
|
|
|
|
|
v = (guint64) lifetime + (guint64) seconds;
|
|
|
|
|
return MIN (v, NM_PLATFORM_LIFETIME_PERMANENT - 1);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* The rtnl_addr object contains relative lifetimes @valid and @preferred
|
|
|
|
|
* that count in seconds, starting from the moment when the kernel constructed
|
|
|
|
|
* the netlink message.
|
|
|
|
|
*
|
|
|
|
|
* There is also a field rtnl_addr_last_update_time(), which is the absolute
|
|
|
|
|
* time in 1/100th of a second of clock_gettime (CLOCK_MONOTONIC) when the address
|
|
|
|
|
* was modified (wrapping every 497 days).
|
|
|
|
|
* Immediately at the time when the address was last modified, #NOW and @last_update_time
|
|
|
|
|
* are the same, so (only) in that case @valid and @preferred are anchored at @last_update_time.
|
|
|
|
|
* However, this is not true in general. As time goes by, whenever kernel sends a new address
|
|
|
|
|
* via netlink, the lifetimes keep counting down.
|
|
|
|
|
**/
|
|
|
|
|
/* Re-anchor an address's relative @lifetime/@preferred (seconds, counted
 * from kernel's @timestamp) to NM's monotonic timestamp scale, writing the
 * results to the out parameters. Permanent lifetimes yield timestamp 0. */
static void
_addrtime_get_lifetimes (guint32 timestamp,
                         guint32 lifetime,
                         guint32 preferred,
                         guint32 *out_timestamp,
                         guint32 *out_lifetime,
                         guint32 *out_preferred)
{
	gint32 now;

	if (   lifetime != NM_PLATFORM_LIFETIME_PERMANENT
	    || preferred != NM_PLATFORM_LIFETIME_PERMANENT) {
		/* preferred can never outlast valid. */
		if (preferred > lifetime)
			preferred = lifetime;
		timestamp = _addrtime_timestamp_to_nm (timestamp, &now);

		if (now == 0) {
			/* strange. failed to detect the last-update time and assumed that timestamp is 1. */
			nm_assert (timestamp == 1);
			now = nm_utils_get_monotonic_timestamp_s ();
		}
		if (timestamp < now) {
			/* the lifetimes were anchored in the past; extend them by the
			 * elapsed time so they stay anchored at @timestamp. */
			guint32 diff = now - timestamp;

			lifetime = _addrtime_extend_lifetime (lifetime, diff);
			preferred = _addrtime_extend_lifetime (preferred, diff);
		} else
			nm_assert (timestamp == now);
	} else
		timestamp = 0;
	*out_timestamp = timestamp;
	*out_lifetime = lifetime;
	*out_preferred = preferred;
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2015-10-12 16:07:01 +02:00
|
|
|
|
|
|
|
|
static const NMPObject *
|
2018-09-04 14:48:59 +02:00
|
|
|
_lookup_cached_link (const NMPCache *cache,
|
|
|
|
|
int ifindex,
|
|
|
|
|
gboolean *completed_from_cache,
|
|
|
|
|
const NMPObject **link_cached)
|
2015-10-12 16:07:01 +02:00
|
|
|
{
|
|
|
|
|
const NMPObject *obj;
|
|
|
|
|
|
|
|
|
|
nm_assert (completed_from_cache && link_cached);
|
|
|
|
|
|
|
|
|
|
if (!*completed_from_cache) {
|
2018-09-04 14:48:59 +02:00
|
|
|
obj = ifindex > 0 && cache
|
|
|
|
|
? nmp_cache_lookup_link (cache, ifindex)
|
|
|
|
|
: NULL;
|
2015-10-12 16:07:01 +02:00
|
|
|
|
2018-09-04 14:48:59 +02:00
|
|
|
*link_cached = obj;
|
2015-10-12 16:07:01 +02:00
|
|
|
*completed_from_cache = TRUE;
|
|
|
|
|
}
|
|
|
|
|
return *link_cached;
|
|
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2015-10-12 16:07:01 +02:00
|
|
|
|
|
|
|
|
#define DEVTYPE_PREFIX "DEVTYPE="
|
|
|
|
|
|
|
|
|
|
static char *
|
2016-12-09 10:43:06 +01:00
|
|
|
_linktype_read_devtype (int dirfd)
|
2015-10-12 16:07:01 +02:00
|
|
|
{
|
|
|
|
|
char *contents = NULL;
|
|
|
|
|
char *cont, *end;
|
|
|
|
|
|
2016-12-09 10:43:06 +01:00
|
|
|
nm_assert (dirfd >= 0);
|
2016-04-21 11:10:13 +02:00
|
|
|
|
2018-08-30 13:56:05 +02:00
|
|
|
if (nm_utils_file_get_contents (dirfd, "uevent", 1*1024*1024,
|
|
|
|
|
NM_UTILS_FILE_GET_CONTENTS_FLAG_NONE,
|
|
|
|
|
&contents, NULL, NULL) < 0)
|
2015-10-12 16:07:01 +02:00
|
|
|
return NULL;
|
|
|
|
|
for (cont = contents; cont; cont = end) {
|
|
|
|
|
end = strpbrk (cont, "\r\n");
|
|
|
|
|
if (end)
|
|
|
|
|
*end++ = '\0';
|
2016-02-12 12:34:43 +01:00
|
|
|
if (strncmp (cont, DEVTYPE_PREFIX, NM_STRLEN (DEVTYPE_PREFIX)) == 0) {
|
|
|
|
|
cont += NM_STRLEN (DEVTYPE_PREFIX);
|
2015-10-12 16:07:01 +02:00
|
|
|
memmove (contents, cont, strlen (cont) + 1);
|
|
|
|
|
return contents;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
g_free (contents);
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static NMLinkType
|
|
|
|
|
_linktype_get_type (NMPlatform *platform,
|
|
|
|
|
const NMPCache *cache,
|
|
|
|
|
const char *kind,
|
|
|
|
|
int ifindex,
|
|
|
|
|
const char *ifname,
|
|
|
|
|
unsigned flags,
|
|
|
|
|
unsigned arptype,
|
|
|
|
|
gboolean *completed_from_cache,
|
|
|
|
|
const NMPObject **link_cached,
|
|
|
|
|
const char **out_kind)
|
|
|
|
|
{
|
|
|
|
|
guint i;
|
|
|
|
|
|
2017-06-29 13:13:54 +02:00
|
|
|
NMTST_ASSERT_PLATFORM_NETNS_CURRENT (platform);
|
2016-12-12 14:06:44 +01:00
|
|
|
nm_assert (ifname);
|
2016-02-19 01:06:28 +01:00
|
|
|
|
2015-10-16 11:28:34 +02:00
|
|
|
if (completed_from_cache) {
|
2015-10-12 16:07:01 +02:00
|
|
|
const NMPObject *obj;
|
|
|
|
|
|
|
|
|
|
obj = _lookup_cached_link (cache, ifindex, completed_from_cache, link_cached);
|
2015-10-16 11:28:34 +02:00
|
|
|
|
|
|
|
|
/* If we detected the link type before, we stick to that
|
2016-04-26 10:21:51 +02:00
|
|
|
* decision unless the "kind" or "name" changed. If "name" changed,
|
|
|
|
|
* it means that their type may not have been determined correctly
|
|
|
|
|
* due to race conditions while accessing sysfs.
|
2015-10-16 11:28:34 +02:00
|
|
|
*
|
|
|
|
|
* This way, we save additional ethtool/sysctl lookups, but moreover,
|
|
|
|
|
* we keep the linktype stable and don't change it as long as the link
|
|
|
|
|
* exists.
|
|
|
|
|
*
|
|
|
|
|
* Note that kernel *can* reuse the ifindex (on integer overflow, and
|
|
|
|
|
* when moving an interface to another netns). Thus here there is a tiny potential
|
|
|
|
|
* of messing stuff up. */
|
|
|
|
|
if ( obj
|
2018-09-04 14:48:59 +02:00
|
|
|
&& obj->_link.netlink.is_in_netlink
|
2015-10-16 11:28:34 +02:00
|
|
|
&& !NM_IN_SET (obj->link.type, NM_LINK_TYPE_UNKNOWN, NM_LINK_TYPE_NONE)
|
2016-12-12 14:06:44 +01:00
|
|
|
&& nm_streq (ifname, obj->link.name)
|
2015-10-16 11:28:34 +02:00
|
|
|
&& ( !kind
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel vesions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
|| nm_streq0 (kind, obj->link.kind))) {
|
2015-10-16 11:28:34 +02:00
|
|
|
nm_assert (obj->link.kind == g_intern_string (obj->link.kind));
|
|
|
|
|
*out_kind = obj->link.kind;
|
|
|
|
|
return obj->link.type;
|
|
|
|
|
}
|
2015-10-12 16:07:01 +02:00
|
|
|
}
|
|
|
|
|
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel vesions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
/* we intern kind to not require us to keep the pointer alive. Essentially
|
|
|
|
|
* leaking it in a global cache. That should be safe enough, because the
|
|
|
|
|
* kind comes only from kernel messages, which depend on the number of
|
|
|
|
|
* available drivers. So, there is not the danger that we leak uncontrolled
|
|
|
|
|
* many kinds. */
|
2015-10-16 11:28:34 +02:00
|
|
|
*out_kind = g_intern_string (kind);
|
2015-10-12 16:07:01 +02:00
|
|
|
|
|
|
|
|
if (kind) {
|
|
|
|
|
for (i = 0; i < G_N_ELEMENTS (linktypes); i++) {
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel vesions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
if (nm_streq0 (kind, linktypes[i].rtnl_type)) {
|
2015-10-12 16:07:01 +02:00
|
|
|
return linktypes[i].nm_type;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (arptype == ARPHRD_LOOPBACK)
|
|
|
|
|
return NM_LINK_TYPE_LOOPBACK;
|
|
|
|
|
else if (arptype == ARPHRD_INFINIBAND)
|
|
|
|
|
return NM_LINK_TYPE_INFINIBAND;
|
2015-11-11 18:41:48 +01:00
|
|
|
else if (arptype == ARPHRD_SIT)
|
|
|
|
|
return NM_LINK_TYPE_SIT;
|
2015-11-27 22:22:25 +01:00
|
|
|
else if (arptype == ARPHRD_TUNNEL6)
|
|
|
|
|
return NM_LINK_TYPE_IP6TNL;
|
2017-06-06 15:55:08 +02:00
|
|
|
else if (arptype == ARPHRD_PPP)
|
|
|
|
|
return NM_LINK_TYPE_PPP;
|
2018-03-09 15:48:24 +01:00
|
|
|
else if (arptype == ARPHRD_IEEE802154)
|
|
|
|
|
return NM_LINK_TYPE_WPAN;
|
2018-03-09 15:48:24 +01:00
|
|
|
else if (arptype == ARPHRD_6LOWPAN)
|
|
|
|
|
return NM_LINK_TYPE_6LOWPAN;
|
2015-10-12 16:07:01 +02:00
|
|
|
|
2016-12-12 14:06:44 +01:00
|
|
|
{
|
2016-12-12 13:47:52 +01:00
|
|
|
NMPUtilsEthtoolDriverInfo driver_info;
|
2015-10-12 16:07:01 +02:00
|
|
|
|
|
|
|
|
/* Fallback OVS detection for kernel <= 3.16 */
|
2016-12-12 13:47:52 +01:00
|
|
|
if (nmp_utils_ethtool_get_driver_info (ifindex, &driver_info)) {
|
|
|
|
|
if (nm_streq (driver_info.driver, "openvswitch"))
|
2015-10-12 16:07:01 +02:00
|
|
|
return NM_LINK_TYPE_OPENVSWITCH;
|
|
|
|
|
|
|
|
|
|
if (arptype == 256) {
|
|
|
|
|
/* Some s390 CTC-type devices report 256 for the encapsulation type
|
|
|
|
|
* for some reason, but we need to call them Ethernet.
|
|
|
|
|
*/
|
2016-12-12 13:47:52 +01:00
|
|
|
if (nm_streq (driver_info.driver, "ctcm"))
|
2015-10-12 16:07:01 +02:00
|
|
|
return NM_LINK_TYPE_ETHERNET;
|
|
|
|
|
}
|
|
|
|
|
}
|
2016-12-12 14:06:44 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
{
|
|
|
|
|
nm_auto_close int dirfd = -1;
|
|
|
|
|
gs_free char *devtype = NULL;
|
|
|
|
|
char ifname_verified[IFNAMSIZ];
|
2015-10-12 16:07:01 +02:00
|
|
|
|
2016-12-09 10:43:06 +01:00
|
|
|
dirfd = nmp_utils_sysctl_open_netdir (ifindex, ifname, ifname_verified);
|
|
|
|
|
if (dirfd >= 0) {
|
|
|
|
|
if (faccessat (dirfd, "anycast_mask", F_OK, 0) == 0)
|
|
|
|
|
return NM_LINK_TYPE_OLPC_MESH;
|
|
|
|
|
|
|
|
|
|
devtype = _linktype_read_devtype (dirfd);
|
|
|
|
|
for (i = 0; devtype && i < G_N_ELEMENTS (linktypes); i++) {
|
|
|
|
|
if (g_strcmp0 (devtype, linktypes[i].devtype) == 0) {
|
|
|
|
|
if (linktypes[i].nm_type == NM_LINK_TYPE_BNEP) {
|
|
|
|
|
/* Both BNEP and 6lowpan use DEVTYPE=bluetooth, so we must
|
|
|
|
|
* use arptype to distinguish between them.
|
|
|
|
|
*/
|
|
|
|
|
if (arptype != ARPHRD_ETHER)
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
return linktypes[i].nm_type;
|
2015-10-12 16:07:01 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-12-09 10:43:06 +01:00
|
|
|
/* Fallback for drivers that don't call SET_NETDEV_DEVTYPE() */
|
2018-06-09 13:56:21 +02:00
|
|
|
if (nm_wifi_utils_is_wifi (dirfd, ifname_verified))
|
2016-12-09 10:43:06 +01:00
|
|
|
return NM_LINK_TYPE_WIFI;
|
|
|
|
|
}
|
2015-10-12 16:07:01 +02:00
|
|
|
|
2016-01-20 12:45:21 +01:00
|
|
|
if (arptype == ARPHRD_ETHER) {
|
2016-06-14 11:35:17 -05:00
|
|
|
/* Misc non-upstream WWAN drivers. rmnet is Qualcomm's proprietary
|
|
|
|
|
* modem interface, ccmni is MediaTek's. FIXME: these drivers should
|
|
|
|
|
* really set devtype=WWAN.
|
|
|
|
|
*/
|
|
|
|
|
if (g_str_has_prefix (ifname, "rmnet") ||
|
|
|
|
|
g_str_has_prefix (ifname, "rev_rmnet") ||
|
|
|
|
|
g_str_has_prefix (ifname, "ccmni"))
|
|
|
|
|
return NM_LINK_TYPE_WWAN_NET;
|
|
|
|
|
|
2016-01-20 12:45:21 +01:00
|
|
|
/* Standard wired ethernet interfaces don't report an rtnl_link_type, so
|
|
|
|
|
* only allow fallback to Ethernet if no type is given. This should
|
|
|
|
|
* prevent future virtual network drivers from being treated as Ethernet
|
|
|
|
|
* when they should be Generic instead.
|
|
|
|
|
*/
|
|
|
|
|
if (!kind && !devtype)
|
|
|
|
|
return NM_LINK_TYPE_ETHERNET;
|
|
|
|
|
/* The USB gadget interfaces behave and look like ordinary ethernet devices
|
|
|
|
|
* aside from the DEVTYPE. */
|
|
|
|
|
if (!g_strcmp0 (devtype, "gadget"))
|
|
|
|
|
return NM_LINK_TYPE_ETHERNET;
|
2017-10-05 22:35:25 +02:00
|
|
|
|
|
|
|
|
/* Distributed Switch Architecture switch chips */
|
|
|
|
|
if (!g_strcmp0 (devtype, "dsa"))
|
|
|
|
|
return NM_LINK_TYPE_ETHERNET;
|
2016-01-20 12:45:21 +01:00
|
|
|
}
|
2015-10-12 16:07:01 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return NM_LINK_TYPE_UNKNOWN;
|
|
|
|
|
}
|
|
|
|
|
|
2014-10-22 18:19:54 +02:00
|
|
|
/******************************************************************
|
|
|
|
|
* libnl unility functions and wrappers
|
|
|
|
|
******************************************************************/
|
|
|
|
|
|
2017-08-16 16:13:24 +02:00
|
|
|
/* Pointer to the first byte after the last attribute of netlink message
 * @nmsg, i.e. where the next attribute is to be appended. */
#define NLMSG_TAIL(nmsg) \
	((struct rtattr *) (((char *) (nmsg)) + NLMSG_ALIGN ((nmsg)->nlmsg_len)))

/* copied from iproute2's addattr_l(). */
/**
 * _nl_addattr_l:
 * @n: hand-constructed netlink message to append to.
 * @maxlen: total size in bytes of the buffer backing @n.
 * @type: netlink attribute type (rta_type).
 * @data: attribute payload to copy.
 * @alen: length of @data in bytes.
 *
 * Appends one rtattr (header + payload) to @n and bumps nlmsg_len by the
 * aligned attribute size.
 *
 * Returns: TRUE on success, FALSE if the aligned attribute would overflow
 *   the @maxlen buffer (the message is left unmodified in that case).
 */
static gboolean
_nl_addattr_l (struct nlmsghdr *n,
               int maxlen,
               int type,
               const void *data,
               int alen)
{
	int len = RTA_LENGTH (alen);
	struct rtattr *rta;

	/* reject the attribute if the aligned payload would not fit. */
	if (NLMSG_ALIGN (n->nlmsg_len) + RTA_ALIGN (len) > maxlen)
		return FALSE;

	rta = NLMSG_TAIL (n);
	rta->rta_type = type;
	rta->rta_len = len;
	memcpy (RTA_DATA (rta), data, alen);
	/* account for the newly appended (aligned) attribute. */
	n->nlmsg_len = NLMSG_ALIGN (n->nlmsg_len) + RTA_ALIGN (len);
	return TRUE;
}
|
|
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
/******************************************************************
|
|
|
|
|
* NMPObject/netlink functions
|
|
|
|
|
******************************************************************/
|
2015-05-29 09:40:24 +02:00
|
|
|
|
2018-09-04 11:16:28 +02:00
|
|
|
/* Evaluates to whether attribute @attr is present in the parsed attribute
 * array @tb. If the attribute is present but its payload is not exactly
 * @addr_len bytes, the *enclosing function* returns @ret_val (note the
 * hidden "return" inside this statement expression). */
#define _check_addr_or_return_val(tb, attr, addr_len, ret_val) \
	({ \
	    const struct nlattr *__t = (tb)[(attr)]; \
	    \
	    if (__t) { \
	        if (nla_len (__t) != (addr_len)) { \
	            return ret_val; \
	        } \
	    } \
	    !!__t; \
	})

/* Like _check_addr_or_return_val(), but makes the enclosing function
 * return NULL on a malformed address attribute. */
#define _check_addr_or_return_null(tb, attr, addr_len) \
	_check_addr_or_return_val (tb, attr, addr_len, NULL)
|
|
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
/*****************************************************************************/
|
2015-05-29 09:40:24 +02:00
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
/* Copied and heavily modified from libnl3's inet6_parse_protinfo(). */
/**
 * _parse_af_inet6:
 * @platform: the platform instance (unused here except as context).
 * @attr: the nested AF_INET6 attribute from an IFLA_AF_SPEC container.
 * @out_token: (out): IPv6 interface identifier from IFLA_INET6_TOKEN;
 *   only written when *@out_token_valid is set to TRUE.
 * @out_token_valid: (out): whether a valid token was parsed. Note that it
 *   is only ever written on success; the caller must pre-initialize it.
 * @out_addr_gen_mode_inv: (out): the *inverted* addr-gen-mode
 *   (_nm_platform_uint8_inv()); zero is reserved to mean "unset".
 * @out_addr_gen_mode_valid: (out): whether addr-gen-mode was present.
 *   Also only written when TRUE; caller must pre-initialize.
 *
 * Parses the IPv6-specific link attributes of an RTM_NEWLINK message.
 *
 * Returns: FALSE if the nested attributes are malformed (bad lengths or
 *   an addr-gen-mode that inverts to zero), TRUE otherwise.
 */
static gboolean
_parse_af_inet6 (NMPlatform *platform,
                 struct nlattr *attr,
                 NMUtilsIPv6IfaceId *out_token,
                 gboolean *out_token_valid,
                 guint8 *out_addr_gen_mode_inv,
                 gboolean *out_addr_gen_mode_valid)
{
	static const struct nla_policy policy[] = {
		[IFLA_INET6_FLAGS]         = { .type = NLA_U32 },
		[IFLA_INET6_CACHEINFO]     = { .minlen = nm_offsetofend (struct ifla_cacheinfo, retrans_time) },
		[IFLA_INET6_CONF]          = { .minlen = 4 },
		[IFLA_INET6_STATS]         = { .minlen = 8 },
		[IFLA_INET6_ICMP6STATS]    = { .minlen = 8 },
		[IFLA_INET6_TOKEN]         = { .minlen = sizeof (struct in6_addr) },
		[IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
	};
	struct nlattr *tb[G_N_ELEMENTS (policy)];
	struct in6_addr i6_token;
	gboolean token_valid = FALSE;
	gboolean addr_gen_mode_valid = FALSE;
	guint8 i6_addr_gen_mode_inv = 0;

	if (nla_parse_nested_arr (tb, attr, policy) < 0)
		return FALSE;

	/* the CONF/STATS/ICMP6STATS payloads are arrays of 32-bit respectively
	 * 64-bit counters; reject payloads that are not a whole multiple. */
	if (tb[IFLA_INET6_CONF] && nla_len (tb[IFLA_INET6_CONF]) % 4)
		return FALSE;
	if (tb[IFLA_INET6_STATS] && nla_len (tb[IFLA_INET6_STATS]) % 8)
		return FALSE;
	if (tb[IFLA_INET6_ICMP6STATS] && nla_len (tb[IFLA_INET6_ICMP6STATS]) % 8)
		return FALSE;

	/* note: this macro makes the function return FALSE on a token
	 * attribute with the wrong length. */
	if (_check_addr_or_return_val (tb, IFLA_INET6_TOKEN, sizeof (struct in6_addr), FALSE)) {
		nla_memcpy (&i6_token, tb[IFLA_INET6_TOKEN], sizeof (struct in6_addr));
		token_valid = TRUE;
	}

	/* Hack to detect support addrgenmode of the kernel. We only parse
	 * netlink messages that we receive from kernel, hence this check
	 * is valid. */
	if (_support_user_ipv6ll_still_undecided ())
		_support_user_ipv6ll_detect (tb);

	if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
		i6_addr_gen_mode_inv = _nm_platform_uint8_inv (nla_get_u8 (tb[IFLA_INET6_ADDR_GEN_MODE]));
		if (i6_addr_gen_mode_inv == 0) {
			/* an inverse addrgenmode of zero is unexpected. We need to reserve zero
			 * to signal "unset". */
			return FALSE;
		}
		addr_gen_mode_valid = TRUE;
	}

	/* out-params are only written when the corresponding value was found;
	 * on absence the caller's pre-set values stay untouched. */
	if (token_valid) {
		*out_token_valid = token_valid;
		nm_utils_ipv6_interface_identifier_get_from_addr (out_token, &i6_token);
	}
	if (addr_gen_mode_valid) {
		*out_addr_gen_mode_valid = addr_gen_mode_valid;
		*out_addr_gen_mode_inv = i6_addr_gen_mode_inv;
	}
	return TRUE;
}
|
|
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2015-10-12 15:15:21 +02:00
|
|
|
static NMPObject *
|
|
|
|
|
_parse_lnk_gre (const char *kind, struct nlattr *info_data)
|
|
|
|
|
{
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy[] = {
|
2015-10-12 15:15:21 +02:00
|
|
|
[IFLA_GRE_LINK] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
|
|
|
|
|
[IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
|
|
|
|
|
[IFLA_GRE_IKEY] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_GRE_OKEY] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_GRE_LOCAL] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_GRE_REMOTE] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_GRE_TTL] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_GRE_TOS] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
|
|
|
|
|
};
|
2019-02-15 11:33:57 +01:00
|
|
|
struct nlattr *tb[G_N_ELEMENTS (policy)];
|
2015-10-12 15:15:21 +02:00
|
|
|
NMPObject *obj;
|
|
|
|
|
NMPlatformLnkGre *props;
|
2018-06-26 10:45:35 +02:00
|
|
|
gboolean is_tap;
|
2015-10-12 15:15:21 +02:00
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if ( !info_data
|
|
|
|
|
|| !kind)
|
2018-06-26 10:45:35 +02:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
if (nm_streq (kind, "gretap"))
|
|
|
|
|
is_tap = TRUE;
|
|
|
|
|
else if (nm_streq (kind, "gre"))
|
|
|
|
|
is_tap = FALSE;
|
|
|
|
|
else
|
2015-10-12 15:15:21 +02:00
|
|
|
return NULL;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nla_parse_nested_arr (tb, info_data, policy) < 0)
|
2015-10-12 15:15:21 +02:00
|
|
|
return NULL;
|
|
|
|
|
|
2018-06-26 10:45:35 +02:00
|
|
|
obj = nmp_object_new (is_tap ? NMP_OBJECT_TYPE_LNK_GRETAP : NMP_OBJECT_TYPE_LNK_GRE, NULL);
|
2015-10-12 15:15:21 +02:00
|
|
|
props = &obj->lnk_gre;
|
|
|
|
|
|
|
|
|
|
props->parent_ifindex = tb[IFLA_GRE_LINK] ? nla_get_u32 (tb[IFLA_GRE_LINK]) : 0;
|
2015-09-01 22:11:47 +02:00
|
|
|
props->input_flags = tb[IFLA_GRE_IFLAGS] ? ntohs (nla_get_u16 (tb[IFLA_GRE_IFLAGS])) : 0;
|
|
|
|
|
props->output_flags = tb[IFLA_GRE_OFLAGS] ? ntohs (nla_get_u16 (tb[IFLA_GRE_OFLAGS])) : 0;
|
|
|
|
|
props->input_key = tb[IFLA_GRE_IKEY] ? ntohl (nla_get_u32 (tb[IFLA_GRE_IKEY])) : 0;
|
|
|
|
|
props->output_key = tb[IFLA_GRE_OKEY] ? ntohl (nla_get_u32 (tb[IFLA_GRE_OKEY])) : 0;
|
2015-10-12 15:15:21 +02:00
|
|
|
props->local = tb[IFLA_GRE_LOCAL] ? nla_get_u32 (tb[IFLA_GRE_LOCAL]) : 0;
|
|
|
|
|
props->remote = tb[IFLA_GRE_REMOTE] ? nla_get_u32 (tb[IFLA_GRE_REMOTE]) : 0;
|
|
|
|
|
props->tos = tb[IFLA_GRE_TOS] ? nla_get_u8 (tb[IFLA_GRE_TOS]) : 0;
|
|
|
|
|
props->ttl = tb[IFLA_GRE_TTL] ? nla_get_u8 (tb[IFLA_GRE_TTL]) : 0;
|
|
|
|
|
props->path_mtu_discovery = !tb[IFLA_GRE_PMTUDISC] || !!nla_get_u8 (tb[IFLA_GRE_PMTUDISC]);
|
2018-06-26 10:45:35 +02:00
|
|
|
props->is_tap = is_tap;
|
2015-10-12 15:15:21 +02:00
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2015-10-15 15:47:14 +02:00
|
|
|
/* IFLA_IPOIB_* were introduced in the 3.7 kernel, but the kernel headers
|
|
|
|
|
* we're building against might not have those properties even though the
|
|
|
|
|
* running kernel might.
|
|
|
|
|
*/
|
|
|
|
|
#define IFLA_IPOIB_UNSPEC 0
|
|
|
|
|
#define IFLA_IPOIB_PKEY 1
|
|
|
|
|
#define IFLA_IPOIB_MODE 2
|
|
|
|
|
#define IFLA_IPOIB_UMCAST 3
|
|
|
|
|
#undef IFLA_IPOIB_MAX
|
|
|
|
|
#define IFLA_IPOIB_MAX IFLA_IPOIB_UMCAST
|
|
|
|
|
|
|
|
|
|
#define IPOIB_MODE_DATAGRAM 0 /* using unreliable datagram QPs */
|
|
|
|
|
#define IPOIB_MODE_CONNECTED 1 /* using connected QPs */
|
|
|
|
|
|
|
|
|
|
static NMPObject *
|
|
|
|
|
_parse_lnk_infiniband (const char *kind, struct nlattr *info_data)
|
|
|
|
|
{
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy[] = {
|
2015-10-15 15:47:14 +02:00
|
|
|
[IFLA_IPOIB_PKEY] = { .type = NLA_U16 },
|
|
|
|
|
[IFLA_IPOIB_MODE] = { .type = NLA_U16 },
|
|
|
|
|
[IFLA_IPOIB_UMCAST] = { .type = NLA_U16 },
|
|
|
|
|
};
|
2019-02-15 11:33:57 +01:00
|
|
|
struct nlattr *tb[G_N_ELEMENTS (policy)];
|
2015-10-15 15:47:14 +02:00
|
|
|
NMPlatformLnkInfiniband *info;
|
|
|
|
|
NMPObject *obj;
|
|
|
|
|
const char *mode;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if ( !info_data
|
|
|
|
|
|| !nm_streq0 (kind, "ipoib"))
|
2015-10-15 15:47:14 +02:00
|
|
|
return NULL;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nla_parse_nested_arr (tb, info_data, policy) < 0)
|
2015-10-15 15:47:14 +02:00
|
|
|
return NULL;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if ( !tb[IFLA_IPOIB_PKEY]
|
|
|
|
|
|| !tb[IFLA_IPOIB_MODE])
|
2015-10-15 15:47:14 +02:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
switch (nla_get_u16 (tb[IFLA_IPOIB_MODE])) {
|
|
|
|
|
case IPOIB_MODE_DATAGRAM:
|
|
|
|
|
mode = "datagram";
|
|
|
|
|
break;
|
|
|
|
|
case IPOIB_MODE_CONNECTED:
|
|
|
|
|
mode = "connected";
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
obj = nmp_object_new (NMP_OBJECT_TYPE_LNK_INFINIBAND, NULL);
|
|
|
|
|
info = &obj->lnk_infiniband;
|
|
|
|
|
|
|
|
|
|
info->p_key = nla_get_u16 (tb[IFLA_IPOIB_PKEY]);
|
|
|
|
|
info->mode = mode;
|
|
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2015-11-27 22:22:25 +01:00
|
|
|
static NMPObject *
|
|
|
|
|
_parse_lnk_ip6tnl (const char *kind, struct nlattr *info_data)
|
|
|
|
|
{
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy[] = {
|
2015-11-27 22:22:25 +01:00
|
|
|
[IFLA_IPTUN_LINK] = { .type = NLA_U32 },
|
2019-02-21 08:37:40 +01:00
|
|
|
[IFLA_IPTUN_LOCAL] = { .minlen = sizeof (struct in6_addr)},
|
|
|
|
|
[IFLA_IPTUN_REMOTE] = { .minlen = sizeof (struct in6_addr)},
|
2015-11-27 22:22:25 +01:00
|
|
|
[IFLA_IPTUN_TTL] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_IPTUN_ENCAP_LIMIT] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_IPTUN_FLOWINFO] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
|
2017-12-22 10:24:25 +01:00
|
|
|
[IFLA_IPTUN_FLAGS] = { .type = NLA_U32 },
|
2015-11-27 22:22:25 +01:00
|
|
|
};
|
2019-02-15 11:33:57 +01:00
|
|
|
struct nlattr *tb[G_N_ELEMENTS (policy)];
|
2015-11-27 22:22:25 +01:00
|
|
|
NMPObject *obj;
|
|
|
|
|
NMPlatformLnkIp6Tnl *props;
|
|
|
|
|
guint32 flowinfo;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if ( !info_data
|
|
|
|
|
|| !nm_streq0 (kind, "ip6tnl"))
|
2015-11-27 22:22:25 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nla_parse_nested_arr (tb, info_data, policy) < 0)
|
2015-11-27 22:22:25 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
obj = nmp_object_new (NMP_OBJECT_TYPE_LNK_IP6TNL, NULL);
|
|
|
|
|
props = &obj->lnk_ip6tnl;
|
|
|
|
|
|
|
|
|
|
if (tb[IFLA_IPTUN_LINK])
|
|
|
|
|
props->parent_ifindex = nla_get_u32 (tb[IFLA_IPTUN_LINK]);
|
|
|
|
|
if (tb[IFLA_IPTUN_LOCAL])
|
2019-02-21 08:37:40 +01:00
|
|
|
props->local = *nla_data_as (struct in6_addr, tb[IFLA_IPTUN_LOCAL]);
|
2015-11-27 22:22:25 +01:00
|
|
|
if (tb[IFLA_IPTUN_REMOTE])
|
2019-02-21 08:37:40 +01:00
|
|
|
props->remote = *nla_data_as (struct in6_addr, tb[IFLA_IPTUN_REMOTE]);
|
2015-11-27 22:22:25 +01:00
|
|
|
if (tb[IFLA_IPTUN_TTL])
|
|
|
|
|
props->ttl = nla_get_u8 (tb[IFLA_IPTUN_TTL]);
|
|
|
|
|
if (tb[IFLA_IPTUN_ENCAP_LIMIT])
|
|
|
|
|
props->encap_limit = nla_get_u8 (tb[IFLA_IPTUN_ENCAP_LIMIT]);
|
|
|
|
|
if (tb[IFLA_IPTUN_FLOWINFO]) {
|
|
|
|
|
flowinfo = ntohl (nla_get_u32 (tb[IFLA_IPTUN_FLOWINFO]));
|
|
|
|
|
props->flow_label = flowinfo & IP6_FLOWINFO_FLOWLABEL_MASK;
|
|
|
|
|
props->tclass = (flowinfo & IP6_FLOWINFO_TCLASS_MASK) >> IP6_FLOWINFO_TCLASS_SHIFT;
|
|
|
|
|
}
|
|
|
|
|
if (tb[IFLA_IPTUN_PROTO])
|
|
|
|
|
props->proto = nla_get_u8 (tb[IFLA_IPTUN_PROTO]);
|
2017-12-22 10:24:25 +01:00
|
|
|
if (tb[IFLA_IPTUN_FLAGS])
|
|
|
|
|
props->flags = nla_get_u32 (tb[IFLA_IPTUN_FLAGS]);
|
2015-11-27 22:22:25 +01:00
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
|
}
|
|
|
|
|
|
2018-06-26 12:06:43 +02:00
|
|
|
/* Builds an NMP_OBJECT_TYPE_LNK_IP6GRE/IP6GRETAP object from the
 * IFLA_INFO_DATA of an "ip6gre"/"ip6gretap" link. Note that both variants
 * reuse the NMPlatformLnkIp6Tnl struct (distinguished via is_gre/is_tap),
 * and that GRE flags/keys arrive in network byte order. Returns NULL if
 * the kind does not match or the attributes are malformed. */
static NMPObject *
_parse_lnk_ip6gre (const char *kind, struct nlattr *info_data)
{
	static const struct nla_policy policy[] = {
		[IFLA_GRE_LINK]        = { .type = NLA_U32 },
		[IFLA_GRE_IFLAGS]      = { .type = NLA_U16 },
		[IFLA_GRE_OFLAGS]      = { .type = NLA_U16 },
		[IFLA_GRE_IKEY]        = { .type = NLA_U32 },
		[IFLA_GRE_OKEY]        = { .type = NLA_U32 },
		[IFLA_GRE_LOCAL]       = { .type = NLA_UNSPEC,
		                           .minlen = sizeof (struct in6_addr)},
		[IFLA_GRE_REMOTE]      = { .type = NLA_UNSPEC,
		                           .minlen = sizeof (struct in6_addr)},
		[IFLA_GRE_TTL]         = { .type = NLA_U8 },
		[IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
		[IFLA_GRE_FLOWINFO]    = { .type = NLA_U32 },
		[IFLA_GRE_FLAGS]       = { .type = NLA_U32 },
	};
	struct nlattr *tb[G_N_ELEMENTS (policy)];
	NMPObject *obj;
	NMPlatformLnkIp6Tnl *props;
	guint32 flowinfo;
	gboolean is_tap;

	if (!info_data || !kind)
		return NULL;

	/* the two kinds share attribute layout; only the object type differs. */
	if (nm_streq (kind, "ip6gre"))
		is_tap = FALSE;
	else if (nm_streq (kind, "ip6gretap"))
		is_tap = TRUE;
	else
		return NULL;

	if (nla_parse_nested_arr (tb, info_data, policy) < 0)
		return NULL;

	obj = nmp_object_new (is_tap ? NMP_OBJECT_TYPE_LNK_IP6GRETAP : NMP_OBJECT_TYPE_LNK_IP6GRE, NULL);
	props = &obj->lnk_ip6tnl;
	/* mark that this lnk_ip6tnl instance actually describes a GRE tunnel. */
	props->is_gre = TRUE;
	props->is_tap = is_tap;

	if (tb[IFLA_GRE_LINK])
		props->parent_ifindex = nla_get_u32 (tb[IFLA_GRE_LINK]);
	/* GRE flags and keys are carried in network byte order. */
	if (tb[IFLA_GRE_IFLAGS])
		props->input_flags = ntohs (nla_get_u16 (tb[IFLA_GRE_IFLAGS]));
	if (tb[IFLA_GRE_OFLAGS])
		props->output_flags = ntohs (nla_get_u16 (tb[IFLA_GRE_OFLAGS]));
	if (tb[IFLA_GRE_IKEY])
		props->input_key = ntohl (nla_get_u32 (tb[IFLA_GRE_IKEY]));
	if (tb[IFLA_GRE_OKEY])
		props->output_key = ntohl (nla_get_u32 (tb[IFLA_GRE_OKEY]));
	if (tb[IFLA_GRE_LOCAL])
		props->local = *nla_data_as (struct in6_addr, tb[IFLA_GRE_LOCAL]);
	if (tb[IFLA_GRE_REMOTE])
		props->remote = *nla_data_as (struct in6_addr, tb[IFLA_GRE_REMOTE]);
	if (tb[IFLA_GRE_TTL])
		props->ttl = nla_get_u8 (tb[IFLA_GRE_TTL]);
	if (tb[IFLA_GRE_ENCAP_LIMIT])
		props->encap_limit = nla_get_u8 (tb[IFLA_GRE_ENCAP_LIMIT]);
	if (tb[IFLA_GRE_FLOWINFO]) {
		/* split the IPv6 flow-info word into flow label and traffic class. */
		flowinfo = ntohl (nla_get_u32 (tb[IFLA_GRE_FLOWINFO]));
		props->flow_label = flowinfo & IP6_FLOWINFO_FLOWLABEL_MASK;
		props->tclass = (flowinfo & IP6_FLOWINFO_TCLASS_MASK) >> IP6_FLOWINFO_TCLASS_SHIFT;
	}
	if (tb[IFLA_GRE_FLAGS])
		props->flags = nla_get_u32 (tb[IFLA_GRE_FLAGS]);

	return obj;
}
|
|
|
|
|
|
2015-11-27 22:22:25 +01:00
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2015-11-27 14:01:56 +01:00
|
|
|
static NMPObject *
|
|
|
|
|
_parse_lnk_ipip (const char *kind, struct nlattr *info_data)
|
|
|
|
|
{
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy[] = {
|
2015-11-27 14:01:56 +01:00
|
|
|
[IFLA_IPTUN_LINK] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_IPTUN_LOCAL] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_IPTUN_REMOTE] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_IPTUN_TTL] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_IPTUN_TOS] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
|
|
|
|
|
};
|
2019-02-15 11:33:57 +01:00
|
|
|
struct nlattr *tb[G_N_ELEMENTS (policy)];
|
2015-11-27 14:01:56 +01:00
|
|
|
NMPObject *obj;
|
|
|
|
|
NMPlatformLnkIpIp *props;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if ( !info_data
|
|
|
|
|
|| !nm_streq0 (kind, "ipip"))
|
2015-11-27 14:01:56 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nla_parse_nested_arr (tb, info_data, policy) < 0)
|
2015-11-27 14:01:56 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
obj = nmp_object_new (NMP_OBJECT_TYPE_LNK_IPIP, NULL);
|
|
|
|
|
props = &obj->lnk_ipip;
|
|
|
|
|
|
|
|
|
|
props->parent_ifindex = tb[IFLA_IPTUN_LINK] ? nla_get_u32 (tb[IFLA_IPTUN_LINK]) : 0;
|
|
|
|
|
props->local = tb[IFLA_IPTUN_LOCAL] ? nla_get_u32 (tb[IFLA_IPTUN_LOCAL]) : 0;
|
|
|
|
|
props->remote = tb[IFLA_IPTUN_REMOTE] ? nla_get_u32 (tb[IFLA_IPTUN_REMOTE]) : 0;
|
|
|
|
|
props->tos = tb[IFLA_IPTUN_TOS] ? nla_get_u8 (tb[IFLA_IPTUN_TOS]) : 0;
|
|
|
|
|
props->ttl = tb[IFLA_IPTUN_TTL] ? nla_get_u8 (tb[IFLA_IPTUN_TTL]) : 0;
|
|
|
|
|
props->path_mtu_discovery = !tb[IFLA_IPTUN_PMTUDISC] || !!nla_get_u8 (tb[IFLA_IPTUN_PMTUDISC]);
|
|
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2015-10-12 15:15:21 +02:00
|
|
|
static NMPObject *
|
|
|
|
|
_parse_lnk_macvlan (const char *kind, struct nlattr *info_data)
|
|
|
|
|
{
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy[] = {
|
2015-10-12 15:15:21 +02:00
|
|
|
[IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 },
|
|
|
|
|
};
|
|
|
|
|
NMPlatformLnkMacvlan *props;
|
2019-02-15 11:33:57 +01:00
|
|
|
struct nlattr *tb[G_N_ELEMENTS (policy)];
|
2015-10-12 15:15:21 +02:00
|
|
|
NMPObject *obj;
|
2015-12-04 09:49:39 +01:00
|
|
|
gboolean tap;
|
2015-10-12 15:15:21 +02:00
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if ( !info_data
|
|
|
|
|
|| !kind)
|
2015-12-04 09:49:39 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nm_streq (kind, "macvlan"))
|
2015-12-04 09:49:39 +01:00
|
|
|
tap = FALSE;
|
2019-02-15 11:33:57 +01:00
|
|
|
else if (nm_streq (kind, "macvtap"))
|
2015-12-04 09:49:39 +01:00
|
|
|
tap = TRUE;
|
|
|
|
|
else
|
2015-10-12 15:15:21 +02:00
|
|
|
return NULL;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nla_parse_nested_arr (tb, info_data, policy) < 0)
|
2015-10-12 15:15:21 +02:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
if (!tb[IFLA_MACVLAN_MODE])
|
|
|
|
|
return NULL;
|
|
|
|
|
|
2015-12-04 09:49:39 +01:00
|
|
|
obj = nmp_object_new (tap ? NMP_OBJECT_TYPE_LNK_MACVTAP : NMP_OBJECT_TYPE_LNK_MACVLAN, NULL);
|
2015-10-12 15:15:21 +02:00
|
|
|
props = &obj->lnk_macvlan;
|
2015-12-03 15:44:33 +01:00
|
|
|
props->mode = nla_get_u32 (tb[IFLA_MACVLAN_MODE]);
|
2015-12-04 09:49:39 +01:00
|
|
|
props->tap = tap;
|
2015-10-12 15:15:21 +02:00
|
|
|
|
|
|
|
|
if (tb[IFLA_MACVLAN_FLAGS])
|
|
|
|
|
props->no_promisc = NM_FLAGS_HAS (nla_get_u16 (tb[IFLA_MACVLAN_FLAGS]), MACVLAN_FLAG_NOPROMISC);
|
|
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2016-06-30 18:20:09 +02:00
|
|
|
static NMPObject *
|
|
|
|
|
_parse_lnk_macsec (const char *kind, struct nlattr *info_data)
|
|
|
|
|
{
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy[] = {
|
2016-06-30 18:20:09 +02:00
|
|
|
[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
|
|
|
|
|
[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
|
|
|
|
|
[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_MACSEC_ES] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
|
|
|
|
|
};
|
2019-02-15 11:33:57 +01:00
|
|
|
struct nlattr *tb[G_N_ELEMENTS (policy)];
|
2016-06-30 18:20:09 +02:00
|
|
|
NMPObject *obj;
|
|
|
|
|
NMPlatformLnkMacsec *props;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if ( !info_data
|
|
|
|
|
|| !nm_streq0 (kind, "macsec"))
|
2016-06-30 18:20:09 +02:00
|
|
|
return NULL;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nla_parse_nested_arr (tb, info_data, policy) < 0)
|
2016-06-30 18:20:09 +02:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
obj = nmp_object_new (NMP_OBJECT_TYPE_LNK_MACSEC, NULL);
|
|
|
|
|
props = &obj->lnk_macsec;
|
|
|
|
|
|
2019-02-18 10:40:03 +01:00
|
|
|
if (tb[IFLA_MACSEC_SCI]) { props->sci = nla_get_be64 (tb[IFLA_MACSEC_SCI]); }
|
|
|
|
|
if (tb[IFLA_MACSEC_ICV_LEN]) { props->icv_length = nla_get_u8 (tb[IFLA_MACSEC_ICV_LEN]); }
|
|
|
|
|
if (tb[IFLA_MACSEC_CIPHER_SUITE]) { props->cipher_suite = nla_get_u64 (tb[IFLA_MACSEC_CIPHER_SUITE]); }
|
|
|
|
|
if (tb[IFLA_MACSEC_WINDOW]) { props->window = nla_get_u32 (tb[IFLA_MACSEC_WINDOW]); }
|
|
|
|
|
if (tb[IFLA_MACSEC_ENCODING_SA]) { props->encoding_sa = !!nla_get_u8 (tb[IFLA_MACSEC_ENCODING_SA]); }
|
|
|
|
|
if (tb[IFLA_MACSEC_ENCRYPT]) { props->encrypt = !!nla_get_u8 (tb[IFLA_MACSEC_ENCRYPT]); }
|
|
|
|
|
if (tb[IFLA_MACSEC_PROTECT]) { props->protect = !!nla_get_u8 (tb[IFLA_MACSEC_PROTECT]); }
|
|
|
|
|
if (tb[IFLA_MACSEC_INC_SCI]) { props->include_sci = !!nla_get_u8 (tb[IFLA_MACSEC_INC_SCI]); }
|
|
|
|
|
if (tb[IFLA_MACSEC_ES]) { props->es = !!nla_get_u8 (tb[IFLA_MACSEC_ES]); }
|
|
|
|
|
if (tb[IFLA_MACSEC_SCB]) { props->scb = !!nla_get_u8 (tb[IFLA_MACSEC_SCB]); }
|
|
|
|
|
if (tb[IFLA_MACSEC_REPLAY_PROTECT]) { props->replay_protect = !!nla_get_u8 (tb[IFLA_MACSEC_REPLAY_PROTECT]); }
|
|
|
|
|
if (tb[IFLA_MACSEC_VALIDATION]) { props->validation = nla_get_u8 (tb[IFLA_MACSEC_VALIDATION]); }
|
2016-06-30 18:20:09 +02:00
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2015-11-11 18:41:48 +01:00
|
|
|
static NMPObject *
|
|
|
|
|
_parse_lnk_sit (const char *kind, struct nlattr *info_data)
|
|
|
|
|
{
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy[] = {
|
2015-11-11 18:41:48 +01:00
|
|
|
[IFLA_IPTUN_LINK] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_IPTUN_LOCAL] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_IPTUN_REMOTE] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_IPTUN_TTL] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_IPTUN_TOS] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_IPTUN_FLAGS] = { .type = NLA_U16 },
|
|
|
|
|
[IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
|
|
|
|
|
};
|
2019-02-15 11:33:57 +01:00
|
|
|
struct nlattr *tb[G_N_ELEMENTS (policy)];
|
2015-11-11 18:41:48 +01:00
|
|
|
NMPObject *obj;
|
|
|
|
|
NMPlatformLnkSit *props;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if ( !info_data
|
|
|
|
|
|| !nm_streq0 (kind, "sit"))
|
2015-11-11 18:41:48 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nla_parse_nested_arr (tb, info_data, policy) < 0)
|
2015-11-11 18:41:48 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
obj = nmp_object_new (NMP_OBJECT_TYPE_LNK_SIT, NULL);
|
|
|
|
|
props = &obj->lnk_sit;
|
|
|
|
|
|
|
|
|
|
props->parent_ifindex = tb[IFLA_IPTUN_LINK] ? nla_get_u32 (tb[IFLA_IPTUN_LINK]) : 0;
|
|
|
|
|
props->local = tb[IFLA_IPTUN_LOCAL] ? nla_get_u32 (tb[IFLA_IPTUN_LOCAL]) : 0;
|
|
|
|
|
props->remote = tb[IFLA_IPTUN_REMOTE] ? nla_get_u32 (tb[IFLA_IPTUN_REMOTE]) : 0;
|
|
|
|
|
props->tos = tb[IFLA_IPTUN_TOS] ? nla_get_u8 (tb[IFLA_IPTUN_TOS]) : 0;
|
|
|
|
|
props->ttl = tb[IFLA_IPTUN_TTL] ? nla_get_u8 (tb[IFLA_IPTUN_TTL]) : 0;
|
|
|
|
|
props->path_mtu_discovery = !tb[IFLA_IPTUN_PMTUDISC] || !!nla_get_u8 (tb[IFLA_IPTUN_PMTUDISC]);
|
|
|
|
|
props->flags = tb[IFLA_IPTUN_FLAGS] ? nla_get_u16 (tb[IFLA_IPTUN_FLAGS]) : 0;
|
|
|
|
|
props->proto = tb[IFLA_IPTUN_PROTO] ? nla_get_u8 (tb[IFLA_IPTUN_PROTO]) : 0;
|
|
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel vesions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
static NMPObject *
|
|
|
|
|
_parse_lnk_tun (const char *kind, struct nlattr *info_data)
|
|
|
|
|
{
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy[] = {
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel vesions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
[IFLA_TUN_OWNER] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_TUN_GROUP] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_TUN_TYPE] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_TUN_PI] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_TUN_VNET_HDR] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_TUN_PERSIST] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_TUN_MULTI_QUEUE] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_TUN_NUM_QUEUES] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_TUN_NUM_DISABLED_QUEUES] = { .type = NLA_U32 },
|
|
|
|
|
};
|
2019-02-15 11:33:57 +01:00
|
|
|
struct nlattr *tb[G_N_ELEMENTS (policy)];
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel vesions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
NMPObject *obj;
|
|
|
|
|
NMPlatformLnkTun *props;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if ( !info_data
|
|
|
|
|
|| !nm_streq0 (kind, "tun"))
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel vesions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nla_parse_nested_arr (tb, info_data, policy) < 0)
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel vesions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (!tb[IFLA_TUN_TYPE])
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel vesions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
obj = nmp_object_new (NMP_OBJECT_TYPE_LNK_TUN, NULL);
|
|
|
|
|
props = &obj->lnk_tun;
|
|
|
|
|
|
|
|
|
|
props->type = nla_get_u8 (tb[IFLA_TUN_TYPE]);
|
|
|
|
|
|
|
|
|
|
props->pi = !!nla_get_u8_cond (tb, IFLA_TUN_PI, FALSE);
|
|
|
|
|
props->vnet_hdr = !!nla_get_u8_cond (tb, IFLA_TUN_VNET_HDR, FALSE);
|
|
|
|
|
props->multi_queue = !!nla_get_u8_cond (tb, IFLA_TUN_MULTI_QUEUE, FALSE);
|
|
|
|
|
props->persist = !!nla_get_u8_cond (tb, IFLA_TUN_PERSIST, FALSE);
|
|
|
|
|
|
|
|
|
|
if (tb[IFLA_TUN_OWNER]) {
|
|
|
|
|
props->owner_valid = TRUE;
|
|
|
|
|
props->owner = nla_get_u32 (tb[IFLA_TUN_OWNER]);
|
|
|
|
|
}
|
|
|
|
|
if (tb[IFLA_TUN_GROUP]) {
|
|
|
|
|
props->group_valid = TRUE;
|
|
|
|
|
props->group = nla_get_u32 (tb[IFLA_TUN_GROUP]);
|
|
|
|
|
}
|
|
|
|
|
return obj;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2015-10-27 16:14:54 +01:00
|
|
|
/* Parse a nested IFLA_VLAN_INGRESS_QOS/IFLA_VLAN_EGRESS_QOS attribute into
 * a sorted, duplicate-free list of QoS mappings.
 *
 * @nlattr: the nested attribute; may be NULL, in which case nothing is
 *   returned and the function succeeds.
 * @out_map: (transfer full): on success, if at least one mapping was found,
 *   set to a g_new()-allocated array that the caller must free. Must point
 *   to NULL on input (asserted below).
 * @out_n_map: receives the number of entries in @out_map. Must point to 0
 *   on input.
 *
 * Returns: FALSE if any nested entry is too short to hold a mapping,
 *   TRUE otherwise. */
static gboolean
_vlan_qos_mapping_from_nla (struct nlattr *nlattr,
                            const NMVlanQosMapping **out_map,
                            guint *out_n_map)
{
	struct nlattr *nla;
	int remaining;
	gs_unref_ptrarray GPtrArray *array = NULL;

	/* NMVlanQosMapping is copied out of the raw netlink payload below, so it
	 * must be layout-compatible with kernel's struct ifla_vlan_qos_mapping. */
	G_STATIC_ASSERT (sizeof (NMVlanQosMapping) == sizeof (struct ifla_vlan_qos_mapping));
	G_STATIC_ASSERT (sizeof (((NMVlanQosMapping *) 0)->to) == sizeof (((struct ifla_vlan_qos_mapping *) 0)->to));
	G_STATIC_ASSERT (sizeof (((NMVlanQosMapping *) 0)->from) == sizeof (((struct ifla_vlan_qos_mapping *) 0)->from));
	G_STATIC_ASSERT (sizeof (NMVlanQosMapping) == sizeof (((NMVlanQosMapping *) 0)->from) + sizeof (((NMVlanQosMapping *) 0)->to));

	nm_assert (out_map && !*out_map);
	nm_assert (out_n_map && !*out_n_map);

	if (!nlattr)
		return TRUE;

	/* first collect pointers into the nlattr payload; they stay valid for the
	 * duration of this function only, so entries are copied out below. */
	array = g_ptr_array_new ();
	nla_for_each_nested (nla, nlattr, remaining) {
		if (nla_len (nla) < sizeof (NMVlanQosMapping))
			return FALSE;
		g_ptr_array_add (array, nla_data (nla));
	}

	if (array->len > 0) {
		NMVlanQosMapping *list;
		guint i, j;

		/* The sorting is necessary, because for egress mapping, kernel
		 * doesn't send the items strictly sorted by the from field. */
		g_ptr_array_sort_with_data (array, _vlan_qos_mapping_cmp_from_ptr, NULL);

		list = g_new (NMVlanQosMapping, array->len);

		for (i = 0, j = 0; i < array->len; i++) {
			NMVlanQosMapping *map;

			map = array->pdata[i];

			/* kernel doesn't really send us duplicates. Just be extra cautious
			 * because we want strong guarantees about the sort order and uniqueness
			 * of our mapping list (for simpler equality comparison). */
			if (   j > 0
			    && list[j - 1].from == map->from)
				list[j - 1] = *map;   /* duplicate "from": later entry wins */
			else
				list[j++] = *map;
		}

		*out_n_map = j;
		*out_map = list;
	}

	return TRUE;
}
|
|
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
/* Copied and heavily modified from libnl3's vlan_parse() */
|
2015-10-12 13:44:44 +02:00
|
|
|
static NMPObject *
|
|
|
|
|
_parse_lnk_vlan (const char *kind, struct nlattr *info_data)
|
2015-10-12 16:07:01 +02:00
|
|
|
{
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy[] = {
|
2015-10-12 16:07:01 +02:00
|
|
|
[IFLA_VLAN_ID] = { .type = NLA_U16 },
|
2017-01-16 16:11:05 +01:00
|
|
|
[IFLA_VLAN_FLAGS] = { .minlen = nm_offsetofend (struct ifla_vlan_flags, flags) },
|
2015-10-12 16:07:01 +02:00
|
|
|
[IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED },
|
|
|
|
|
[IFLA_VLAN_EGRESS_QOS] = { .type = NLA_NESTED },
|
|
|
|
|
[IFLA_VLAN_PROTOCOL] = { .type = NLA_U16 },
|
|
|
|
|
};
|
2019-02-15 11:33:57 +01:00
|
|
|
struct nlattr *tb[G_N_ELEMENTS (policy)];
|
2015-10-27 16:14:54 +01:00
|
|
|
nm_auto_nmpobj NMPObject *obj = NULL;
|
|
|
|
|
NMPObject *obj_result;
|
2015-05-29 09:40:24 +02:00
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if ( !info_data
|
|
|
|
|
|| !nm_streq0 (kind, "vlan"))
|
2015-10-12 13:44:44 +02:00
|
|
|
return NULL;
|
2015-05-29 09:40:24 +02:00
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nla_parse_nested_arr (tb, info_data, policy) < 0)
|
2015-10-12 13:44:44 +02:00
|
|
|
return NULL;
|
2015-05-29 09:40:24 +02:00
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
if (!tb[IFLA_VLAN_ID])
|
2015-10-12 13:44:44 +02:00
|
|
|
return NULL;
|
2015-05-29 09:40:24 +02:00
|
|
|
|
2015-10-12 13:44:44 +02:00
|
|
|
obj = nmp_object_new (NMP_OBJECT_TYPE_LNK_VLAN, NULL);
|
2015-10-27 16:14:54 +01:00
|
|
|
obj->lnk_vlan.id = nla_get_u16 (tb[IFLA_VLAN_ID]);
|
2015-05-29 09:40:24 +02:00
|
|
|
|
2015-10-27 16:14:54 +01:00
|
|
|
if (tb[IFLA_VLAN_FLAGS]) {
|
|
|
|
|
struct ifla_vlan_flags flags;
|
|
|
|
|
|
2019-02-17 11:12:02 +01:00
|
|
|
nla_memcpy (&flags, tb[IFLA_VLAN_FLAGS], sizeof (flags));
|
2015-10-27 16:14:54 +01:00
|
|
|
|
|
|
|
|
obj->lnk_vlan.flags = flags.flags;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!_vlan_qos_mapping_from_nla (tb[IFLA_VLAN_INGRESS_QOS],
|
|
|
|
|
&obj->_lnk_vlan.ingress_qos_map,
|
|
|
|
|
&obj->_lnk_vlan.n_ingress_qos_map))
|
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
if (!_vlan_qos_mapping_from_nla (tb[IFLA_VLAN_EGRESS_QOS],
|
|
|
|
|
&obj->_lnk_vlan.egress_qos_map,
|
|
|
|
|
&obj->_lnk_vlan.n_egress_qos_map))
|
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
obj_result = obj;
|
|
|
|
|
obj = NULL;
|
|
|
|
|
return obj_result;
|
2015-10-12 16:07:01 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2015-10-12 15:15:21 +02:00
|
|
|
/* The installed kernel headers might not have VXLAN stuff at all, or
 * they might have the original properties, but not PORT, GROUP6, or LOCAL6.
 * So until we depend on kernel >= 3.11, we just ignore the actual enum
 * in if_link.h and define the values ourselves.
 */
#define IFLA_VXLAN_UNSPEC 0
#define IFLA_VXLAN_ID 1
#define IFLA_VXLAN_GROUP 2
#define IFLA_VXLAN_LINK 3
#define IFLA_VXLAN_LOCAL 4
#define IFLA_VXLAN_TTL 5
#define IFLA_VXLAN_TOS 6
#define IFLA_VXLAN_LEARNING 7
#define IFLA_VXLAN_AGEING 8
#define IFLA_VXLAN_LIMIT 9
#define IFLA_VXLAN_PORT_RANGE 10
#define IFLA_VXLAN_PROXY 11
#define IFLA_VXLAN_RSC 12
#define IFLA_VXLAN_L2MISS 13
#define IFLA_VXLAN_L3MISS 14
#define IFLA_VXLAN_PORT 15
#define IFLA_VXLAN_GROUP6 16
#define IFLA_VXLAN_LOCAL6 17
/* undefine the kernel header's value (if any) so that our private
 * maximum covering the attributes above takes effect. */
#undef IFLA_VXLAN_MAX
#define IFLA_VXLAN_MAX IFLA_VXLAN_LOCAL6

/* older kernel header might not contain 'struct ifla_vxlan_port_range'.
 * Redefine it. Both port values are in network byte order as sent by
 * kernel; see the ntohs() conversions in _parse_lnk_vxlan(). */
struct nm_ifla_vxlan_port_range {
	guint16 low;
	guint16 high;
};
|
|
|
|
|
|
|
|
|
|
static NMPObject *
|
|
|
|
|
_parse_lnk_vxlan (const char *kind, struct nlattr *info_data)
|
|
|
|
|
{
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy[] = {
|
2015-10-12 15:15:21 +02:00
|
|
|
[IFLA_VXLAN_ID] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_VXLAN_GROUP] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_VXLAN_GROUP6] = { .type = NLA_UNSPEC,
|
|
|
|
|
.minlen = sizeof (struct in6_addr) },
|
|
|
|
|
[IFLA_VXLAN_LINK] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_VXLAN_LOCAL] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_VXLAN_LOCAL6] = { .type = NLA_UNSPEC,
|
|
|
|
|
.minlen = sizeof (struct in6_addr) },
|
|
|
|
|
[IFLA_VXLAN_TOS] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_VXLAN_TTL] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_VXLAN_PORT_RANGE] = { .type = NLA_UNSPEC,
|
|
|
|
|
.minlen = sizeof (struct nm_ifla_vxlan_port_range) },
|
|
|
|
|
[IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_VXLAN_RSC] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_VXLAN_PORT] = { .type = NLA_U16 },
|
|
|
|
|
};
|
|
|
|
|
NMPlatformLnkVxlan *props;
|
2019-02-15 11:33:57 +01:00
|
|
|
struct nlattr *tb[G_N_ELEMENTS (policy)];
|
2015-10-12 15:15:21 +02:00
|
|
|
NMPObject *obj;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if ( !info_data
|
|
|
|
|
|| !nm_streq0 (kind, "vxlan"))
|
2015-10-12 15:15:21 +02:00
|
|
|
return NULL;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nla_parse_nested_arr (tb, info_data, policy) < 0)
|
2015-10-12 15:15:21 +02:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
obj = nmp_object_new (NMP_OBJECT_TYPE_LNK_VXLAN, NULL);
|
|
|
|
|
|
|
|
|
|
props = &obj->lnk_vxlan;
|
|
|
|
|
|
|
|
|
|
if (tb[IFLA_VXLAN_LINK])
|
|
|
|
|
props->parent_ifindex = nla_get_u32 (tb[IFLA_VXLAN_LINK]);
|
|
|
|
|
if (tb[IFLA_VXLAN_ID])
|
|
|
|
|
props->id = nla_get_u32 (tb[IFLA_VXLAN_ID]);
|
|
|
|
|
if (tb[IFLA_VXLAN_GROUP])
|
|
|
|
|
props->group = nla_get_u32 (tb[IFLA_VXLAN_GROUP]);
|
|
|
|
|
if (tb[IFLA_VXLAN_LOCAL])
|
|
|
|
|
props->local = nla_get_u32 (tb[IFLA_VXLAN_LOCAL]);
|
|
|
|
|
if (tb[IFLA_VXLAN_LOCAL6])
|
2019-02-21 08:37:40 +01:00
|
|
|
props->local6 = *nla_data_as (struct in6_addr, tb[IFLA_VXLAN_LOCAL6]);
|
|
|
|
|
if (tb[IFLA_VXLAN_GROUP6])
|
|
|
|
|
props->group6 = *nla_data_as (struct in6_addr, tb[IFLA_VXLAN_GROUP6]);
|
2015-10-12 15:15:21 +02:00
|
|
|
|
|
|
|
|
if (tb[IFLA_VXLAN_AGEING])
|
|
|
|
|
props->ageing = nla_get_u32 (tb[IFLA_VXLAN_AGEING]);
|
|
|
|
|
if (tb[IFLA_VXLAN_LIMIT])
|
|
|
|
|
props->limit = nla_get_u32 (tb[IFLA_VXLAN_LIMIT]);
|
|
|
|
|
if (tb[IFLA_VXLAN_TOS])
|
|
|
|
|
props->tos = nla_get_u8 (tb[IFLA_VXLAN_TOS]);
|
|
|
|
|
if (tb[IFLA_VXLAN_TTL])
|
|
|
|
|
props->ttl = nla_get_u8 (tb[IFLA_VXLAN_TTL]);
|
|
|
|
|
|
|
|
|
|
if (tb[IFLA_VXLAN_PORT])
|
|
|
|
|
props->dst_port = ntohs (nla_get_u16 (tb[IFLA_VXLAN_PORT]));
|
|
|
|
|
|
|
|
|
|
if (tb[IFLA_VXLAN_PORT_RANGE]) {
|
2019-02-21 08:37:40 +01:00
|
|
|
struct nm_ifla_vxlan_port_range *range;
|
|
|
|
|
|
|
|
|
|
range = nla_data_as (struct nm_ifla_vxlan_port_range, tb[IFLA_VXLAN_PORT_RANGE]);
|
2015-10-12 15:15:21 +02:00
|
|
|
props->src_port_min = ntohs (range->low);
|
|
|
|
|
props->src_port_max = ntohs (range->high);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (tb[IFLA_VXLAN_LEARNING])
|
|
|
|
|
props->learning = !!nla_get_u8 (tb[IFLA_VXLAN_LEARNING]);
|
|
|
|
|
if (tb[IFLA_VXLAN_PROXY])
|
|
|
|
|
props->proxy = !!nla_get_u8 (tb[IFLA_VXLAN_PROXY]);
|
|
|
|
|
if (tb[IFLA_VXLAN_RSC])
|
|
|
|
|
props->rsc = !!nla_get_u8 (tb[IFLA_VXLAN_RSC]);
|
|
|
|
|
if (tb[IFLA_VXLAN_L2MISS])
|
|
|
|
|
props->l2miss = !!nla_get_u8 (tb[IFLA_VXLAN_L2MISS]);
|
|
|
|
|
if (tb[IFLA_VXLAN_L3MISS])
|
|
|
|
|
props->l3miss = !!nla_get_u8 (tb[IFLA_VXLAN_L3MISS]);
|
|
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2018-09-04 11:16:28 +02:00
|
|
|
static gboolean
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
_wireguard_update_from_allowed_ips_nla (NMPWireGuardAllowedIP *allowed_ip,
|
|
|
|
|
struct nlattr *nlattr)
|
2018-03-13 13:35:35 +00:00
|
|
|
{
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy[] = {
|
2018-03-13 13:35:35 +00:00
|
|
|
[WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 },
|
|
|
|
|
[WGALLOWEDIP_A_IPADDR] = { .minlen = sizeof (struct in_addr) },
|
|
|
|
|
[WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 },
|
|
|
|
|
};
|
2019-02-15 11:33:57 +01:00
|
|
|
struct nlattr *tb[G_N_ELEMENTS (policy)];
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
int family;
|
2018-03-13 13:35:35 +00:00
|
|
|
int addr_len;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nla_parse_nested_arr (tb, nlattr, policy) < 0)
|
2018-09-04 11:16:28 +02:00
|
|
|
return FALSE;
|
2018-03-13 13:35:35 +00:00
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
if (!tb[WGALLOWEDIP_A_FAMILY])
|
|
|
|
|
return FALSE;
|
2018-03-13 13:35:35 +00:00
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
family = nla_get_u16 (tb[WGALLOWEDIP_A_FAMILY]);
|
|
|
|
|
if (family == AF_INET)
|
2018-03-13 13:35:35 +00:00
|
|
|
addr_len = sizeof (in_addr_t);
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
else if (family == AF_INET6)
|
2018-03-13 13:35:35 +00:00
|
|
|
addr_len = sizeof (struct in6_addr);
|
2018-09-04 11:16:28 +02:00
|
|
|
else
|
|
|
|
|
return FALSE;
|
2018-03-13 13:35:35 +00:00
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
_check_addr_or_return_val (tb, WGALLOWEDIP_A_IPADDR, addr_len, FALSE);
|
|
|
|
|
|
2018-12-13 11:08:32 +01:00
|
|
|
*allowed_ip = (NMPWireGuardAllowedIP) {
|
|
|
|
|
.family = family,
|
|
|
|
|
};
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
|
|
|
|
|
nm_assert ((int) allowed_ip->family == family);
|
|
|
|
|
|
|
|
|
|
if (tb[WGALLOWEDIP_A_IPADDR])
|
|
|
|
|
nla_memcpy (&allowed_ip->addr, tb[WGALLOWEDIP_A_IPADDR], addr_len);
|
|
|
|
|
if (tb[WGALLOWEDIP_A_CIDR_MASK])
|
|
|
|
|
allowed_ip->mask = nla_get_u8 (tb[WGALLOWEDIP_A_CIDR_MASK]);
|
2018-03-13 13:35:35 +00:00
|
|
|
|
2018-09-04 11:16:28 +02:00
|
|
|
return TRUE;
|
2018-03-13 13:35:35 +00:00
|
|
|
}
|
|
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
/* Temporary per-peer node used while parsing WGDEVICE_A_PEERS netlink
 * messages: peers are collected on a CList during construction, because
 * the total number of peers is not known upfront. Once parsing finishes,
 * the collected NMPWireGuardPeer instances are repacked into a flat
 * buffer and these nodes are freed. */
typedef struct {
	CList lst;                /* linkage into the under-construction peer list */
	NMPWireGuardPeer data;    /* the peer being assembled from netlink attributes */
} WireGuardPeerConstruct;
|
|
|
|
|
|
2018-09-04 11:16:28 +02:00
|
|
|
static gboolean
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
_wireguard_update_from_peers_nla (CList *peers,
|
|
|
|
|
GArray **p_allowed_ips,
|
2018-03-13 13:35:35 +00:00
|
|
|
struct nlattr *peer_attr)
|
|
|
|
|
{
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy[] = {
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
[WGPEER_A_PUBLIC_KEY] = { .minlen = NMP_WIREGUARD_PUBLIC_KEY_LEN },
|
2018-03-13 13:35:35 +00:00
|
|
|
[WGPEER_A_PRESHARED_KEY] = { },
|
|
|
|
|
[WGPEER_A_FLAGS] = { .type = NLA_U32 },
|
|
|
|
|
[WGPEER_A_ENDPOINT] = { },
|
|
|
|
|
[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 },
|
|
|
|
|
[WGPEER_A_LAST_HANDSHAKE_TIME] = { },
|
|
|
|
|
[WGPEER_A_RX_BYTES] = { .type = NLA_U64 },
|
|
|
|
|
[WGPEER_A_TX_BYTES] = { .type = NLA_U64 },
|
|
|
|
|
[WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED },
|
|
|
|
|
};
|
2019-02-15 11:33:57 +01:00
|
|
|
struct nlattr *tb[G_N_ELEMENTS (policy)];
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
WireGuardPeerConstruct *peer_c;
|
2018-03-13 13:35:35 +00:00
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nla_parse_nested_arr (tb, peer_attr, policy) < 0)
|
2018-09-04 11:16:28 +02:00
|
|
|
return FALSE;
|
2018-03-13 13:35:35 +00:00
|
|
|
|
2018-09-07 09:53:52 +02:00
|
|
|
if (!tb[WGPEER_A_PUBLIC_KEY])
|
2018-09-04 11:16:28 +02:00
|
|
|
return FALSE;
|
2018-03-13 13:35:35 +00:00
|
|
|
|
|
|
|
|
/* a peer with the same public key as last peer is just a continuation for extra AllowedIPs */
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
peer_c = c_list_last_entry (peers, WireGuardPeerConstruct, lst);
|
|
|
|
|
if ( peer_c
|
|
|
|
|
&& !memcmp (nla_data (tb[WGPEER_A_PUBLIC_KEY]), peer_c->data.public_key, NMP_WIREGUARD_PUBLIC_KEY_LEN)) {
|
|
|
|
|
G_STATIC_ASSERT_EXPR (NMP_WIREGUARD_PUBLIC_KEY_LEN == sizeof (peer_c->data.public_key));
|
|
|
|
|
/* this message is a continuation of the previous peer.
|
|
|
|
|
* Only parse WGPEER_A_ALLOWEDIPS below. */
|
|
|
|
|
}
|
2018-09-04 11:16:28 +02:00
|
|
|
else {
|
|
|
|
|
/* otherwise, start a new peer */
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
peer_c = g_slice_new0 (WireGuardPeerConstruct);
|
|
|
|
|
c_list_link_tail (peers, &peer_c->lst);
|
2018-09-04 11:16:28 +02:00
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
nla_memcpy (&peer_c->data.public_key, tb[WGPEER_A_PUBLIC_KEY], sizeof (peer_c->data.public_key));
|
2018-09-04 11:16:28 +02:00
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
if (tb[WGPEER_A_PRESHARED_KEY]) {
|
|
|
|
|
nla_memcpy (&peer_c->data.preshared_key, tb[WGPEER_A_PRESHARED_KEY], sizeof (peer_c->data.preshared_key));
|
|
|
|
|
/* FIXME(netlink-bzero-secret) */
|
|
|
|
|
nm_explicit_bzero (nla_data (tb[WGPEER_A_PRESHARED_KEY]),
|
|
|
|
|
nla_len (tb[WGPEER_A_PRESHARED_KEY]));
|
|
|
|
|
}
|
2019-01-13 09:46:19 +01:00
|
|
|
|
|
|
|
|
nm_sock_addr_union_cpy_untrusted (&peer_c->data.endpoint,
|
|
|
|
|
tb[WGPEER_A_ENDPOINT] ? nla_data (tb[WGPEER_A_ENDPOINT]) : NULL,
|
2019-02-21 08:37:40 +01:00
|
|
|
tb[WGPEER_A_ENDPOINT] ? nla_len (tb[WGPEER_A_ENDPOINT]) : 0);
|
2019-01-13 09:46:19 +01:00
|
|
|
|
2018-09-07 09:53:52 +02:00
|
|
|
if (tb[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL])
|
2019-02-18 10:12:22 +01:00
|
|
|
peer_c->data.persistent_keepalive_interval = nla_get_u16 (tb[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]);
|
2018-09-07 09:53:52 +02:00
|
|
|
if (tb[WGPEER_A_LAST_HANDSHAKE_TIME])
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
nla_memcpy (&peer_c->data.last_handshake_time, tb[WGPEER_A_LAST_HANDSHAKE_TIME], sizeof (peer_c->data.last_handshake_time));
|
2018-09-07 09:53:52 +02:00
|
|
|
if (tb[WGPEER_A_RX_BYTES])
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
peer_c->data.rx_bytes = nla_get_u64 (tb[WGPEER_A_RX_BYTES]);
|
2018-09-07 09:53:52 +02:00
|
|
|
if (tb[WGPEER_A_TX_BYTES])
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
peer_c->data.tx_bytes = nla_get_u64 (tb[WGPEER_A_TX_BYTES]);
|
2018-03-13 13:35:35 +00:00
|
|
|
}
|
|
|
|
|
|
2018-09-07 09:53:52 +02:00
|
|
|
if (tb[WGPEER_A_ALLOWEDIPS]) {
|
2018-03-13 13:35:35 +00:00
|
|
|
struct nlattr *attr;
|
|
|
|
|
int rem;
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
GArray *allowed_ips = *p_allowed_ips;
|
2018-03-13 13:35:35 +00:00
|
|
|
|
2018-09-07 09:53:52 +02:00
|
|
|
nla_for_each_nested (attr, tb[WGPEER_A_ALLOWEDIPS], rem) {
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
if (!allowed_ips) {
|
|
|
|
|
allowed_ips = g_array_new (FALSE, FALSE, sizeof (NMPWireGuardAllowedIP));
|
|
|
|
|
*p_allowed_ips = allowed_ips;
|
|
|
|
|
g_array_set_size (allowed_ips, 1);
|
|
|
|
|
} else
|
|
|
|
|
g_array_set_size (allowed_ips, allowed_ips->len + 1);
|
|
|
|
|
|
|
|
|
|
if (!_wireguard_update_from_allowed_ips_nla (&g_array_index (allowed_ips,
|
|
|
|
|
NMPWireGuardAllowedIP,
|
|
|
|
|
allowed_ips->len - 1),
|
|
|
|
|
attr)) {
|
|
|
|
|
/* we ignore the error of parsing one allowed-ip. */
|
|
|
|
|
g_array_set_size (allowed_ips, allowed_ips->len - 1);
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!peer_c->data._construct_idx_end)
|
|
|
|
|
peer_c->data._construct_idx_start = allowed_ips->len - 1;
|
|
|
|
|
peer_c->data._construct_idx_end = allowed_ips->len;
|
2018-03-13 13:35:35 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-04 11:16:28 +02:00
|
|
|
return TRUE;
|
2018-03-13 13:35:35 +00:00
|
|
|
}
|
|
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
/* Accumulator state passed (as the callback argument) through the
 * WG_CMD_GET_DEVICE netlink dump while parsing a WireGuard link.
 * Peers and allowed-ips may arrive spread over several messages of
 * the dump, so they are collected here incrementally and only
 * converted to their final representation once the dump completes. */
typedef struct {
	/* ifindex of the link being queried; const because it identifies the
	 * request and messages for other ifindexes are skipped. */
	const int ifindex;

	/* the NMP_OBJECT_TYPE_LNK_WIREGUARD object under construction; created
	 * on the first (non-continuation) message of the dump. */
	NMPObject *obj;

	/* CList of WireGuardPeerConstruct entries collected so far. A list is
	 * used (rather than a growing array) so peer entries never relocate. */
	CList peers;

	/* single GArray of NMPWireGuardAllowedIP for *all* peers; each peer
	 * records index ranges into it while the dump is being parsed. */
	GArray *allowed_ips;
} WireGuardParseData;
|
|
|
|
|
|
2018-03-13 13:35:35 +00:00
|
|
|
/* Netlink valid-message callback for the WG_CMD_GET_DEVICE dump.
 *
 * @msg: one genl message of the dump.
 * @arg: a WireGuardParseData accumulator (see struct above).
 *
 * A single dump may deliver the device attributes in the first message and
 * continue peer/allowed-ip data in follow-up messages that repeat only
 * WGDEVICE_A_PEERS. The callback therefore creates parse_data->obj once and
 * treats later messages as continuations.
 *
 * Returns NL_SKIP for messages that don't match (or can't be parsed) and
 * NL_OK otherwise; per-peer parse errors are intentionally ignored so that
 * whatever could be parsed is still returned. */
static int
_wireguard_get_device_cb (struct nl_msg *msg, void *arg)
{
	static const struct nla_policy policy[] = {
		[WGDEVICE_A_IFINDEX]     = { .type = NLA_U32 },
		[WGDEVICE_A_IFNAME]      = { .type = NLA_NUL_STRING, .maxlen = IFNAMSIZ },
		/* no policy for the keys: they are fixed-size binary blobs,
		 * copied below with nla_memcpy() bounded by the field size. */
		[WGDEVICE_A_PRIVATE_KEY] = { },
		[WGDEVICE_A_PUBLIC_KEY]  = { },
		[WGDEVICE_A_FLAGS]       = { .type = NLA_U32 },
		[WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 },
		[WGDEVICE_A_FWMARK]      = { .type = NLA_U32 },
		[WGDEVICE_A_PEERS]       = { .type = NLA_NESTED },
	};
	struct nlattr *tb[G_N_ELEMENTS (policy)];
	WireGuardParseData *parse_data = arg;

	if (genlmsg_parse_arr (nlmsg_hdr (msg), 0, tb, policy) < 0)
		return NL_SKIP;

	if (tb[WGDEVICE_A_IFINDEX]) {
		int ifindex;

		/* only accept messages for the ifindex we asked about. */
		ifindex = (int) nla_get_u32 (tb[WGDEVICE_A_IFINDEX]);
		if (   ifindex <= 0
		    || parse_data->ifindex != ifindex)
			return NL_SKIP;
	} else {
		/* a message without an ifindex is only meaningful as a
		 * continuation of an already-started object. */
		if (!parse_data->obj)
			return NL_SKIP;
	}

	if (parse_data->obj) {
		/* we already have an object instance. This means the netlink message
		 * is a continuation, only providing more WGDEVICE_A_PEERS data below. */
	} else {
		NMPObject *obj;
		NMPlatformLnkWireGuard *props;

		obj = nmp_object_new (NMP_OBJECT_TYPE_LNK_WIREGUARD, NULL);
		props = &obj->lnk_wireguard;

		if (tb[WGDEVICE_A_PRIVATE_KEY]) {
			nla_memcpy (props->private_key, tb[WGDEVICE_A_PRIVATE_KEY], sizeof (props->private_key));
			/* FIXME(netlink-bzero-secret): extend netlink library to wipe memory. For now,
			 * just hack it here (yes, this does not cover all places where the
			 * private key was copied). */
			nm_explicit_bzero (nla_data (tb[WGDEVICE_A_PRIVATE_KEY]),
			                   nla_len (tb[WGDEVICE_A_PRIVATE_KEY]));
		}
		if (tb[WGDEVICE_A_PUBLIC_KEY])
			nla_memcpy (props->public_key, tb[WGDEVICE_A_PUBLIC_KEY], sizeof (props->public_key));
		if (tb[WGDEVICE_A_LISTEN_PORT])
			props->listen_port = nla_get_u16 (tb[WGDEVICE_A_LISTEN_PORT]);
		if (tb[WGDEVICE_A_FWMARK])
			props->fwmark = nla_get_u32 (tb[WGDEVICE_A_FWMARK]);

		/* transfer ownership of the new object to the accumulator. */
		parse_data->obj = obj;
	}

	if (tb[WGDEVICE_A_PEERS]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested (attr, tb[WGDEVICE_A_PEERS], rem) {
			if (!_wireguard_update_from_peers_nla (&parse_data->peers, &parse_data->allowed_ips, attr)) {
				/* we ignore the error of parsing one peer.
				 * _wireguard_update_from_peers_nla() leaves the @peers array in the
				 * desired state. */
			}
		}
	}

	return NL_OK;
}
|
|
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
static const NMPObject *
|
|
|
|
|
_wireguard_read_info (NMPlatform *platform /* used only as logging context */,
|
2018-12-25 18:41:28 +01:00
|
|
|
struct nl_sock *genl,
|
|
|
|
|
int wireguard_family_id,
|
|
|
|
|
int ifindex)
|
2018-09-04 16:43:44 +02:00
|
|
|
{
|
|
|
|
|
nm_auto_nlmsg struct nl_msg *msg = NULL;
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
NMPObject *obj = NULL;
|
|
|
|
|
WireGuardPeerConstruct *peer_c;
|
|
|
|
|
WireGuardPeerConstruct *peer_c_safe;
|
|
|
|
|
gs_unref_array GArray *allowed_ips = NULL;
|
|
|
|
|
WireGuardParseData parse_data = {
|
|
|
|
|
.ifindex = ifindex,
|
2018-09-04 16:43:44 +02:00
|
|
|
};
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
guint i;
|
2018-09-04 16:43:44 +02:00
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
nm_assert (genl);
|
|
|
|
|
nm_assert (wireguard_family_id >= 0);
|
|
|
|
|
nm_assert (ifindex > 0);
|
2018-09-04 16:43:44 +02:00
|
|
|
|
2019-01-11 17:07:03 -02:00
|
|
|
_LOGT ("wireguard: fetching information for ifindex %d (genl-id %d)...", ifindex, wireguard_family_id);
|
2018-12-25 18:41:28 +01:00
|
|
|
|
2018-09-04 16:43:44 +02:00
|
|
|
msg = nlmsg_alloc ();
|
|
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
if (!genlmsg_put (msg,
|
|
|
|
|
NL_AUTO_PORT,
|
|
|
|
|
NL_AUTO_SEQ,
|
|
|
|
|
wireguard_family_id,
|
|
|
|
|
0,
|
|
|
|
|
NLM_F_DUMP,
|
|
|
|
|
WG_CMD_GET_DEVICE,
|
|
|
|
|
1))
|
|
|
|
|
return NULL;
|
2018-09-04 16:43:44 +02:00
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
NLA_PUT_U32 (msg, WGDEVICE_A_IFINDEX, (guint32) ifindex);
|
2018-09-04 16:43:44 +02:00
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
if (nl_send_auto (genl, msg) < 0)
|
|
|
|
|
return NULL;
|
2018-09-04 16:43:44 +02:00
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
c_list_init (&parse_data.peers);
|
|
|
|
|
|
|
|
|
|
/* we ignore errors, and return whatever we could successfully
|
|
|
|
|
* parse. */
|
|
|
|
|
nl_recvmsgs (genl,
|
|
|
|
|
&((const struct nl_cb) {
|
|
|
|
|
.valid_cb = _wireguard_get_device_cb,
|
|
|
|
|
.valid_arg = (gpointer) &parse_data,
|
|
|
|
|
}));
|
|
|
|
|
|
|
|
|
|
/* unpack: transfer ownership */
|
|
|
|
|
obj = parse_data.obj;
|
|
|
|
|
allowed_ips = parse_data.allowed_ips;
|
|
|
|
|
|
|
|
|
|
if (!obj) {
|
|
|
|
|
while ((peer_c = c_list_first_entry (&parse_data.peers, WireGuardPeerConstruct, lst))) {
|
|
|
|
|
c_list_unlink_stale (&peer_c->lst);
|
|
|
|
|
nm_explicit_bzero (&peer_c->data.preshared_key, sizeof (peer_c->data.preshared_key));
|
|
|
|
|
g_slice_free (WireGuardPeerConstruct, peer_c);
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
2018-09-04 16:43:44 +02:00
|
|
|
}
|
|
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
/* we receive peers/allowed-ips possibly in separate netlink messages. Hence, while
|
|
|
|
|
* parsing the dump, we don't know upfront how many peers/allowed-ips we will receive.
|
|
|
|
|
*
|
|
|
|
|
* We solve that, by collecting all peers with a CList. It's done this way,
|
|
|
|
|
* because a GArray would require growing the array, but we want to bzero()
|
|
|
|
|
* the preshared-key of each peer while reallocating. The CList apprach avoids
|
|
|
|
|
* that.
|
|
|
|
|
*
|
|
|
|
|
* For allowed-ips, we instead track one GArray, which are all appended
|
|
|
|
|
* there. The realloc/resize of the GArray is fine there. However,
|
|
|
|
|
* while we build the GArray, we don't yet have the final pointers.
|
|
|
|
|
* Hence, while constructing, we track the indexes with peer->_construct_idx_*
|
2018-09-14 23:49:20 -04:00
|
|
|
* fields. These indexes must be converted to actual pointers blow.
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
*
|
|
|
|
|
* This is all done during parsing. In the final NMPObjectLnkWireGuard we
|
|
|
|
|
* don't want the CList anymore and repackage the NMPObject tightly. The
|
|
|
|
|
* reason is, that NMPObject instances are immutable and long-living. Spend
|
|
|
|
|
* a bit effort below during construction to obtain a most suitable representation
|
|
|
|
|
* in this regard. */
|
|
|
|
|
obj->_lnk_wireguard.peers_len = c_list_length (&parse_data.peers);
|
|
|
|
|
obj->_lnk_wireguard.peers = obj->_lnk_wireguard.peers_len > 0
|
|
|
|
|
? g_new (NMPWireGuardPeer, obj->_lnk_wireguard.peers_len)
|
|
|
|
|
: NULL;
|
|
|
|
|
|
|
|
|
|
/* duplicate allowed_ips instead of using the pointer. The GArray possibly has more
|
|
|
|
|
* space allocated then we need, and we want to get rid of this excess buffer.
|
|
|
|
|
* Note that NMPObject instance is possibly put into the cache and long-living. */
|
|
|
|
|
obj->_lnk_wireguard._allowed_ips_buf_len = allowed_ips ? allowed_ips->len : 0u;
|
|
|
|
|
obj->_lnk_wireguard._allowed_ips_buf = obj->_lnk_wireguard._allowed_ips_buf_len > 0
|
|
|
|
|
? (NMPWireGuardAllowedIP *) nm_memdup (allowed_ips->data,
|
|
|
|
|
sizeof (NMPWireGuardAllowedIP) * allowed_ips->len)
|
|
|
|
|
: NULL;
|
|
|
|
|
|
|
|
|
|
i = 0;
|
|
|
|
|
c_list_for_each_entry_safe (peer_c, peer_c_safe, &parse_data.peers, lst) {
|
|
|
|
|
NMPWireGuardPeer *peer = (NMPWireGuardPeer *) &obj->_lnk_wireguard.peers[i++];
|
|
|
|
|
|
|
|
|
|
*peer = peer_c->data;
|
|
|
|
|
|
|
|
|
|
c_list_unlink_stale (&peer_c->lst);
|
|
|
|
|
nm_explicit_bzero (&peer_c->data.preshared_key, sizeof (peer_c->data.preshared_key));
|
|
|
|
|
g_slice_free (WireGuardPeerConstruct, peer_c);
|
|
|
|
|
|
|
|
|
|
if (peer->_construct_idx_end != 0) {
|
|
|
|
|
guint len;
|
|
|
|
|
|
|
|
|
|
nm_assert (obj->_lnk_wireguard._allowed_ips_buf);
|
|
|
|
|
nm_assert (peer->_construct_idx_end > peer->_construct_idx_start);
|
|
|
|
|
nm_assert (peer->_construct_idx_start < obj->_lnk_wireguard._allowed_ips_buf_len);
|
|
|
|
|
nm_assert (peer->_construct_idx_end <= obj->_lnk_wireguard._allowed_ips_buf_len);
|
|
|
|
|
|
|
|
|
|
len = peer->_construct_idx_end - peer->_construct_idx_start;
|
|
|
|
|
peer->allowed_ips = &obj->_lnk_wireguard._allowed_ips_buf[peer->_construct_idx_start];
|
|
|
|
|
peer->allowed_ips_len = len;
|
|
|
|
|
} else {
|
|
|
|
|
nm_assert (!peer->_construct_idx_start);
|
|
|
|
|
nm_assert (!peer->_construct_idx_end);
|
|
|
|
|
peer->allowed_ips = NULL;
|
|
|
|
|
peer->allowed_ips_len = 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
2018-09-04 16:43:44 +02:00
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
return obj;
|
2018-09-04 16:43:44 +02:00
|
|
|
|
|
|
|
|
nla_put_failure:
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
g_return_val_if_reached (NULL);
|
2018-09-04 16:43:44 +02:00
|
|
|
}
|
2018-03-13 13:35:35 +00:00
|
|
|
|
2018-12-25 18:41:28 +01:00
|
|
|
static int
|
|
|
|
|
_wireguard_get_family_id (NMPlatform *platform, int ifindex_try)
|
|
|
|
|
{
|
|
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
|
|
|
|
|
int wireguard_family_id = -1;
|
|
|
|
|
|
|
|
|
|
if (ifindex_try > 0) {
|
|
|
|
|
const NMPlatformLink *plink;
|
|
|
|
|
|
|
|
|
|
if (nm_platform_link_get_lnk_wireguard (platform, ifindex_try, &plink))
|
|
|
|
|
wireguard_family_id = NMP_OBJECT_UP_CAST (plink)->_link.wireguard_family_id;
|
|
|
|
|
}
|
|
|
|
|
if (wireguard_family_id < 0)
|
|
|
|
|
wireguard_family_id = genl_ctrl_resolve (priv->genl, "wireguard");
|
|
|
|
|
return wireguard_family_id;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Fetch the current WireGuard settings of @ifindex via generic netlink and
 * merge them into the platform cache's link object.
 *
 * Returns the updated, cache-owned NMPObject for the link, or NULL if
 * @ifindex does not (or no longer does) refer to a WireGuard link. */
static const NMPObject *
_wireguard_refresh_link (NMPlatform *platform,
                         int wireguard_family_id,
                         int ifindex)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	nm_auto_nmpobj const NMPObject *obj_old = NULL;
	nm_auto_nmpobj const NMPObject *obj_new = NULL;
	nm_auto_nmpobj const NMPObject *lnk_new = NULL;
	NMPCacheOpsType cache_op;
	const NMPObject *plink = NULL;
	nm_auto_nmpobj NMPObject *obj = NULL;

	nm_assert (wireguard_family_id >= 0);
	nm_assert (ifindex > 0);

	/* drain pending netlink events first, so the cache lookup below sees
	 * the most recent link state. */
	nm_platform_process_events (platform);

	plink = nm_platform_link_get_obj (platform, ifindex, TRUE);

	if (   !plink
	    || plink->link.type != NM_LINK_TYPE_WIREGUARD) {
		/* the cache does not (yet) know the link as WireGuard. Force a
		 * refresh and re-check; give up if it still is not one. */
		nm_platform_link_refresh (platform, ifindex);
		plink = nm_platform_link_get_obj (platform, ifindex, TRUE);
		if (   !plink
		    || plink->link.type != NM_LINK_TYPE_WIREGUARD)
			return NULL;
		if (NMP_OBJECT_GET_TYPE (plink->_link.netlink.lnk) == NMP_OBJECT_TYPE_LNK_WIREGUARD)
			lnk_new = nmp_object_ref (plink->_link.netlink.lnk);
	} else {
		/* re-read the WireGuard specific data (keys, peers, allowed-ips)
		 * from kernel via genl. */
		lnk_new = _wireguard_read_info (platform,
		                                priv->genl,
		                                wireguard_family_id,
		                                ifindex);
		if (!lnk_new) {
			/* reading failed; fall back to the data we already have cached. */
			if (NMP_OBJECT_GET_TYPE (plink->_link.netlink.lnk) == NMP_OBJECT_TYPE_LNK_WIREGUARD)
				lnk_new = nmp_object_ref (plink->_link.netlink.lnk);
		} else if (nmp_object_equal (plink->_link.netlink.lnk, lnk_new)) {
			/* content unchanged: keep the cached instance, so the pointer
			 * comparison below detects that no cache update is necessary. */
			nmp_object_unref (lnk_new);
			lnk_new = nmp_object_ref (plink->_link.netlink.lnk);
		}
	}

	if (   plink->_link.wireguard_family_id == wireguard_family_id
	    && plink->_link.netlink.lnk == lnk_new)
		return plink;

	/* we use nmp_cache_update_netlink() to re-inject the new object into the cache.
	 * For that, we need to clone it, and tweak it so that it's suitable. It's a bit
	 * of a hack, in particular that we need to clear driver and udev-device. */
	obj = nmp_object_clone (plink, FALSE);
	obj->_link.wireguard_family_id = wireguard_family_id;
	nmp_object_unref (obj->_link.netlink.lnk);
	obj->_link.netlink.lnk = g_steal_pointer (&lnk_new);
	obj->link.driver = NULL;
	nm_clear_pointer (&obj->_link.udev.device, udev_device_unref);

	cache_op = nmp_cache_update_netlink (nm_platform_get_cache (platform),
	                                     obj,
	                                     FALSE,
	                                     &obj_old,
	                                     &obj_new);
	nm_assert (NM_IN_SET (cache_op, NMP_CACHE_OPS_UPDATED));
	if (cache_op != NMP_CACHE_OPS_UNCHANGED) {
		cache_on_change (platform, cache_op, obj_old, obj_new);
		nm_platform_cache_update_emit_signal (platform, cache_op, obj_old, obj_new);
	}

	nm_assert (   !obj_new
	           || (   NMP_OBJECT_GET_TYPE (obj_new) == NMP_OBJECT_TYPE_LINK
	               && obj_new->link.type == NM_LINK_TYPE_WIREGUARD
	               && (   !obj_new->_link.netlink.lnk
	                   || NMP_OBJECT_GET_TYPE (obj_new->_link.netlink.lnk) == NMP_OBJECT_TYPE_LNK_WIREGUARD)));
	return obj_new;
}
|
|
|
|
|
|
|
|
|
|
/* Build one or more WG_CMD_SET_DEVICE generic-netlink messages that transfer
 * the given WireGuard link settings and peers to kernel.
 *
 * A netlink message has a limited size, and a device may have more peers (or
 * a peer more allowed-ips) than fit into a single message. When an attribute
 * no longer fits ("toobig_*" labels), the current message is finalized and
 * appended to @out_msgs, and construction restarts at "again:", resuming at
 * the peer / allowed-ip where we left off (tracked via idx_peer_curr and
 * idx_allowed_ips_curr; IDX_NIL means "not resuming").
 *
 * Returns 0 and sets @out_msgs on success; a negative NME error otherwise. */
static int
_wireguard_create_change_nlmsgs (NMPlatform *platform,
                                 int ifindex,
                                 int wireguard_family_id,
                                 const NMPlatformLnkWireGuard *lnk_wireguard,
                                 const NMPWireGuardPeer *peers,
                                 const NMPlatformWireGuardChangePeerFlags *peer_flags,
                                 guint peers_len,
                                 NMPlatformWireGuardChangeFlags change_flags,
                                 GPtrArray **out_msgs)
{
	gs_unref_ptrarray GPtrArray *msgs = NULL;
	nm_auto_nlmsg struct nl_msg *msg = NULL;
	const guint IDX_NIL = G_MAXUINT;
	guint idx_peer_curr;
	guint idx_allowed_ips_curr;
	struct nlattr *nest_peers;
	struct nlattr *nest_curr_peer;
	struct nlattr *nest_allowed_ips;
	struct nlattr *nest_curr_allowed_ip;
	NMPlatformWireGuardChangePeerFlags p_flags = NM_PLATFORM_WIREGUARD_CHANGE_PEER_FLAG_DEFAULT;

	/* like nla_nest_end(), but treat failure as a bug: once we close a nest,
	 * the contained attributes are already known to fit the message. */
#define _nla_nest_end(msg, nest_start) \
	G_STMT_START { \
		if (nla_nest_end ((msg), (nest_start)) < 0) \
			g_return_val_if_reached (-NME_BUG); \
	} G_STMT_END

	/* Adapted from LGPL-2.1+ code [1].
	 *
	 * [1] https://git.zx2c4.com/WireGuard/tree/contrib/examples/embeddable-wg-library/wireguard.c?id=5e99a6d43fe2351adf36c786f5ea2086a8fe7ab8#n1073 */

	idx_peer_curr = IDX_NIL;
	idx_allowed_ips_curr = IDX_NIL;

	/* TODO: for the moment, we always reset all peers and allowed-ips (WGDEVICE_F_REPLACE_PEERS, WGPEER_F_REPLACE_ALLOWEDIPS).
	 * The platform API should be extended to also support partial updates. In particular, configuring the same configuration
	 * multiple times, should not clear and re-add all settings, but rather sync the existing settings with the desired configuration. */

again:
	/* start a new message; restarted each time the previous one overflowed. */
	msg = nlmsg_alloc ();
	if (!genlmsg_put (msg,
	                  NL_AUTO_PORT,
	                  NL_AUTO_SEQ,
	                  wireguard_family_id,
	                  0,
	                  NLM_F_REQUEST,
	                  WG_CMD_SET_DEVICE,
	                  1))
		g_return_val_if_reached (-NME_BUG);

	NLA_PUT_U32 (msg, WGDEVICE_A_IFINDEX, (guint32) ifindex);

	/* device-level attributes go only into the first message of the batch. */
	if (idx_peer_curr == IDX_NIL) {
		guint32 flags;

		if (NM_FLAGS_HAS (change_flags, NM_PLATFORM_WIREGUARD_CHANGE_FLAG_HAS_PRIVATE_KEY))
			NLA_PUT (msg, WGDEVICE_A_PRIVATE_KEY, sizeof (lnk_wireguard->private_key), lnk_wireguard->private_key);
		if (NM_FLAGS_HAS (change_flags, NM_PLATFORM_WIREGUARD_CHANGE_FLAG_HAS_LISTEN_PORT))
			NLA_PUT_U16 (msg, WGDEVICE_A_LISTEN_PORT, lnk_wireguard->listen_port);
		if (NM_FLAGS_HAS (change_flags, NM_PLATFORM_WIREGUARD_CHANGE_FLAG_HAS_FWMARK))
			NLA_PUT_U32 (msg, WGDEVICE_A_FWMARK, lnk_wireguard->fwmark);

		flags = 0;
		if (NM_FLAGS_HAS (change_flags, NM_PLATFORM_WIREGUARD_CHANGE_FLAG_REPLACE_PEERS))
			flags |= WGDEVICE_F_REPLACE_PEERS;
		NLA_PUT_U32 (msg, WGDEVICE_A_FLAGS, flags);
	}

	if (peers_len == 0)
		goto send;

	nest_curr_peer = NULL;
	nest_allowed_ips = NULL;
	nest_curr_allowed_ip = NULL;

	nest_peers = nla_nest_start (msg, WGDEVICE_A_PEERS);
	if (!nest_peers)
		g_return_val_if_reached (-NME_BUG);

	if (idx_peer_curr == IDX_NIL)
		idx_peer_curr = 0;
	for (; idx_peer_curr < peers_len; idx_peer_curr++) {
		const NMPWireGuardPeer *p = &peers[idx_peer_curr];

		if (peer_flags) {
			p_flags = peer_flags[idx_peer_curr];
			if (!NM_FLAGS_ANY (p_flags,   NM_PLATFORM_WIREGUARD_CHANGE_PEER_FLAG_REMOVE_ME
			                            | NM_PLATFORM_WIREGUARD_CHANGE_PEER_FLAG_HAS_PRESHARED_KEY
			                            | NM_PLATFORM_WIREGUARD_CHANGE_PEER_FLAG_HAS_KEEPALIVE_INTERVAL
			                            | NM_PLATFORM_WIREGUARD_CHANGE_PEER_FLAG_HAS_ENDPOINT
			                            | NM_PLATFORM_WIREGUARD_CHANGE_PEER_FLAG_HAS_ALLOWEDIPS
			                            | NM_PLATFORM_WIREGUARD_CHANGE_PEER_FLAG_REPLACE_ALLOWEDIPS)) {
				/* no flags set. We take that as indication to skip configuring the peer
				 * entirely. */
				nm_assert (p_flags == NM_PLATFORM_WIREGUARD_CHANGE_PEER_FLAG_NONE);
				continue;
			}
		}

		nest_curr_peer = nla_nest_start (msg, 0);
		if (!nest_curr_peer)
			goto toobig_peers;

		/* the public key identifies the peer; it is always required. */
		if (nla_put (msg, WGPEER_A_PUBLIC_KEY, NMP_WIREGUARD_PUBLIC_KEY_LEN, p->public_key) < 0)
			goto toobig_peers;

		if (NM_FLAGS_HAS (p_flags, NM_PLATFORM_WIREGUARD_CHANGE_PEER_FLAG_REMOVE_ME)) {
			/* all other p_flags are silently ignored. */
			if (nla_put_uint32 (msg, WGPEER_A_FLAGS, WGPEER_F_REMOVE_ME) < 0)
				goto toobig_peers;
		} else {

			/* when resuming a peer mid-way through its allowed-ips
			 * (idx_allowed_ips_curr != IDX_NIL), the scalar peer attributes
			 * were already sent in the previous message. */
			if (idx_allowed_ips_curr == IDX_NIL) {
				if (   NM_FLAGS_HAS (p_flags, NM_PLATFORM_WIREGUARD_CHANGE_PEER_FLAG_HAS_PRESHARED_KEY)
				    && nla_put (msg, WGPEER_A_PRESHARED_KEY, sizeof (p->preshared_key), p->preshared_key) < 0)
					goto toobig_peers;

				if (   NM_FLAGS_HAS (p_flags, NM_PLATFORM_WIREGUARD_CHANGE_PEER_FLAG_HAS_KEEPALIVE_INTERVAL)
				    && nla_put_uint16 (msg, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL, p->persistent_keepalive_interval) < 0)
					goto toobig_peers;

				if (   NM_FLAGS_HAS (p_flags, NM_PLATFORM_WIREGUARD_CHANGE_PEER_FLAG_REPLACE_ALLOWEDIPS)
				    && nla_put_uint32 (msg, WGPEER_A_FLAGS, WGPEER_F_REPLACE_ALLOWEDIPS) < 0)
					goto toobig_peers;

				if (NM_FLAGS_HAS (p_flags, NM_PLATFORM_WIREGUARD_CHANGE_PEER_FLAG_HAS_ENDPOINT)) {
					if (NM_IN_SET (p->endpoint.sa.sa_family, AF_INET, AF_INET6)) {
						if (nla_put (msg,
						             WGPEER_A_ENDPOINT,
						             p->endpoint.sa.sa_family == AF_INET
						               ? sizeof (p->endpoint.in)
						               : sizeof (p->endpoint.in6),
						             &p->endpoint) < 0)
							goto toobig_peers;
					} else {
						/* I think there is no way to clear an endpoint, though there should be. */
						nm_assert (p->endpoint.sa.sa_family == AF_UNSPEC);
					}
				}
			}

			if (   NM_FLAGS_HAS (p_flags, NM_PLATFORM_WIREGUARD_CHANGE_PEER_FLAG_HAS_ALLOWEDIPS)
			    && p->allowed_ips_len > 0) {
				if (idx_allowed_ips_curr == IDX_NIL)
					idx_allowed_ips_curr = 0;

				nest_allowed_ips = nla_nest_start (msg, WGPEER_A_ALLOWEDIPS);
				if (!nest_allowed_ips)
					goto toobig_allowedips;

				for (; idx_allowed_ips_curr < p->allowed_ips_len; idx_allowed_ips_curr++) {
					const NMPWireGuardAllowedIP *aip = &p->allowed_ips[idx_allowed_ips_curr];

					nest_curr_allowed_ip = nla_nest_start (msg, 0);
					if (!nest_curr_allowed_ip)
						goto toobig_allowedips;

					g_return_val_if_fail (NM_IN_SET (aip->family, AF_INET, AF_INET6), -NME_BUG);

					if (nla_put_uint16 (msg, WGALLOWEDIP_A_FAMILY, aip->family) < 0)
						goto toobig_allowedips;
					if (nla_put (msg,
					             WGALLOWEDIP_A_IPADDR,
					             nm_utils_addr_family_to_size (aip->family),
					             &aip->addr) < 0)
						goto toobig_allowedips;
					if (nla_put_uint8 (msg, WGALLOWEDIP_A_CIDR_MASK, aip->mask) < 0)
						goto toobig_allowedips;

					_nla_nest_end (msg, nest_curr_allowed_ip);
					nest_curr_allowed_ip = NULL;
				}
				idx_allowed_ips_curr = IDX_NIL;

				_nla_nest_end (msg, nest_allowed_ips);
				nest_allowed_ips = NULL;
			}
		}

		_nla_nest_end (msg, nest_curr_peer);
		nest_curr_peer = NULL;
	}

	_nla_nest_end (msg, nest_peers);
	goto send;

toobig_allowedips:
	/* the current allowed-ip did not fit: cancel the partial nests, finalize
	 * what fit so far, and continue the same peer in the next message. */
	if (nest_curr_allowed_ip)
		nla_nest_cancel (msg, nest_curr_allowed_ip);
	if (nest_allowed_ips)
		nla_nest_cancel (msg, nest_allowed_ips);
	_nla_nest_end (msg, nest_curr_peer);
	_nla_nest_end (msg, nest_peers);
	goto send;

toobig_peers:
	/* the current peer did not fit at all: cancel it and retry it from
	 * scratch in the next message. */
	if (nest_curr_peer)
		nla_nest_cancel (msg, nest_curr_peer);
	_nla_nest_end (msg, nest_peers);
	goto send;

send:
	if (!msgs)
		msgs = g_ptr_array_new_with_free_func ((GDestroyNotify) nlmsg_free);
	g_ptr_array_add (msgs, g_steal_pointer (&msg));

	/* more peers left over (we bailed out via a toobig_* label)? Then build
	 * a follow-up message. */
	if (   idx_peer_curr != IDX_NIL
	    && idx_peer_curr < peers_len)
		goto again;

	NM_SET_OUT (out_msgs, g_steal_pointer (&msgs));
	return 0;

nla_put_failure:
	/* jump target of the NLA_PUT*() macros above. */
	g_return_val_if_reached (-NME_BUG);

#undef _nla_nest_end
}
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
|
link_wireguard_change (NMPlatform *platform,
|
|
|
|
|
int ifindex,
|
|
|
|
|
const NMPlatformLnkWireGuard *lnk_wireguard,
|
|
|
|
|
const NMPWireGuardPeer *peers,
|
2019-02-11 11:07:06 +01:00
|
|
|
const NMPlatformWireGuardChangePeerFlags *peer_flags,
|
2019-01-13 14:21:59 +01:00
|
|
|
guint peers_len,
|
2019-02-10 15:22:26 +01:00
|
|
|
NMPlatformWireGuardChangeFlags change_flags)
|
2018-12-25 18:41:28 +01:00
|
|
|
{
|
|
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
|
|
|
|
|
gs_unref_ptrarray GPtrArray *msgs = NULL;
|
|
|
|
|
int wireguard_family_id;
|
|
|
|
|
guint i;
|
|
|
|
|
int r;
|
|
|
|
|
|
|
|
|
|
wireguard_family_id = _wireguard_get_family_id (platform, ifindex);
|
|
|
|
|
if (wireguard_family_id < 0)
|
|
|
|
|
return -NME_PL_NO_FIRMWARE;
|
|
|
|
|
|
|
|
|
|
r = _wireguard_create_change_nlmsgs (platform,
|
|
|
|
|
ifindex,
|
|
|
|
|
wireguard_family_id,
|
|
|
|
|
lnk_wireguard,
|
|
|
|
|
peers,
|
2019-02-11 11:07:06 +01:00
|
|
|
peer_flags,
|
2018-12-25 18:41:28 +01:00
|
|
|
peers_len,
|
2019-02-10 15:22:26 +01:00
|
|
|
change_flags,
|
2018-12-25 18:41:28 +01:00
|
|
|
&msgs);
|
|
|
|
|
if (r < 0) {
|
|
|
|
|
_LOGW ("wireguard: set-device, cannot construct netlink message: %s", nm_strerror (r));
|
|
|
|
|
return r;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < msgs->len; i++) {
|
|
|
|
|
r = nl_send_auto (priv->genl, msgs->pdata[i]);
|
|
|
|
|
if (r < 0) {
|
|
|
|
|
_LOGW ("wireguard: set-device, send netlink message #%u failed: %s", i, nm_strerror (r));
|
|
|
|
|
return r;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
do {
|
|
|
|
|
r = nl_recvmsgs (priv->genl, NULL);
|
|
|
|
|
} while (r == -EAGAIN);
|
|
|
|
|
if (r < 0) {
|
|
|
|
|
_LOGW ("wireguard: set-device, message #%u was rejected: %s", i, nm_strerror (r));
|
|
|
|
|
return r;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
_LOGT ("wireguard: set-device, message #%u sent and confirmed", i);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
_wireguard_refresh_link (platform, wireguard_family_id, ifindex);
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2018-03-13 13:35:35 +00:00
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
/* Copied and heavily modified from libnl3's link_msg_parser(). */
|
|
|
|
|
static NMPObject *
|
|
|
|
|
_new_from_nl_link (NMPlatform *platform, const NMPCache *cache, struct nlmsghdr *nlh, gboolean id_only)
|
|
|
|
|
{
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy[] = {
|
2015-10-12 16:07:01 +02:00
|
|
|
[IFLA_IFNAME] = { .type = NLA_STRING,
|
|
|
|
|
.maxlen = IFNAMSIZ },
|
|
|
|
|
[IFLA_MTU] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_TXQLEN] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_LINK] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_WEIGHT] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_MASTER] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_OPERSTATE] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_LINKMODE] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_LINKINFO] = { .type = NLA_NESTED },
|
|
|
|
|
[IFLA_QDISC] = { .type = NLA_STRING,
|
|
|
|
|
.maxlen = IFQDISCSIZ },
|
2017-01-16 16:11:05 +01:00
|
|
|
[IFLA_STATS] = { .minlen = nm_offsetofend (struct rtnl_link_stats, tx_compressed) },
|
|
|
|
|
[IFLA_STATS64] = { .minlen = nm_offsetofend (struct rtnl_link_stats64, tx_compressed)},
|
|
|
|
|
[IFLA_MAP] = { .minlen = nm_offsetofend (struct rtnl_link_ifmap, port) },
|
2015-10-12 16:07:01 +02:00
|
|
|
[IFLA_IFALIAS] = { .type = NLA_STRING, .maxlen = IFALIASZ },
|
|
|
|
|
[IFLA_NUM_VF] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_AF_SPEC] = { .type = NLA_NESTED },
|
|
|
|
|
[IFLA_PROMISCUITY] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_GROUP] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_CARRIER] = { .type = NLA_U8 },
|
|
|
|
|
[IFLA_PHYS_PORT_ID] = { .type = NLA_UNSPEC },
|
|
|
|
|
[IFLA_NET_NS_PID] = { .type = NLA_U32 },
|
|
|
|
|
[IFLA_NET_NS_FD] = { .type = NLA_U32 },
|
2019-02-15 11:33:57 +01:00
|
|
|
[IFLA_LINK_NETNSID] = { },
|
2015-10-12 16:07:01 +02:00
|
|
|
};
|
|
|
|
|
const struct ifinfomsg *ifi;
|
2019-02-15 11:33:57 +01:00
|
|
|
struct nlattr *tb[G_N_ELEMENTS (policy)];
|
2015-10-12 16:07:01 +02:00
|
|
|
struct nlattr *nl_info_data = NULL;
|
|
|
|
|
const char *nl_info_kind = NULL;
|
|
|
|
|
nm_auto_nmpobj NMPObject *obj = NULL;
|
|
|
|
|
gboolean completed_from_cache_val = FALSE;
|
|
|
|
|
gboolean *completed_from_cache = cache ? &completed_from_cache_val : NULL;
|
|
|
|
|
const NMPObject *link_cached = NULL;
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
const NMPObject *lnk_data = NULL;
|
2016-05-25 11:16:17 +02:00
|
|
|
gboolean address_complete_from_cache = TRUE;
|
|
|
|
|
gboolean lnk_data_complete_from_cache = TRUE;
|
2018-06-05 15:20:54 +02:00
|
|
|
gboolean need_ext_data = FALSE;
|
2016-04-29 21:25:43 +02:00
|
|
|
gboolean af_inet6_token_valid = FALSE;
|
|
|
|
|
gboolean af_inet6_addr_gen_mode_valid = FALSE;
|
2015-05-29 09:40:24 +02:00
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
if (!nlmsg_valid_hdr (nlh, sizeof (*ifi)))
|
|
|
|
|
return NULL;
|
2019-02-15 11:33:57 +01:00
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
ifi = nlmsg_data (nlh);
|
2015-05-29 09:40:24 +02:00
|
|
|
|
2017-08-31 17:47:16 +02:00
|
|
|
if (ifi->ifi_family != AF_UNSPEC)
|
|
|
|
|
return NULL;
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for user afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combination of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
if (ifi->ifi_index <= 0)
|
|
|
|
|
return NULL;
|
2017-08-31 17:47:16 +02:00
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
obj = nmp_object_new_link (ifi->ifi_index);
|
2015-05-07 10:16:15 +02:00
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
if (id_only)
|
2018-09-04 11:16:28 +02:00
|
|
|
return g_steal_pointer (&obj);
|
2015-05-07 10:16:15 +02:00
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nlmsg_parse_arr (nlh, sizeof (*ifi), tb, policy) < 0)
|
2018-09-04 11:16:28 +02:00
|
|
|
return NULL;
|
2015-05-07 10:16:15 +02:00
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
if (!tb[IFLA_IFNAME])
|
2018-09-04 11:16:28 +02:00
|
|
|
return NULL;
|
2019-02-17 11:12:02 +01:00
|
|
|
nla_strlcpy (obj->link.name, tb[IFLA_IFNAME], IFNAMSIZ);
|
2015-10-12 16:07:01 +02:00
|
|
|
if (!obj->link.name[0])
|
2018-09-04 11:16:28 +02:00
|
|
|
return NULL;
|
2015-05-07 10:16:15 +02:00
|
|
|
|
2017-05-10 22:27:34 -04:00
|
|
|
if (!tb[IFLA_MTU]) {
|
|
|
|
|
/* Kernel has two places that send RTM_GETLINK messages:
|
|
|
|
|
* net/core/rtnetlink.c and net/wireless/ext-core.c.
|
|
|
|
|
* Unfortunately ext-core.c sets only IFLA_WIRELESS and
|
|
|
|
|
* IFLA_IFNAME. This confuses code in this function, because
|
|
|
|
|
* it cannot get complete set of data for the interface and
|
|
|
|
|
* later incomplete object this function creates is used to
|
|
|
|
|
* overwrite existing data in NM's cache.
|
|
|
|
|
* Since ext-core.c doesn't set IFLA_MTU we can use it as a
|
|
|
|
|
* signal to ignore incoming message.
|
|
|
|
|
* To some extent this is a hack and correct approach is to
|
|
|
|
|
* merge objects per-field.
|
|
|
|
|
*/
|
2018-09-04 11:16:28 +02:00
|
|
|
return NULL;
|
2017-05-10 22:27:34 -04:00
|
|
|
}
|
|
|
|
|
obj->link.mtu = nla_get_u32 (tb[IFLA_MTU]);
|
|
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
if (tb[IFLA_LINKINFO]) {
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy_link_info[] = {
|
|
|
|
|
[IFLA_INFO_KIND] = { .type = NLA_STRING },
|
|
|
|
|
[IFLA_INFO_DATA] = { .type = NLA_NESTED },
|
|
|
|
|
[IFLA_INFO_XSTATS] = { .type = NLA_NESTED },
|
|
|
|
|
};
|
|
|
|
|
struct nlattr *li[G_N_ELEMENTS (policy_link_info)];
|
|
|
|
|
|
|
|
|
|
if (nla_parse_nested_arr (li, tb[IFLA_LINKINFO], policy_link_info) < 0)
|
2018-09-04 11:16:28 +02:00
|
|
|
return NULL;
|
2015-05-07 10:16:15 +02:00
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
if (li[IFLA_INFO_KIND])
|
|
|
|
|
nl_info_kind = nla_get_string (li[IFLA_INFO_KIND]);
|
2015-05-07 10:16:15 +02:00
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
nl_info_data = li[IFLA_INFO_DATA];
|
|
|
|
|
}
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2016-08-10 11:54:30 +02:00
|
|
|
if (tb[IFLA_STATS64]) {
|
2019-02-17 11:24:38 +01:00
|
|
|
const char *stats = nla_data (tb[IFLA_STATS64]);
|
|
|
|
|
|
|
|
|
|
obj->link.rx_packets = unaligned_read_ne64 (&stats[G_STRUCT_OFFSET (struct rtnl_link_stats64, rx_packets)]);
|
|
|
|
|
obj->link.rx_bytes = unaligned_read_ne64 (&stats[G_STRUCT_OFFSET (struct rtnl_link_stats64, rx_bytes)]);
|
|
|
|
|
obj->link.tx_packets = unaligned_read_ne64 (&stats[G_STRUCT_OFFSET (struct rtnl_link_stats64, tx_packets)]);
|
|
|
|
|
obj->link.tx_bytes = unaligned_read_ne64 (&stats[G_STRUCT_OFFSET (struct rtnl_link_stats64, tx_bytes)]);
|
2016-08-10 11:54:30 +02:00
|
|
|
}
|
|
|
|
|
|
2016-02-29 15:52:27 +01:00
|
|
|
obj->link.n_ifi_flags = ifi->ifi_flags;
|
|
|
|
|
obj->link.connected = NM_FLAGS_HAS (obj->link.n_ifi_flags, IFF_LOWER_UP);
|
2015-10-12 16:07:01 +02:00
|
|
|
obj->link.arptype = ifi->ifi_type;
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
obj->link.type = _linktype_get_type (platform,
|
|
|
|
|
cache,
|
|
|
|
|
nl_info_kind,
|
|
|
|
|
obj->link.ifindex,
|
|
|
|
|
obj->link.name,
|
2016-02-29 15:52:27 +01:00
|
|
|
obj->link.n_ifi_flags,
|
2015-10-12 16:07:01 +02:00
|
|
|
obj->link.arptype,
|
|
|
|
|
completed_from_cache,
|
|
|
|
|
&link_cached,
|
2015-10-16 11:28:34 +02:00
|
|
|
&obj->link.kind);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
if (tb[IFLA_MASTER])
|
|
|
|
|
obj->link.master = nla_get_u32 (tb[IFLA_MASTER]);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
if (tb[IFLA_LINK]) {
|
|
|
|
|
if (!tb[IFLA_LINK_NETNSID])
|
|
|
|
|
obj->link.parent = nla_get_u32 (tb[IFLA_LINK]);
|
|
|
|
|
else
|
|
|
|
|
obj->link.parent = NM_PLATFORM_LINK_OTHER_NETNS;
|
|
|
|
|
}
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
if (tb[IFLA_ADDRESS]) {
|
|
|
|
|
int l = nla_len (tb[IFLA_ADDRESS]);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
if (l > 0 && l <= NM_UTILS_HWADDR_LEN_MAX) {
|
|
|
|
|
G_STATIC_ASSERT (NM_UTILS_HWADDR_LEN_MAX == sizeof (obj->link.addr.data));
|
|
|
|
|
memcpy (obj->link.addr.data, nla_data (tb[IFLA_ADDRESS]), l);
|
|
|
|
|
obj->link.addr.len = l;
|
|
|
|
|
}
|
2016-05-25 11:16:17 +02:00
|
|
|
address_complete_from_cache = FALSE;
|
2015-10-12 16:07:01 +02:00
|
|
|
}
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
if (tb[IFLA_AF_SPEC]) {
|
|
|
|
|
struct nlattr *af_attr;
|
|
|
|
|
int remaining;
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
nla_for_each_nested (af_attr, tb[IFLA_AF_SPEC], remaining) {
|
|
|
|
|
switch (nla_type (af_attr)) {
|
|
|
|
|
case AF_INET6:
|
|
|
|
|
_parse_af_inet6 (platform,
|
|
|
|
|
af_attr,
|
2016-04-29 21:25:43 +02:00
|
|
|
&obj->link.inet6_token,
|
|
|
|
|
&af_inet6_token_valid,
|
|
|
|
|
&obj->link.inet6_addr_gen_mode_inv,
|
|
|
|
|
&af_inet6_addr_gen_mode_valid);
|
2015-10-12 16:07:01 +02:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2015-10-12 13:44:44 +02:00
|
|
|
switch (obj->link.type) {
|
2015-10-12 15:15:21 +02:00
|
|
|
case NM_LINK_TYPE_GRE:
|
2018-06-26 10:45:35 +02:00
|
|
|
case NM_LINK_TYPE_GRETAP:
|
2015-10-12 15:15:21 +02:00
|
|
|
lnk_data = _parse_lnk_gre (nl_info_kind, nl_info_data);
|
|
|
|
|
break;
|
2015-10-15 15:47:14 +02:00
|
|
|
case NM_LINK_TYPE_INFINIBAND:
|
|
|
|
|
lnk_data = _parse_lnk_infiniband (nl_info_kind, nl_info_data);
|
|
|
|
|
break;
|
2015-11-27 22:22:25 +01:00
|
|
|
case NM_LINK_TYPE_IP6TNL:
|
|
|
|
|
lnk_data = _parse_lnk_ip6tnl (nl_info_kind, nl_info_data);
|
|
|
|
|
break;
|
2018-06-26 12:06:43 +02:00
|
|
|
case NM_LINK_TYPE_IP6GRE:
|
|
|
|
|
case NM_LINK_TYPE_IP6GRETAP:
|
|
|
|
|
lnk_data = _parse_lnk_ip6gre (nl_info_kind, nl_info_data);
|
|
|
|
|
break;
|
2015-11-27 14:01:56 +01:00
|
|
|
case NM_LINK_TYPE_IPIP:
|
|
|
|
|
lnk_data = _parse_lnk_ipip (nl_info_kind, nl_info_data);
|
|
|
|
|
break;
|
2016-06-30 18:20:09 +02:00
|
|
|
case NM_LINK_TYPE_MACSEC:
|
|
|
|
|
lnk_data = _parse_lnk_macsec (nl_info_kind, nl_info_data);
|
|
|
|
|
break;
|
2015-10-12 15:15:21 +02:00
|
|
|
case NM_LINK_TYPE_MACVLAN:
|
2015-12-04 09:49:39 +01:00
|
|
|
case NM_LINK_TYPE_MACVTAP:
|
2015-10-12 15:15:21 +02:00
|
|
|
lnk_data = _parse_lnk_macvlan (nl_info_kind, nl_info_data);
|
|
|
|
|
break;
|
2015-11-11 18:41:48 +01:00
|
|
|
case NM_LINK_TYPE_SIT:
|
|
|
|
|
lnk_data = _parse_lnk_sit (nl_info_kind, nl_info_data);
|
|
|
|
|
break;
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel versions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
case NM_LINK_TYPE_TUN:
|
|
|
|
|
lnk_data = _parse_lnk_tun (nl_info_kind, nl_info_data);
|
|
|
|
|
break;
|
2015-10-12 13:44:44 +02:00
|
|
|
case NM_LINK_TYPE_VLAN:
|
|
|
|
|
lnk_data = _parse_lnk_vlan (nl_info_kind, nl_info_data);
|
|
|
|
|
break;
|
2015-10-12 15:15:21 +02:00
|
|
|
case NM_LINK_TYPE_VXLAN:
|
|
|
|
|
lnk_data = _parse_lnk_vxlan (nl_info_kind, nl_info_data);
|
|
|
|
|
break;
|
2018-05-15 16:47:13 +02:00
|
|
|
case NM_LINK_TYPE_WIFI:
|
|
|
|
|
case NM_LINK_TYPE_OLPC_MESH:
|
2018-06-05 15:20:54 +02:00
|
|
|
case NM_LINK_TYPE_WPAN:
|
|
|
|
|
need_ext_data = TRUE;
|
2018-05-15 16:47:13 +02:00
|
|
|
lnk_data_complete_from_cache = FALSE;
|
|
|
|
|
break;
|
2018-03-13 13:35:35 +00:00
|
|
|
case NM_LINK_TYPE_WIREGUARD:
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for users afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combination of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
lnk_data_complete_from_cache = TRUE;
|
2018-03-13 13:35:35 +00:00
|
|
|
break;
|
2015-10-12 13:44:44 +02:00
|
|
|
default:
|
2016-05-25 11:16:17 +02:00
|
|
|
lnk_data_complete_from_cache = FALSE;
|
|
|
|
|
break;
|
2015-10-12 13:44:44 +02:00
|
|
|
}
|
|
|
|
|
|
2016-05-25 11:16:17 +02:00
|
|
|
if ( completed_from_cache
|
|
|
|
|
&& ( lnk_data_complete_from_cache
|
2018-09-07 08:58:26 +02:00
|
|
|
|| need_ext_data
|
2016-04-29 21:25:43 +02:00
|
|
|
|| address_complete_from_cache
|
|
|
|
|
|| !af_inet6_token_valid
|
2016-08-15 23:23:25 +02:00
|
|
|
|| !af_inet6_addr_gen_mode_valid
|
|
|
|
|
|| !tb[IFLA_STATS64])) {
|
2015-10-12 13:44:44 +02:00
|
|
|
_lookup_cached_link (cache, obj->link.ifindex, completed_from_cache, &link_cached);
|
2018-09-04 14:48:59 +02:00
|
|
|
if ( link_cached
|
|
|
|
|
&& link_cached->_link.netlink.is_in_netlink) {
|
2016-05-25 11:16:17 +02:00
|
|
|
if ( lnk_data_complete_from_cache
|
|
|
|
|
&& link_cached->link.type == obj->link.type
|
|
|
|
|
&& link_cached->_link.netlink.lnk
|
|
|
|
|
&& ( !lnk_data
|
|
|
|
|
|| nmp_object_equal (lnk_data, link_cached->_link.netlink.lnk))) {
|
|
|
|
|
/* We always try to look into the cache and reuse the object there.
|
|
|
|
|
* We do that, because we consider the lnk object as immutable and don't
|
|
|
|
|
* modify it after creating. Hence we can share it and reuse.
|
|
|
|
|
*
|
|
|
|
|
* Also, sometimes the info-data is missing for updates. In this case
|
|
|
|
|
* we want to keep the previously received lnk_data. */
|
|
|
|
|
nmp_object_unref (lnk_data);
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for users afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combination of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
lnk_data = nmp_object_ref (link_cached->_link.netlink.lnk);
|
2016-05-25 11:16:17 +02:00
|
|
|
}
|
2018-05-15 16:47:13 +02:00
|
|
|
|
2018-06-05 15:20:54 +02:00
|
|
|
if ( need_ext_data
|
2018-05-15 16:47:13 +02:00
|
|
|
&& link_cached->link.type == obj->link.type
|
2018-06-05 15:20:54 +02:00
|
|
|
&& link_cached->_link.ext_data) {
|
|
|
|
|
/* Prefer reuse of existing ext_data object */
|
|
|
|
|
obj->_link.ext_data = g_object_ref (link_cached->_link.ext_data);
|
2018-05-15 16:47:13 +02:00
|
|
|
}
|
|
|
|
|
|
2016-05-25 11:16:17 +02:00
|
|
|
if (address_complete_from_cache)
|
|
|
|
|
obj->link.addr = link_cached->link.addr;
|
2016-04-29 21:25:43 +02:00
|
|
|
if (!af_inet6_token_valid)
|
|
|
|
|
obj->link.inet6_token = link_cached->link.inet6_token;
|
|
|
|
|
if (!af_inet6_addr_gen_mode_valid)
|
|
|
|
|
obj->link.inet6_addr_gen_mode_inv = link_cached->link.inet6_addr_gen_mode_inv;
|
2016-08-15 23:23:25 +02:00
|
|
|
if (!tb[IFLA_STATS64]) {
|
|
|
|
|
obj->link.rx_packets = link_cached->link.rx_packets;
|
|
|
|
|
obj->link.rx_bytes = link_cached->link.rx_bytes;
|
|
|
|
|
obj->link.tx_packets = link_cached->link.tx_packets;
|
|
|
|
|
obj->link.tx_bytes = link_cached->link.tx_bytes;
|
|
|
|
|
}
|
2015-10-12 16:07:01 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-10-12 13:44:44 +02:00
|
|
|
obj->_link.netlink.lnk = lnk_data;
|
|
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for users afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combination of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
if ( need_ext_data
|
|
|
|
|
&& obj->_link.ext_data == NULL) {
|
2018-06-05 15:20:54 +02:00
|
|
|
switch (obj->link.type) {
|
|
|
|
|
case NM_LINK_TYPE_WIFI:
|
|
|
|
|
obj->_link.ext_data = (GObject *) nm_wifi_utils_new (ifi->ifi_index,
|
|
|
|
|
_genl_sock (NM_LINUX_PLATFORM (platform)),
|
|
|
|
|
TRUE);
|
|
|
|
|
break;
|
|
|
|
|
case NM_LINK_TYPE_OLPC_MESH:
|
2018-05-15 16:47:13 +02:00
|
|
|
#if HAVE_WEXT
|
|
|
|
|
/* The kernel driver now uses nl80211, but we force use of WEXT because
|
|
|
|
|
* the cfg80211 interactions are not quite ready to support access to
|
|
|
|
|
* mesh control through nl80211 just yet.
|
|
|
|
|
*/
|
2018-06-05 15:20:54 +02:00
|
|
|
obj->_link.ext_data = (GObject *) nm_wifi_utils_wext_new (ifi->ifi_index, FALSE);
|
2018-05-15 16:47:13 +02:00
|
|
|
#endif
|
2018-06-05 15:20:54 +02:00
|
|
|
break;
|
|
|
|
|
case NM_LINK_TYPE_WPAN:
|
|
|
|
|
obj->_link.ext_data = (GObject *) nm_wpan_utils_new (ifi->ifi_index,
|
|
|
|
|
_genl_sock (NM_LINUX_PLATFORM (platform)),
|
|
|
|
|
TRUE);
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
g_assert_not_reached ();
|
2018-05-15 16:47:13 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
platform/wireguard: rework parsing wireguard links in platform
- previously, parsing wireguard genl data resulted in memory corruption:
- _wireguard_update_from_allowedips_nla() takes pointers to
allowedip = &g_array_index (buf->allowedips, NMWireGuardAllowedIP, buf->allowedips->len - 1);
but resizing the GArray will invalidate this pointer. This happens
when there are multiple allowed-ips to parse.
- there was some confusion who owned the allowedips pointers.
_wireguard_peers_cpy() and _vt_cmd_obj_dispose_lnk_wireguard()
assumed each peer owned their own chunk, but _wireguard_get_link_properties()
would not duplicate the memory properly.
- rework memory handling for allowed_ips. Now, the NMPObjectLnkWireGuard
keeps a pointer _allowed_ips_buf. This buffer contains the instances for
all peers.
The parsing of the netlink message is the complicated part, because
we don't know upfront how many peers/allowed-ips we receive. During
construction, the tracking of peers/allowed-ips is complicated,
via a CList/GArray. At the end of that, we prettify the data
representation and put everything into two buffers. That is more
efficient and simpler for users afterwards. This moves complexity
to the way how the object is created, vs. how it is used later.
- ensure that we nm_explicit_bzero() private-key and preshared-key. However,
that only works to a certain point, because our netlink library does not
ensure that no data is leaked.
- don't use a "struct sockaddr" union for the peer's endpoint. Instead,
use a combintation of endpoint_family, endpoint_port, and
endpoint_addr.
- a lot of refactoring.
2018-09-07 09:54:07 +02:00
|
|
|
if (obj->link.type == NM_LINK_TYPE_WIREGUARD) {
|
|
|
|
|
const NMPObject *lnk_data_new = NULL;
|
|
|
|
|
struct nl_sock *genl = NM_LINUX_PLATFORM_GET_PRIVATE (platform)->genl;
|
|
|
|
|
|
|
|
|
|
/* The WireGuard kernel module does not yet send link update
|
|
|
|
|
* notifications, so we don't actually update the cache. For
|
|
|
|
|
* now, always refetch link data here. */
|
|
|
|
|
|
|
|
|
|
_lookup_cached_link (cache, obj->link.ifindex, completed_from_cache, &link_cached);
|
|
|
|
|
if ( link_cached
|
|
|
|
|
&& link_cached->_link.netlink.is_in_netlink
|
|
|
|
|
&& link_cached->link.type == NM_LINK_TYPE_WIREGUARD)
|
|
|
|
|
obj->_link.wireguard_family_id = link_cached->_link.wireguard_family_id;
|
|
|
|
|
else
|
|
|
|
|
obj->_link.wireguard_family_id = -1;
|
|
|
|
|
|
|
|
|
|
if (obj->_link.wireguard_family_id < 0)
|
|
|
|
|
obj->_link.wireguard_family_id = genl_ctrl_resolve (genl, "wireguard");
|
|
|
|
|
|
|
|
|
|
if (obj->_link.wireguard_family_id >= 0) {
|
|
|
|
|
lnk_data_new = _wireguard_read_info (platform,
|
|
|
|
|
genl,
|
|
|
|
|
obj->_link.wireguard_family_id,
|
|
|
|
|
obj->link.ifindex);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if ( lnk_data_new
|
|
|
|
|
&& obj->_link.netlink.lnk
|
|
|
|
|
&& nmp_object_equal (obj->_link.netlink.lnk, lnk_data_new))
|
|
|
|
|
nmp_object_unref (lnk_data_new);
|
|
|
|
|
else {
|
|
|
|
|
nmp_object_unref (obj->_link.netlink.lnk);
|
|
|
|
|
obj->_link.netlink.lnk = lnk_data_new;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-11-20 11:54:47 +01:00
|
|
|
obj->_link.netlink.is_in_netlink = TRUE;
|
2018-09-04 11:16:28 +02:00
|
|
|
return g_steal_pointer (&obj);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
}
|
|
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
/* Copied and heavily modified from libnl3's addr_msg_parser(). */
/* Parse an RTM_NEWADDR/RTM_DELADDR/RTM_GETADDR netlink message into a new
 * NMPObject of type NMP_OBJECT_TYPE_IP4_ADDRESS or NMP_OBJECT_TYPE_IP6_ADDRESS.
 *
 * Returns: a new NMPObject on success, or %NULL if the message is malformed
 *   or not an IPv4/IPv6 address. (@id_only is not used by this parser; the
 *   full object is always populated.)
 */
static NMPObject *
_new_from_nl_addr (struct nlmsghdr *nlh, gboolean id_only)
{
	static const struct nla_policy policy[] = {
		[IFA_LABEL]     = { .type = NLA_STRING,
		                    .maxlen = IFNAMSIZ },
		[IFA_CACHEINFO] = { .minlen = nm_offsetofend (struct ifa_cacheinfo, tstamp) },
		[IFA_FLAGS]     = { },
	};
	struct nlattr *tb[G_N_ELEMENTS (policy)];
	const struct ifaddrmsg *ifa;
	gboolean is_v4;
	nm_auto_nmpobj NMPObject *obj = NULL; /* auto-unref on early return */
	int addr_len;
	guint32 lifetime, preferred, timestamp;

	/* Reject messages too short to contain the fixed ifaddrmsg header. */
	if (!nlmsg_valid_hdr (nlh, sizeof (*ifa)))
		return NULL;

	ifa = nlmsg_data (nlh);

	/* Only IPv4/IPv6 addresses are of interest. */
	if (!NM_IN_SET (ifa->ifa_family, AF_INET, AF_INET6))
		return NULL;

	is_v4 = ifa->ifa_family == AF_INET;

	if (nlmsg_parse_arr (nlh, sizeof (*ifa), tb, policy) < 0)
		return NULL;

	addr_len = is_v4
	           ? sizeof (in_addr_t)
	           : sizeof (struct in6_addr);

	/* Sanity-check the prefix length against the address family. */
	if (ifa->ifa_prefixlen > (is_v4 ? 32 : 128))
		return NULL;

	/*****************************************************************/

	obj = nmp_object_new (is_v4 ? NMP_OBJECT_TYPE_IP4_ADDRESS : NMP_OBJECT_TYPE_IP6_ADDRESS, NULL);

	obj->ip_address.ifindex = ifa->ifa_index;
	obj->ip_address.plen = ifa->ifa_prefixlen;

	/* Note: these macros return NULL from this function if the attribute
	 * is present but has the wrong payload length. */
	_check_addr_or_return_null (tb, IFA_ADDRESS, addr_len);
	_check_addr_or_return_null (tb, IFA_LOCAL, addr_len);
	if (is_v4) {
		/* For IPv4, kernel omits IFA_LOCAL/IFA_ADDRESS if (and only if) they
		 * are effectively 0.0.0.0 (all-zero). */
		if (tb[IFA_LOCAL])
			memcpy (&obj->ip4_address.address, nla_data (tb[IFA_LOCAL]), addr_len);
		if (tb[IFA_ADDRESS])
			memcpy (&obj->ip4_address.peer_address, nla_data (tb[IFA_ADDRESS]), addr_len);
	} else {
		/* For IPv6, IFA_ADDRESS is always present.
		 *
		 * If IFA_LOCAL is missing, IFA_ADDRESS is @address and @peer_address
		 * is :: (all-zero).
		 *
		 * If unexpectely IFA_ADDRESS is missing, make the best of it -- but it _should_
		 * actually be there. */
		if (tb[IFA_ADDRESS] || tb[IFA_LOCAL]) {
			if (tb[IFA_LOCAL]) {
				memcpy (&obj->ip6_address.address, nla_data (tb[IFA_LOCAL]), addr_len);
				if (tb[IFA_ADDRESS])
					memcpy (&obj->ip6_address.peer_address, nla_data (tb[IFA_ADDRESS]), addr_len);
				else
					obj->ip6_address.peer_address = obj->ip6_address.address;
			} else
				memcpy (&obj->ip6_address.address, nla_data (tb[IFA_ADDRESS]), addr_len);
		}
	}

	obj->ip_address.addr_source = NM_IP_CONFIG_SOURCE_KERNEL;

	/* Prefer the 32-bit IFA_FLAGS attribute over the 8-bit header flags
	 * when the kernel provides it. */
	obj->ip_address.n_ifa_flags = tb[IFA_FLAGS]
	                              ? nla_get_u32 (tb[IFA_FLAGS])
	                              : ifa->ifa_flags;

	if (is_v4) {
		if (tb[IFA_LABEL]) {
			char label[IFNAMSIZ];

			nla_strlcpy (label, tb[IFA_LABEL], IFNAMSIZ);

			/* Check for ':'; we're only interested in labels used as interface aliases */
			if (strchr (label, ':'))
				g_strlcpy (obj->ip4_address.label, label, sizeof (obj->ip4_address.label));
		}
	}

	lifetime = NM_PLATFORM_LIFETIME_PERMANENT;
	preferred = NM_PLATFORM_LIFETIME_PERMANENT;
	timestamp = 0;
	/* IPv6 only */
	if (tb[IFA_CACHEINFO]) {
		const struct ifa_cacheinfo *ca;

		ca = nla_data_as (struct ifa_cacheinfo, tb[IFA_CACHEINFO]);
		lifetime = ca->ifa_valid;
		preferred = ca->ifa_prefered;
		timestamp = ca->tstamp;
	}
	/* Convert kernel-relative lifetimes/timestamps to the platform's
	 * internal representation. */
	_addrtime_get_lifetimes (timestamp,
	                         lifetime,
	                         preferred,
	                         &obj->ip_address.timestamp,
	                         &obj->ip_address.lifetime,
	                         &obj->ip_address.preferred);

	return g_steal_pointer (&obj);
}
|
|
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
/* Copied and heavily modified from libnl3's rtnl_route_parse() and parse_multipath(). */
/* Parse an RTM_NEWROUTE/RTM_DELROUTE/RTM_GETROUTE netlink message into a new
 * NMPObject of type NMP_OBJECT_TYPE_IP4_ROUTE or NMP_OBJECT_TYPE_IP6_ROUTE.
 *
 * Only "normal" unicast IPv4/IPv6 routes with a single nexthop are handled;
 * anything else (other families, non-unicast types, true multipath routes,
 * malformed attributes) yields %NULL. (@id_only is not used by this parser;
 * the full object is always populated.)
 */
static NMPObject *
_new_from_nl_route (struct nlmsghdr *nlh, gboolean id_only)
{
	static const struct nla_policy policy[] = {
		[RTA_TABLE]     = { .type = NLA_U32 },
		[RTA_IIF]       = { .type = NLA_U32 },
		[RTA_OIF]       = { .type = NLA_U32 },
		[RTA_PRIORITY]  = { .type = NLA_U32 },
		[RTA_PREF]      = { .type = NLA_U8 },
		[RTA_FLOW]      = { .type = NLA_U32 },
		[RTA_CACHEINFO] = { .minlen = nm_offsetofend (struct rta_cacheinfo, rta_tsage) },
		[RTA_METRICS]   = { .type = NLA_NESTED },
		[RTA_MULTIPATH] = { .type = NLA_NESTED },
	};
	const struct rtmsg *rtm;
	struct nlattr *tb[G_N_ELEMENTS (policy)];
	gboolean is_v4;
	nm_auto_nmpobj NMPObject *obj = NULL; /* auto-unref on early return */
	int addr_len;
	/* The single nexthop we accept; collected either from RTA_MULTIPATH
	 * or from the legacy RTA_OIF/RTA_GATEWAY attributes. */
	struct {
		gboolean is_present;
		int ifindex;
		NMIPAddr gateway;
	} nh = {
		.is_present = FALSE,
	};
	guint32 mss;
	guint32 window = 0;
	guint32 cwnd = 0;
	guint32 initcwnd = 0;
	guint32 initrwnd = 0;
	guint32 mtu = 0;
	guint32 lock = 0;

	/* Reject messages too short to contain the fixed rtmsg header. */
	if (!nlmsg_valid_hdr (nlh, sizeof (*rtm)))
		return NULL;

	rtm = nlmsg_data (nlh);

	/*****************************************************************
	 * only handle ~normal~ routes.
	 *****************************************************************/

	if (!NM_IN_SET (rtm->rtm_family, AF_INET, AF_INET6))
		return NULL;

	if (rtm->rtm_type != RTN_UNICAST)
		return NULL;

	if (nlmsg_parse_arr (nlh,
	                     sizeof (struct rtmsg),
	                     tb,
	                     policy) < 0)
		return NULL;

	/*****************************************************************/

	is_v4 = rtm->rtm_family == AF_INET;
	addr_len = is_v4
	           ? sizeof (in_addr_t)
	           : sizeof (struct in6_addr);

	/* Sanity-check the destination prefix length against the family. */
	if (rtm->rtm_dst_len > (is_v4 ? 32 : 128))
		return NULL;

	/*****************************************************************
	 * parse nexthops. Only handle routes with one nh.
	 *****************************************************************/

	if (tb[RTA_MULTIPATH]) {
		size_t tlen = nla_len (tb[RTA_MULTIPATH]);
		struct rtnexthop *rtnh;

		/* Stop walking as soon as the remaining payload cannot hold
		 * another complete rtnexthop entry. */
		if (tlen < sizeof (*rtnh))
			goto rta_multipath_done;

		rtnh = nla_data_as (struct rtnexthop, tb[RTA_MULTIPATH]);

		if (tlen < rtnh->rtnh_len)
			goto rta_multipath_done;

		while (TRUE) {
			if (nh.is_present) {
				/* we don't support multipath routes. */
				return NULL;
			}

			nh.is_present = TRUE;
			nh.ifindex = rtnh->rtnh_ifindex;

			/* Nested attributes (e.g. RTA_GATEWAY) follow the fixed
			 * rtnexthop struct when rtnh_len exceeds its size. Note the
			 * outer @policy is reused for the nested attributes. */
			if (rtnh->rtnh_len > sizeof (*rtnh)) {
				struct nlattr *ntb[G_N_ELEMENTS (policy)];

				if (nla_parse_arr (ntb,
				                   (struct nlattr *) RTNH_DATA (rtnh),
				                   rtnh->rtnh_len - sizeof (*rtnh),
				                   policy) < 0)
					return NULL;

				if (_check_addr_or_return_null (ntb, RTA_GATEWAY, addr_len))
					memcpy (&nh.gateway, nla_data (ntb[RTA_GATEWAY]), addr_len);
			}

			/* No room left for a further entry after this one. */
			if (tlen < RTNH_ALIGN (rtnh->rtnh_len) + sizeof (*rtnh))
				goto rta_multipath_done;

			tlen -= RTNH_ALIGN (rtnh->rtnh_len);
			rtnh = RTNH_NEXT (rtnh);
		}
rta_multipath_done:
		;
	}

	if (   tb[RTA_OIF]
	    || tb[RTA_GATEWAY]
	    || tb[RTA_FLOW]) {
		int ifindex = 0;
		NMIPAddr gateway = { };

		if (tb[RTA_OIF])
			ifindex = nla_get_u32 (tb[RTA_OIF]);
		if (_check_addr_or_return_null (tb, RTA_GATEWAY, addr_len))
			memcpy (&gateway, nla_data (tb[RTA_GATEWAY]), addr_len);

		if (!nh.is_present) {
			/* If no nexthops have been provided via RTA_MULTIPATH
			 * we add it as regular nexthop to maintain backwards
			 * compatibility */
			nh.ifindex = ifindex;
			nh.gateway = gateway;
		} else {
			/* Kernel supports new style nexthop configuration,
			 * verify that it is a duplicate and ignore old-style nexthop. */
			if (   nh.ifindex != ifindex
			    || memcmp (&nh.gateway, &gateway, addr_len) != 0)
				return NULL;
		}
	} else if (!nh.is_present)
		return NULL;

	/*****************************************************************/

	/* Route metrics (RTA_METRICS) are a nested attribute set. */
	mss = 0;
	if (tb[RTA_METRICS]) {
		static const struct nla_policy rtax_policy[] = {
			[RTAX_LOCK]     = { .type = NLA_U32 },
			[RTAX_ADVMSS]   = { .type = NLA_U32 },
			[RTAX_WINDOW]   = { .type = NLA_U32 },
			[RTAX_CWND]     = { .type = NLA_U32 },
			[RTAX_INITCWND] = { .type = NLA_U32 },
			[RTAX_INITRWND] = { .type = NLA_U32 },
			[RTAX_MTU]      = { .type = NLA_U32 },
		};
		struct nlattr *mtb[G_N_ELEMENTS (rtax_policy)];

		if (nla_parse_nested_arr (mtb, tb[RTA_METRICS], rtax_policy) < 0)
			return NULL;

		if (mtb[RTAX_LOCK])
			lock = nla_get_u32 (mtb[RTAX_LOCK]);
		if (mtb[RTAX_ADVMSS])
			mss = nla_get_u32 (mtb[RTAX_ADVMSS]);
		if (mtb[RTAX_WINDOW])
			window = nla_get_u32 (mtb[RTAX_WINDOW]);
		if (mtb[RTAX_CWND])
			cwnd = nla_get_u32 (mtb[RTAX_CWND]);
		if (mtb[RTAX_INITCWND])
			initcwnd = nla_get_u32 (mtb[RTAX_INITCWND]);
		if (mtb[RTAX_INITRWND])
			initrwnd = nla_get_u32 (mtb[RTAX_INITRWND]);
		if (mtb[RTAX_MTU])
			mtu = nla_get_u32 (mtb[RTAX_MTU]);
	}

	/*****************************************************************/

	obj = nmp_object_new (is_v4 ? NMP_OBJECT_TYPE_IP4_ROUTE : NMP_OBJECT_TYPE_IP6_ROUTE, NULL);

	/* Prefer the 32-bit RTA_TABLE attribute over the 8-bit header field. */
	obj->ip_route.table_coerced = nm_platform_route_table_coerce ( tb[RTA_TABLE]
	                                                             ? nla_get_u32 (tb[RTA_TABLE])
	                                                             : (guint32) rtm->rtm_table);

	obj->ip_route.ifindex = nh.ifindex;

	if (_check_addr_or_return_null (tb, RTA_DST, addr_len))
		memcpy (obj->ip_route.network_ptr, nla_data (tb[RTA_DST]), addr_len);

	obj->ip_route.plen = rtm->rtm_dst_len;

	if (tb[RTA_PRIORITY])
		obj->ip_route.metric = nla_get_u32 (tb[RTA_PRIORITY]);

	if (is_v4)
		obj->ip4_route.gateway = nh.gateway.addr4;
	else
		obj->ip6_route.gateway = nh.gateway.addr6;

	if (is_v4)
		obj->ip4_route.scope_inv = nm_platform_route_scope_inv (rtm->rtm_scope);

	if (_check_addr_or_return_null (tb, RTA_PREFSRC, addr_len)) {
		if (is_v4)
			memcpy (&obj->ip4_route.pref_src, nla_data (tb[RTA_PREFSRC]), addr_len);
		else
			memcpy (&obj->ip6_route.pref_src, nla_data (tb[RTA_PREFSRC]), addr_len);
	}

	if (is_v4)
		obj->ip4_route.tos = rtm->rtm_tos;
	else {
		/* Source routing (RTA_SRC/rtm_src_len) only applies to IPv6 here. */
		if (tb[RTA_SRC]) {
			_check_addr_or_return_null (tb, RTA_SRC, addr_len);
			memcpy (&obj->ip6_route.src, nla_data (tb[RTA_SRC]), addr_len);
		}
		obj->ip6_route.src_plen = rtm->rtm_src_len;
	}

	obj->ip_route.mss = mss;
	obj->ip_route.window = window;
	obj->ip_route.cwnd = cwnd;
	obj->ip_route.initcwnd = initcwnd;
	obj->ip_route.initrwnd = initrwnd;
	obj->ip_route.mtu = mtu;
	/* RTAX_LOCK is a bitmask telling which metrics are "locked". */
	obj->ip_route.lock_window = NM_FLAGS_HAS (lock, 1 << RTAX_WINDOW);
	obj->ip_route.lock_cwnd = NM_FLAGS_HAS (lock, 1 << RTAX_CWND);
	obj->ip_route.lock_initcwnd = NM_FLAGS_HAS (lock, 1 << RTAX_INITCWND);
	obj->ip_route.lock_initrwnd = NM_FLAGS_HAS (lock, 1 << RTAX_INITRWND);
	obj->ip_route.lock_mtu = NM_FLAGS_HAS (lock, 1 << RTAX_MTU);

	if (!is_v4) {
		/* Detect support for RTA_PREF by inspecting the netlink message. */
		if (_support_rta_pref_still_undecided ())
			_support_rta_pref_detect (tb);

		if (tb[RTA_PREF])
			obj->ip6_route.rt_pref = nla_get_u8 (tb[RTA_PREF]);
	}

	obj->ip_route.r_rtm_flags = rtm->rtm_flags;
	obj->ip_route.rt_source = nmp_utils_ip_config_source_from_rtprot (rtm->rtm_protocol);

	return g_steal_pointer (&obj);
}
|
|
|
|
|
|
2017-11-15 20:36:35 +01:00
|
|
|
static NMPObject *
|
|
|
|
|
_new_from_nl_qdisc (struct nlmsghdr *nlh, gboolean id_only)
|
|
|
|
|
{
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy[] = {
|
2017-11-15 20:36:35 +01:00
|
|
|
[TCA_KIND] = { .type = NLA_STRING },
|
|
|
|
|
};
|
2019-02-15 11:33:57 +01:00
|
|
|
struct nlattr *tb[G_N_ELEMENTS (policy)];
|
|
|
|
|
const struct tcmsg *tcm;
|
|
|
|
|
NMPObject *obj;
|
2017-11-15 20:36:35 +01:00
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nlmsg_parse_arr (nlh,
|
|
|
|
|
sizeof (*tcm),
|
|
|
|
|
tb,
|
|
|
|
|
policy) < 0)
|
2017-11-15 20:36:35 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
if (!tb[TCA_KIND])
|
|
|
|
|
return NULL;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
tcm = nlmsg_data (nlh);
|
|
|
|
|
|
2017-11-15 20:36:35 +01:00
|
|
|
obj = nmp_object_new (NMP_OBJECT_TYPE_QDISC, NULL);
|
|
|
|
|
|
|
|
|
|
obj->qdisc.kind = g_intern_string (nla_get_string (tb[TCA_KIND]));
|
|
|
|
|
obj->qdisc.ifindex = tcm->tcm_ifindex;
|
|
|
|
|
obj->qdisc.addr_family = tcm->tcm_family;
|
|
|
|
|
obj->qdisc.handle = tcm->tcm_handle;
|
|
|
|
|
obj->qdisc.parent = tcm->tcm_parent;
|
|
|
|
|
obj->qdisc.info = tcm->tcm_info;
|
|
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
|
}
|
|
|
|
|
|
2017-11-15 20:36:35 +01:00
|
|
|
static NMPObject *
|
|
|
|
|
_new_from_nl_tfilter (struct nlmsghdr *nlh, gboolean id_only)
|
|
|
|
|
{
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy[] = {
|
2017-11-15 20:36:35 +01:00
|
|
|
[TCA_KIND] = { .type = NLA_STRING },
|
|
|
|
|
};
|
2019-02-15 11:33:57 +01:00
|
|
|
struct nlattr *tb[G_N_ELEMENTS (policy)];
|
|
|
|
|
NMPObject *obj = NULL;
|
|
|
|
|
const struct tcmsg *tcm;
|
2017-11-15 20:36:35 +01:00
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nlmsg_parse_arr (nlh, sizeof (*tcm), tb, policy) < 0)
|
2017-11-15 20:36:35 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
if (!tb[TCA_KIND])
|
|
|
|
|
return NULL;
|
|
|
|
|
|
2019-02-15 11:33:57 +01:00
|
|
|
tcm = nlmsg_data (nlh);
|
|
|
|
|
|
2017-11-15 20:36:35 +01:00
|
|
|
obj = nmp_object_new (NMP_OBJECT_TYPE_TFILTER, NULL);
|
|
|
|
|
|
|
|
|
|
obj->tfilter.kind = g_intern_string (nla_get_string (tb[TCA_KIND]));
|
|
|
|
|
obj->tfilter.ifindex = tcm->tcm_ifindex;
|
|
|
|
|
obj->tfilter.addr_family = tcm->tcm_family;
|
|
|
|
|
obj->tfilter.handle = tcm->tcm_handle;
|
|
|
|
|
obj->tfilter.parent = tcm->tcm_parent;
|
|
|
|
|
obj->tfilter.info = tcm->tcm_info;
|
|
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
|
}
|
|
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
/**
 * nmp_object_new_from_nl:
 * @platform: (allow-none): for creating certain objects, the constructor wants to check
 *   sysfs. For this the platform instance is needed. If missing, the object might not
 *   be correctly detected.
 * @cache: (allow-none): for certain objects, the netlink message doesn't contain all the information.
 *   If a cache is given, the object is completed with information from the cache.
 * @msg: the netlink message (only NETLINK_ROUTE messages are handled)
 * @id_only: whether only to create an empty object with only the ID fields set.
 *
 * Dispatches on the rtnetlink message type to the matching _new_from_nl_*()
 * parser (link, address, route, qdisc, tfilter).
 *
 * Returns: %NULL or a newly created NMPObject instance.
 **/
static NMPObject *
nmp_object_new_from_nl (NMPlatform *platform, const NMPCache *cache, struct nl_msg *msg, gboolean id_only)
{
	struct nlmsghdr *msghdr;

	/* Only rtnetlink messages carry the object types we track. */
	if (nlmsg_get_proto (msg) != NETLINK_ROUTE)
		return NULL;

	msghdr = nlmsg_hdr (msg);

	switch (msghdr->nlmsg_type) {
	case RTM_NEWLINK:
	case RTM_DELLINK:
	case RTM_GETLINK:
	case RTM_SETLINK:
		return _new_from_nl_link (platform, cache, msghdr, id_only);
	case RTM_NEWADDR:
	case RTM_DELADDR:
	case RTM_GETADDR:
		return _new_from_nl_addr (msghdr, id_only);
	case RTM_NEWROUTE:
	case RTM_DELROUTE:
	case RTM_GETROUTE:
		return _new_from_nl_route (msghdr, id_only);
	case RTM_NEWQDISC:
	case RTM_DELQDISC:
	case RTM_GETQDISC:
		return _new_from_nl_qdisc (msghdr, id_only);
	case RTM_NEWTFILTER:
	case RTM_DELTFILTER:
	case RTM_GETTFILTER:
		return _new_from_nl_tfilter (msghdr, id_only);
	default:
		/* Unhandled message type: not an error, just not ours. */
		return NULL;
	}
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2015-10-12 16:07:01 +02:00
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
/* Append an IFLA_AF_SPEC section (AF_INET6 sub-attributes) to a link message.
 *
 * @msg: the netlink message under construction.
 * @addr_gen_mode: IPv6 address-generation mode to set, or a negative value
 *   to leave it unspecified.
 * @iid: (allow-none): IPv6 interface identifier to set as IFLA_INET6_TOKEN.
 *
 * Note: an IFLA_AF_SPEC nest is opened unconditionally, even when both
 * addr_gen_mode < 0 and iid is NULL (the AF_INET6 nest is then omitted).
 *
 * Returns: %TRUE on success, %FALSE if the message ran out of space
 *   (NLA_PUT* macros jump to nla_put_failure on error). */
static gboolean
_nl_msg_new_link_set_afspec (struct nl_msg *msg,
                             int addr_gen_mode,
                             NMUtilsIPv6IfaceId *iid)
{
	struct nlattr *af_spec;
	struct nlattr *af_attr;

	nm_assert (msg);

	if (!(af_spec = nla_nest_start (msg, IFLA_AF_SPEC)))
		goto nla_put_failure;

	if (addr_gen_mode >= 0 || iid) {
		if (!(af_attr = nla_nest_start (msg, AF_INET6)))
			goto nla_put_failure;

		if (addr_gen_mode >= 0)
			NLA_PUT_U8 (msg, IFLA_INET6_ADDR_GEN_MODE, addr_gen_mode);

		if (iid) {
			struct in6_addr i6_token = { .s6_addr = { 0, } };

			/* Expand the 64-bit iid into the low bits of an IPv6 address
			 * as kernel expects for the token attribute. */
			nm_utils_ipv6_addr_set_interface_identifier (&i6_token, *iid);
			NLA_PUT (msg, IFLA_INET6_TOKEN, sizeof (struct in6_addr), &i6_token);
		}

		nla_nest_end (msg, af_attr);
	}

	nla_nest_end (msg, af_spec);

	return TRUE;
nla_put_failure:
	return FALSE;
}
|
|
|
|
|
|
|
|
|
|
/* Append an IFLA_LINKINFO section announcing the link kind (and, for veth,
 * the peer device) to a link message.
 *
 * @msg: the netlink message under construction.
 * @link_type: the NMLinkType; must map to an rtnl kind string.
 * @veth_peer: name of the veth peer; must be non-NULL exactly when
 *   @link_type is NM_LINK_TYPE_VETH (asserted below).
 *
 * Returns: %TRUE on success; on failure hits g_return_val_if_reached(),
 *   i.e. failure here is considered a programming error. */
static gboolean
_nl_msg_new_link_set_linkinfo (struct nl_msg *msg,
                               NMLinkType link_type,
                               const char *veth_peer)
{
	struct nlattr *info;
	const char *kind;

	nm_assert (msg);
	nm_assert (!!veth_peer == (link_type == NM_LINK_TYPE_VETH));

	kind = nm_link_type_to_rtnl_type_string (link_type);
	if (!kind)
		goto nla_put_failure;

	if (!(info = nla_nest_start (msg, IFLA_LINKINFO)))
		goto nla_put_failure;

	NLA_PUT_STRING (msg, IFLA_INFO_KIND, kind);

	if (veth_peer) {
		/* The peer is described by a nested ifinfomsg of its own. */
		const struct ifinfomsg ifi = { };
		struct nlattr *data, *info_peer;

		if (!(data = nla_nest_start (msg, IFLA_INFO_DATA)))
			goto nla_put_failure;
		/* VETH_INFO_PEER (1) is not exported by the headers we include,
		 * hence the literal. */
		if (!(info_peer = nla_nest_start (msg, 1 /*VETH_INFO_PEER*/)))
			goto nla_put_failure;
		if (nlmsg_append_struct (msg, &ifi) < 0)
			goto nla_put_failure;
		NLA_PUT_STRING (msg, IFLA_IFNAME, veth_peer);
		nla_nest_end (msg, info_peer);
		nla_nest_end (msg, data);
	}

	nla_nest_end (msg, info);

	return TRUE;
nla_put_failure:
	g_return_val_if_reached (FALSE);
}
|
|
|
|
|
|
|
|
|
|
/* Append an IFLA_LINKINFO "vlan" section with id, flags and ingress/egress
 * QoS priority mappings to a link message.
 *
 * @msg: the netlink message under construction.
 * @vlan_id: VLAN id to set, or negative to omit.
 * @flags_mask/@flags_set: which VLAN flags to change and their values.
 * @ingress_qos/@ingress_qos_len: ingress priority mappings (may be NULL/0).
 * @egress_qos/@egress_qos_len: egress priority mappings (may be NULL/0).
 *
 * Returns: %TRUE on success (including the no-op case where nothing needs
 *   to be added), %FALSE if the message ran out of space. */
static gboolean
_nl_msg_new_link_set_linkinfo_vlan (struct nl_msg *msg,
                                    int vlan_id,
                                    guint32 flags_mask,
                                    guint32 flags_set,
                                    const NMVlanQosMapping *ingress_qos,
                                    int ingress_qos_len,
                                    const NMVlanQosMapping *egress_qos,
                                    int egress_qos_len)
{
	struct nlattr *info;
	struct nlattr *data;
	guint i;
	gboolean has_any_vlan_properties = FALSE;

/* A priority is only 3 bits wide; anything larger is invalid. Used on the
 * ingress "from" and egress "to" side respectively. */
#define VLAN_XGRESS_PRIO_VALID(from) (((from) & ~(guint32) 0x07) == 0)

	nm_assert (msg);

	/* We must not create an empty IFLA_LINKINFO section. Otherwise, kernel
	 * rejects the request as invalid. */
	if (   flags_mask != 0
	    || vlan_id >= 0)
		has_any_vlan_properties = TRUE;
	if (   !has_any_vlan_properties
	    && ingress_qos && ingress_qos_len > 0) {
		for (i = 0; i < ingress_qos_len; i++) {
			if (VLAN_XGRESS_PRIO_VALID (ingress_qos[i].from)) {
				has_any_vlan_properties = TRUE;
				break;
			}
		}
	}
	if (   !has_any_vlan_properties
	    && egress_qos && egress_qos_len > 0) {
		for (i = 0; i < egress_qos_len; i++) {
			if (VLAN_XGRESS_PRIO_VALID (egress_qos[i].to)) {
				has_any_vlan_properties = TRUE;
				break;
			}
		}
	}
	if (!has_any_vlan_properties)
		return TRUE;

	if (!(info = nla_nest_start (msg, IFLA_LINKINFO)))
		goto nla_put_failure;

	NLA_PUT_STRING (msg, IFLA_INFO_KIND, "vlan");

	if (!(data = nla_nest_start (msg, IFLA_INFO_DATA)))
		goto nla_put_failure;

	if (vlan_id >= 0)
		NLA_PUT_U16 (msg, IFLA_VLAN_ID, vlan_id);

	if (flags_mask != 0) {
		struct ifla_vlan_flags flags = {
			.flags = flags_mask & flags_set,
			.mask = flags_mask,
		};

		NLA_PUT (msg, IFLA_VLAN_FLAGS, sizeof (flags), &flags);
	}

	if (ingress_qos && ingress_qos_len > 0) {
		struct nlattr *qos = NULL;

		for (i = 0; i < ingress_qos_len; i++) {
			/* Silently ignore invalid mappings. Kernel would truncate
			 * them and modify the wrong mapping. */
			if (VLAN_XGRESS_PRIO_VALID (ingress_qos[i].from)) {
				/* Open the nest lazily so we never emit an empty one. */
				if (!qos) {
					if (!(qos = nla_nest_start (msg, IFLA_VLAN_INGRESS_QOS)))
						goto nla_put_failure;
				}
				/* NOTE(review): the attribute type is the loop index @i,
				 * not a fixed IFLA_VLAN_QOS_* constant — kernel iterates
				 * the nest regardless of attribute type. */
				NLA_PUT (msg, i, sizeof (ingress_qos[i]), &ingress_qos[i]);
			}
		}

		if (qos)
			nla_nest_end (msg, qos);
	}

	if (egress_qos && egress_qos_len > 0) {
		struct nlattr *qos = NULL;

		for (i = 0; i < egress_qos_len; i++) {
			if (VLAN_XGRESS_PRIO_VALID (egress_qos[i].to)) {
				if (!qos) {
					if (!(qos = nla_nest_start (msg, IFLA_VLAN_EGRESS_QOS)))
						goto nla_put_failure;
				}
				NLA_PUT (msg, i, sizeof (egress_qos[i]), &egress_qos[i]);
			}
		}

		if (qos)
			nla_nest_end (msg, qos);
	}

	nla_nest_end (msg, data);
	nla_nest_end (msg, info);

	return TRUE;
nla_put_failure:
	return FALSE;
}
|
|
|
|
|
|
|
|
|
|
/* Allocate a new RTM_NEWLINK/RTM_DELLINK/RTM_GETLINK message with an
 * ifinfomsg header and an optional IFLA_IFNAME attribute.
 *
 * @nlmsg_type: one of RTM_NEWLINK, RTM_DELLINK, RTM_GETLINK (asserted).
 * @nlmsg_flags: NLM_F_* flags for the message header.
 * @ifindex: interface index, or 0 to address the link by name only.
 * @ifname: (allow-none): interface name attribute.
 * @flags_mask/@flags_set: IFF_* flags to change and their values.
 *
 * Returns: the new message (caller owns it); failure to append is treated
 *   as a programming error via g_return_val_if_reached(). */
static struct nl_msg *
_nl_msg_new_link (int nlmsg_type,
                  int nlmsg_flags,
                  int ifindex,
                  const char *ifname,
                  unsigned flags_mask,
                  unsigned flags_set)
{
	nm_auto_nlmsg struct nl_msg *msg = NULL;
	const struct ifinfomsg ifi = {
		.ifi_change = flags_mask,
		.ifi_flags = flags_set,
		.ifi_index = ifindex,
	};

	nm_assert (NM_IN_SET (nlmsg_type, RTM_DELLINK, RTM_NEWLINK, RTM_GETLINK));

	msg = nlmsg_alloc_simple (nlmsg_type, nlmsg_flags);

	if (nlmsg_append_struct (msg, &ifi) < 0)
		goto nla_put_failure;

	if (ifname)
		NLA_PUT_STRING (msg, IFLA_IFNAME, ifname);

	/* Transfer ownership to the caller; the nm_auto_nlmsg cleanup then
	 * sees NULL and does nothing. */
	return g_steal_pointer (&msg);

nla_put_failure:
	g_return_val_if_reached (NULL);
}
|
|
|
|
|
|
|
|
|
|
/* Copied and modified from libnl3's build_addr_msg(). */
|
|
|
|
|
static struct nl_msg *
|
|
|
|
|
_nl_msg_new_address (int nlmsg_type,
|
|
|
|
|
int nlmsg_flags,
|
|
|
|
|
int family,
|
|
|
|
|
int ifindex,
|
|
|
|
|
gconstpointer address,
|
2016-04-06 18:04:26 +02:00
|
|
|
guint8 plen,
|
2015-10-20 09:27:16 +02:00
|
|
|
gconstpointer peer_address,
|
|
|
|
|
guint32 flags,
|
|
|
|
|
int scope,
|
|
|
|
|
guint32 lifetime,
|
|
|
|
|
guint32 preferred,
|
|
|
|
|
const char *label)
|
|
|
|
|
{
|
2019-02-19 16:48:25 +01:00
|
|
|
nm_auto_nlmsg struct nl_msg *msg = NULL;
|
2015-10-20 09:27:16 +02:00
|
|
|
struct ifaddrmsg am = {
|
|
|
|
|
.ifa_family = family,
|
|
|
|
|
.ifa_index = ifindex,
|
|
|
|
|
.ifa_prefixlen = plen,
|
|
|
|
|
.ifa_flags = flags,
|
|
|
|
|
};
|
|
|
|
|
gsize addr_len;
|
|
|
|
|
|
|
|
|
|
nm_assert (NM_IN_SET (family, AF_INET, AF_INET6));
|
|
|
|
|
nm_assert (NM_IN_SET (nlmsg_type, RTM_NEWADDR, RTM_DELADDR));
|
|
|
|
|
|
|
|
|
|
msg = nlmsg_alloc_simple (nlmsg_type, nlmsg_flags);
|
|
|
|
|
|
|
|
|
|
if (scope == -1) {
|
|
|
|
|
/* Allow having scope unset, and detect the scope (including IPv4 compatibility hack). */
|
|
|
|
|
if ( family == AF_INET
|
|
|
|
|
&& address
|
|
|
|
|
&& *((char *) address) == 127)
|
|
|
|
|
scope = RT_SCOPE_HOST;
|
|
|
|
|
else
|
|
|
|
|
scope = RT_SCOPE_UNIVERSE;
|
|
|
|
|
}
|
|
|
|
|
am.ifa_scope = scope,
|
|
|
|
|
|
|
|
|
|
addr_len = family == AF_INET ? sizeof (in_addr_t) : sizeof (struct in6_addr);
|
|
|
|
|
|
2019-02-19 16:35:59 +01:00
|
|
|
if (nlmsg_append_struct (msg, &am) < 0)
|
2015-10-20 09:27:16 +02:00
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
|
|
if (address)
|
|
|
|
|
NLA_PUT (msg, IFA_LOCAL, addr_len, address);
|
|
|
|
|
|
|
|
|
|
if (peer_address)
|
|
|
|
|
NLA_PUT (msg, IFA_ADDRESS, addr_len, peer_address);
|
|
|
|
|
else if (address)
|
|
|
|
|
NLA_PUT (msg, IFA_ADDRESS, addr_len, address);
|
|
|
|
|
|
|
|
|
|
if (label && label[0])
|
|
|
|
|
NLA_PUT_STRING (msg, IFA_LABEL, label);
|
|
|
|
|
|
|
|
|
|
if ( family == AF_INET
|
|
|
|
|
&& nlmsg_type != RTM_DELADDR
|
|
|
|
|
&& address
|
|
|
|
|
&& *((in_addr_t *) address) != 0) {
|
|
|
|
|
in_addr_t broadcast;
|
|
|
|
|
|
2017-09-05 15:25:34 +02:00
|
|
|
broadcast = *((in_addr_t *) address) | ~_nm_utils_ip4_prefix_to_netmask (plen);
|
2015-10-20 09:27:16 +02:00
|
|
|
NLA_PUT (msg, IFA_BROADCAST, addr_len, &broadcast);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if ( lifetime != NM_PLATFORM_LIFETIME_PERMANENT
|
|
|
|
|
|| preferred != NM_PLATFORM_LIFETIME_PERMANENT) {
|
|
|
|
|
struct ifa_cacheinfo ca = {
|
|
|
|
|
.ifa_valid = lifetime,
|
|
|
|
|
.ifa_prefered = preferred,
|
|
|
|
|
};
|
|
|
|
|
|
2019-02-17 11:12:02 +01:00
|
|
|
NLA_PUT (msg, IFA_CACHEINFO, sizeof (ca), &ca);
|
2015-10-20 09:27:16 +02:00
|
|
|
}
|
|
|
|
|
|
2016-02-29 17:06:21 +01:00
|
|
|
if (flags & ~((guint32) 0xFF)) {
|
2015-10-20 09:27:16 +02:00
|
|
|
/* only set the IFA_FLAGS attribute, if they actually contain additional
|
|
|
|
|
* flags that are not already set to am.ifa_flags.
|
|
|
|
|
*
|
|
|
|
|
* Older kernels refuse RTM_NEWADDR and RTM_NEWROUTE messages with EINVAL
|
|
|
|
|
* if they contain unknown netlink attributes. See net/core/rtnetlink.c, which
|
|
|
|
|
* was fixed by kernel commit 661d2967b3f1b34eeaa7e212e7b9bbe8ee072b59. */
|
|
|
|
|
NLA_PUT_U32 (msg, IFA_FLAGS, flags);
|
|
|
|
|
}
|
|
|
|
|
|
2019-02-19 16:48:25 +01:00
|
|
|
return g_steal_pointer (&msg);
|
2015-10-20 09:27:16 +02:00
|
|
|
|
|
|
|
|
nla_put_failure:
|
|
|
|
|
g_return_val_if_reached (NULL);
|
|
|
|
|
}
|
|
|
|
|
|
2017-07-27 07:21:01 +02:00
|
|
|
static guint32
|
|
|
|
|
ip_route_get_lock_flag (const NMPlatformIPRoute *route)
|
|
|
|
|
{
|
|
|
|
|
return (((guint32) route->lock_window) << RTAX_WINDOW)
|
|
|
|
|
| (((guint32) route->lock_cwnd) << RTAX_CWND)
|
|
|
|
|
| (((guint32) route->lock_initcwnd) << RTAX_INITCWND)
|
|
|
|
|
| (((guint32) route->lock_initrwnd) << RTAX_INITRWND)
|
|
|
|
|
| (((guint32) route->lock_mtu) << RTAX_MTU);
|
|
|
|
|
}
|
|
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
/* Copied and modified from libnl3's build_route_msg() and rtnl_route_build_msg(). */
/* Build an RTM_NEWROUTE/RTM_DELROUTE message from a cached route object.
 *
 * @nlmsg_type: RTM_NEWROUTE or RTM_DELROUTE (asserted).
 * @nlmsgflags: NLM_F_* flags.
 * @obj: a NMP_OBJECT_TYPE_IP4_ROUTE or NMP_OBJECT_TYPE_IP6_ROUTE object
 *   (asserted); all route properties are taken from here.
 *
 * Returns: the new message (caller owns it); failure to append is treated
 *   as a programming error via g_return_val_if_reached(). */
static struct nl_msg *
_nl_msg_new_route (int nlmsg_type,
                   guint16 nlmsgflags,
                   const NMPObject *obj)
{
	nm_auto_nlmsg struct nl_msg *msg = NULL;
	const NMPClass *klass = NMP_OBJECT_GET_CLASS (obj);
	gboolean is_v4 = klass->addr_family == AF_INET;
	const guint32 lock = ip_route_get_lock_flag (NMP_OBJECT_CAST_IP_ROUTE (obj));
	const guint32 table = nm_platform_route_table_uncoerce (NMP_OBJECT_CAST_IP_ROUTE (obj)->table_coerced, TRUE);
	const struct rtmsg rtmsg = {
		.rtm_family = klass->addr_family,
		.rtm_tos = is_v4
		           ? obj->ip4_route.tos
		           : 0,
		/* rtm_table is 8 bits; larger table numbers are sent via the
		 * RTA_TABLE attribute below. */
		.rtm_table = table <= 0xFF ? table : RT_TABLE_UNSPEC,
		.rtm_protocol = nmp_utils_ip_config_source_coerce_to_rtprot (obj->ip_route.rt_source),
		.rtm_scope = is_v4
		             ? nm_platform_route_scope_inv (obj->ip4_route.scope_inv)
		             : RT_SCOPE_NOWHERE,
		.rtm_type = RTN_UNICAST,
		/* Only RTNH_F_ONLINK is passed through, and only for IPv4. */
		.rtm_flags = obj->ip_route.r_rtm_flags & (is_v4
		                                          ? (unsigned) (RTNH_F_ONLINK)
		                                          : (unsigned) 0),
		.rtm_dst_len = obj->ip_route.plen,
		.rtm_src_len = is_v4
		               ? 0
		               : NMP_OBJECT_CAST_IP6_ROUTE (obj)->src_plen,
	};
	gsize addr_len;

	nm_assert (NM_IN_SET (NMP_OBJECT_GET_TYPE (obj), NMP_OBJECT_TYPE_IP4_ROUTE, NMP_OBJECT_TYPE_IP6_ROUTE));
	nm_assert (NM_IN_SET (nlmsg_type, RTM_NEWROUTE, RTM_DELROUTE));

	msg = nlmsg_alloc_simple (nlmsg_type, (int) nlmsgflags);

	if (nlmsg_append_struct (msg, &rtmsg) < 0)
		goto nla_put_failure;

	addr_len = is_v4
	           ? sizeof (in_addr_t)
	           : sizeof (struct in6_addr);

	NLA_PUT (msg, RTA_DST, addr_len,
	         is_v4
	         ? (gconstpointer) &obj->ip4_route.network
	         : (gconstpointer) &obj->ip6_route.network);

	/* Source routing (RTA_SRC) only exists for IPv6. */
	if (!is_v4) {
		if (!IN6_IS_ADDR_UNSPECIFIED (&NMP_OBJECT_CAST_IP6_ROUTE (obj)->src))
			NLA_PUT (msg, RTA_SRC, addr_len, &obj->ip6_route.src);
	}

	NLA_PUT_U32 (msg, RTA_PRIORITY, obj->ip_route.metric);

	if (table > 0xFF)
		NLA_PUT_U32 (msg, RTA_TABLE, table);

	if (is_v4) {
		if (NMP_OBJECT_CAST_IP4_ROUTE (obj)->pref_src)
			NLA_PUT (msg, RTA_PREFSRC, addr_len, &obj->ip4_route.pref_src);
	} else {
		if (!IN6_IS_ADDR_UNSPECIFIED (&NMP_OBJECT_CAST_IP6_ROUTE (obj)->pref_src))
			NLA_PUT (msg, RTA_PREFSRC, addr_len, &obj->ip6_route.pref_src);
	}

	/* Emit the RTA_METRICS nest only when at least one metric (or a lock
	 * bit) is actually set. */
	if (   obj->ip_route.mss
	    || obj->ip_route.window
	    || obj->ip_route.cwnd
	    || obj->ip_route.initcwnd
	    || obj->ip_route.initrwnd
	    || obj->ip_route.mtu
	    || lock) {
		struct nlattr *metrics;

		metrics = nla_nest_start (msg, RTA_METRICS);
		if (!metrics)
			goto nla_put_failure;

		if (obj->ip_route.mss)
			NLA_PUT_U32 (msg, RTAX_ADVMSS, obj->ip_route.mss);
		if (obj->ip_route.window)
			NLA_PUT_U32 (msg, RTAX_WINDOW, obj->ip_route.window);
		if (obj->ip_route.cwnd)
			NLA_PUT_U32 (msg, RTAX_CWND, obj->ip_route.cwnd);
		if (obj->ip_route.initcwnd)
			NLA_PUT_U32 (msg, RTAX_INITCWND, obj->ip_route.initcwnd);
		if (obj->ip_route.initrwnd)
			NLA_PUT_U32 (msg, RTAX_INITRWND, obj->ip_route.initrwnd);
		if (obj->ip_route.mtu)
			NLA_PUT_U32 (msg, RTAX_MTU, obj->ip_route.mtu);
		if (lock)
			NLA_PUT_U32 (msg, RTAX_LOCK, lock);

		nla_nest_end (msg, metrics);
	}

	/* We currently don't have need for multi-hop routes... */
	if (is_v4) {
		NLA_PUT (msg, RTA_GATEWAY, addr_len, &obj->ip4_route.gateway);
	} else {
		if (!IN6_IS_ADDR_UNSPECIFIED (&obj->ip6_route.gateway))
			NLA_PUT (msg, RTA_GATEWAY, addr_len, &obj->ip6_route.gateway);
	}
	NLA_PUT_U32 (msg, RTA_OIF, obj->ip_route.ifindex);

	/* IPv6 router preference (RFC 4191); MEDIUM is the kernel default and
	 * need not be sent. */
	if (   !is_v4
	    && obj->ip6_route.rt_pref != NM_ICMPV6_ROUTER_PREF_MEDIUM)
		NLA_PUT_U8 (msg, RTA_PREF, obj->ip6_route.rt_pref);

	return g_steal_pointer (&msg);

nla_put_failure:
	g_return_val_if_reached (NULL);
}
|
|
|
|
|
|
2017-11-15 20:36:35 +01:00
|
|
|
/* Build an RTM_*QDISC message (tcmsg header + TCA_KIND) from a platform
 * qdisc description.
 *
 * @nlmsg_type: RTM_NEWQDISC/RTM_DELQDISC/RTM_GETQDISC.
 * @nlmsg_flags: NLM_F_* flags.
 * @qdisc: the qdisc whose identity fields are copied into the tcmsg.
 *
 * Returns: the new message (caller owns it); failure to append is treated
 *   as a programming error via g_return_val_if_reached(). */
static struct nl_msg *
_nl_msg_new_qdisc (int nlmsg_type,
                   int nlmsg_flags,
                   const NMPlatformQdisc *qdisc)
{
	nm_auto_nlmsg struct nl_msg *msg = NULL;
	const struct tcmsg tcm = {
		.tcm_family = qdisc->addr_family,
		.tcm_ifindex = qdisc->ifindex,
		.tcm_handle = qdisc->handle,
		.tcm_parent = qdisc->parent,
		.tcm_info = qdisc->info,
	};

	msg = nlmsg_alloc_simple (nlmsg_type, nlmsg_flags);

	if (nlmsg_append_struct (msg, &tcm) < 0)
		goto nla_put_failure;

	NLA_PUT_STRING (msg, TCA_KIND, qdisc->kind);

	return g_steal_pointer (&msg);

nla_put_failure:
	g_return_val_if_reached (NULL);
}
|
|
|
|
|
|
2017-11-15 20:36:35 +01:00
|
|
|
static gboolean
|
|
|
|
|
_add_action_simple (struct nl_msg *msg,
|
|
|
|
|
const NMPlatformActionSimple *simple)
|
|
|
|
|
{
|
|
|
|
|
struct nlattr *act_options;
|
|
|
|
|
struct tc_defact sel = { 0, };
|
|
|
|
|
|
|
|
|
|
if (!(act_options = nla_nest_start (msg, TCA_ACT_OPTIONS)))
|
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
|
|
NLA_PUT (msg, TCA_DEF_PARMS, sizeof (sel), &sel);
|
|
|
|
|
NLA_PUT (msg, TCA_DEF_DATA, sizeof (simple->sdata), simple->sdata);
|
|
|
|
|
|
|
|
|
|
nla_nest_end (msg, act_options);
|
|
|
|
|
|
|
|
|
|
return TRUE;
|
|
|
|
|
|
|
|
|
|
nla_put_failure:
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
_add_action (struct nl_msg *msg,
|
|
|
|
|
const NMPlatformAction *action)
|
|
|
|
|
{
|
|
|
|
|
struct nlattr *prio;
|
|
|
|
|
|
2017-12-10 12:52:41 +01:00
|
|
|
nm_assert (action || action->kind);
|
|
|
|
|
|
2017-11-15 20:36:35 +01:00
|
|
|
if (!(prio = nla_nest_start (msg, 1 /* priority */)))
|
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
|
|
NLA_PUT_STRING (msg, TCA_ACT_KIND, action->kind);
|
|
|
|
|
|
2017-12-10 12:52:41 +01:00
|
|
|
if (nm_streq (action->kind, NM_PLATFORM_ACTION_KIND_SIMPLE))
|
2017-11-15 20:36:35 +01:00
|
|
|
_add_action_simple (msg, &action->simple);
|
|
|
|
|
|
|
|
|
|
nla_nest_end (msg, prio);
|
|
|
|
|
|
|
|
|
|
return TRUE;
|
|
|
|
|
|
|
|
|
|
nla_put_failure:
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Build a RTM_NEWTFILTER/RTM_DELTFILTER netlink message for @tfilter.
 *
 * Returns a newly allocated message on success; on attribute failure,
 * hits g_return_val_if_reached() and returns NULL. */
static struct nl_msg *
_nl_msg_new_tfilter (int nlmsg_type,
                     int nlmsg_flags,
                     const NMPlatformTfilter *tfilter)
{
	nm_auto_nlmsg struct nl_msg *msg = NULL;
	struct nlattr *tc_options;
	struct nlattr *act_tab;
	/* The tcmsg header carries the filter identity (device, handle, parent). */
	const struct tcmsg tcm = {
		.tcm_family = tfilter->addr_family,
		.tcm_ifindex = tfilter->ifindex,
		.tcm_handle = tfilter->handle,
		.tcm_parent = tfilter->parent,
		.tcm_info = tfilter->info,
	};

	msg = nlmsg_alloc_simple (nlmsg_type, nlmsg_flags);

	if (nlmsg_append_struct (msg, &tcm) < 0)
		goto nla_put_failure;

	NLA_PUT_STRING (msg, TCA_KIND, tfilter->kind);

	if (!(tc_options = nla_nest_start (msg, TCA_OPTIONS)))
		goto nla_put_failure;

	/* NOTE(review): the inner nest also uses attribute type TCA_OPTIONS
	 * (value 3, aliasing e.g. TCA_MATCHALL_ACT within the options of some
	 * filter kinds) — confirm against the kernel's per-kind tc policy. */
	if (!(act_tab = nla_nest_start (msg, TCA_OPTIONS))) // 3 TCA_ACT_KIND TCA_ACT_KIND
		goto nla_put_failure;

	if (tfilter->action.kind)
		_add_action (msg, &tfilter->action);

	/* NOTE(review): the outer nest is closed before the inner one. This still
	 * yields correct lengths (nla_nest_end measures up to the current tail),
	 * but closing inner-to-outer would be the conventional order. */
	nla_nest_end (msg, tc_options);

	nla_nest_end (msg, act_tab);

	return g_steal_pointer (&msg);

nla_put_failure:
	g_return_val_if_reached (NULL);
}
|
|
|
|
|
|
2016-12-09 10:11:29 +01:00
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2018-05-15 20:29:30 +02:00
|
|
|
static struct nl_sock *
|
|
|
|
|
_genl_sock (NMLinuxPlatform *platform)
|
|
|
|
|
{
|
|
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
|
|
|
|
|
|
|
|
|
|
return priv->genl;
|
|
|
|
|
}
|
|
|
|
|
|
2016-12-08 14:29:00 +01:00
|
|
|
/* Sanity-check the (pathid, dirfd, path) triple used by the sysctl helpers:
 * the path must never contain "/../"; without a dirfd there must be no
 * pathid and the path must be an absolute /proc/sys/ or /sys/ path; with a
 * dirfd both pathid and path must be relative (non-empty, not starting
 * with '/'). */
#define ASSERT_SYSCTL_ARGS(pathid, dirfd, path) \
	G_STMT_START { \
		const char *const _pathid = (pathid); \
		const int _dirfd = (dirfd); \
		const char *const _path = (path); \
		\
		nm_assert (_path && _path[0]); \
		g_assert (!strstr (_path, "/../")); \
		if (_dirfd < 0) { \
			nm_assert (!_pathid); \
			nm_assert (_path[0] == '/'); \
			nm_assert (   g_str_has_prefix (_path, "/proc/sys/") \
			           || g_str_has_prefix (_path, "/sys/")); \
		} else { \
			nm_assert (_pathid && _pathid[0] && _pathid[0] != '/'); \
			nm_assert (_path[0] != '/'); \
		} \
	} G_STMT_END
|
|
|
|
|
|
2015-12-15 11:14:34 +01:00
|
|
|
/* Debug-log an upcoming sysctl write: read the current value of @path and
 * log whether the new @value differs from it (or that it cannot be read).
 * Pure logging helper; does not perform the write itself. */
static void
_log_dbg_sysctl_set_impl (NMPlatform *platform, const char *pathid, int dirfd, const char *path, const char *value)
{
	GError *error = NULL;
	char *contents;
	gs_free char *value_escaped = g_strescape (value, NULL);

	/* Read at most 1 MiB of the current value; failure is non-fatal, we
	 * merely log that the current value is unknown. */
	if (nm_utils_file_get_contents (dirfd, path, 1*1024*1024,
	                                NM_UTILS_FILE_GET_CONTENTS_FLAG_NONE,
	                                &contents, NULL, &error) < 0) {
		_LOGD ("sysctl: setting '%s' to '%s' (current value cannot be read: %s)", pathid, value_escaped, error->message);
		g_clear_error (&error);
		return;
	}

	/* Strip surrounding whitespace (sysctl values usually end in '\n')
	 * before comparing with the value about to be written. */
	g_strstrip (contents);
	if (nm_streq (contents, value))
		_LOGD ("sysctl: setting '%s' to '%s' (current value is identical)", pathid, value_escaped);
	else {
		gs_free char *contents_escaped = g_strescape (contents, NULL);

		_LOGD ("sysctl: setting '%s' to '%s' (current value is '%s')", pathid, value_escaped, contents_escaped);
	}
	g_free (contents);
}
|
|
|
|
|
|
2016-12-08 15:12:52 +01:00
|
|
|
/* Wrapper that avoids the extra file read in _log_dbg_sysctl_set_impl()
 * unless debug logging is actually enabled. */
#define _log_dbg_sysctl_set(platform, pathid, dirfd, path, value) \
	G_STMT_START { \
		if (_LOGD_ENABLED ()) { \
			_log_dbg_sysctl_set_impl (platform, pathid, dirfd, path, value); \
		} \
	} G_STMT_END
|
|
|
|
|
|
|
|
|
|
/* Write @value to the sysctl/sysfs file @path (absolute, or relative to
 * @dirfd), appending a trailing newline. With no dirfd the call is made in
 * the platform's network namespace (pushed for the duration).
 *
 * Returns TRUE on success. On failure returns FALSE with errno set to the
 * cause (ENETDOWN if the namespace could not be entered, EIO on a short
 * write with no saved error). */
static gboolean
sysctl_set (NMPlatform *platform, const char *pathid, int dirfd, const char *path, const char *value)
{
	nm_auto_pop_netns NMPNetns *netns = NULL;
	int fd, tries;
	gssize nwrote;
	gssize len;
	char *actual;
	gs_free char *actual_free = NULL;
	int errsv;

	g_return_val_if_fail (path != NULL, FALSE);
	g_return_val_if_fail (value != NULL, FALSE);

	ASSERT_SYSCTL_ARGS (pathid, dirfd, path);

	if (dirfd < 0) {
		/* Absolute path: enter the platform's netns before touching /proc. */
		if (!nm_platform_netns_push (platform, &netns)) {
			errno = ENETDOWN;
			return FALSE;
		}

		/* No separate display id was given; use the path for logging. */
		pathid = path;

		fd = open (path, O_WRONLY | O_TRUNC | O_CLOEXEC);
		if (fd == -1) {
			errsv = errno;
			/* ENOENT is common (option not present on this kernel): debug only. */
			if (errsv == ENOENT) {
				_LOGD ("sysctl: failed to open '%s': (%d) %s",
				       pathid, errsv, nm_strerror_native (errsv));
			} else {
				_LOGE ("sysctl: failed to open '%s': (%d) %s",
				       pathid, errsv, nm_strerror_native (errsv));
			}
			errno = errsv;
			return FALSE;
		}
	} else {
		fd = openat (dirfd, path, O_WRONLY | O_TRUNC | O_CLOEXEC);
		if (fd == -1) {
			errsv = errno;
			if (errsv == ENOENT) {
				_LOGD ("sysctl: failed to openat '%s': (%d) %s",
				       pathid, errsv, nm_strerror_native (errsv));
			} else {
				_LOGE ("sysctl: failed to openat '%s': (%d) %s",
				       pathid, errsv, nm_strerror_native (errsv));
			}
			errno = errsv;
			return FALSE;
		}
	}

	_log_dbg_sysctl_set (platform, pathid, dirfd, path, value);

	/* Most sysfs and sysctl options don't care about a trailing LF, while some
	 * (like infiniband) do. So always add the LF. Also, neither sysfs nor
	 * sysctl support partial writes so the LF must be added to the string we're
	 * about to write.
	 */
	len = strlen (value) + 1;
	nm_assert (len > 0);
	/* Small values go on the stack; large ones are heap-allocated and
	 * auto-freed via actual_free. */
	if (len > 512)
		actual = actual_free = g_malloc (len + 1);
	else
		actual = g_alloca (len + 1);
	memcpy (actual, value, len - 1);
	actual[len - 1] = '\n';
	actual[len] = '\0';

	/* Try to write the entire value three times if a partial write occurs */
	errsv = 0;
	for (tries = 0, nwrote = 0; tries < 3 && nwrote < len - 1; tries++) {
		nwrote = write (fd, actual, len);
		if (nwrote == -1) {
			errsv = errno;
			if (errsv == EINTR) {
				_LOGD ("sysctl: interrupted, will try again");
				continue;
			}
			break;
		}
	}
	if (nwrote == -1) {
		NMLogLevel level = LOGL_ERR;

		if (errsv == EEXIST) {
			level = LOGL_DEBUG;
		} else if (   errsv == EINVAL
		           && nm_utils_sysctl_ip_conf_is_path (AF_INET6, path, NULL, "mtu")) {
			/* setting the MTU can fail under regular conditions. Suppress
			 * logging a warning. */
			level = LOGL_DEBUG;
		}

		_NMLOG (level, "sysctl: failed to set '%s' to '%s': (%d) %s",
		        path, value, errsv, nm_strerror_native (errsv));
	} else if (nwrote < len - 1) {
		_LOGE ("sysctl: failed to set '%s' to '%s' after three attempts",
		       path, value);
	}

	if (nwrote < len - 1) {
		/* Failure path: close the fd but report the original write error
		 * (errsv) if there was one; otherwise fall back to close()'s errno
		 * or EIO for a plain short write. */
		if (nm_close (fd) != 0) {
			if (errsv != 0)
				errno = errsv;
		} else if (errsv != 0)
			errno = errsv;
		else
			errno = EIO;
		return FALSE;
	}
	if (nm_close (fd) != 0) {
		/* errno is already properly set. */
		return FALSE;
	}

	/* success. errno is undefined (no need to set). */
	return TRUE;
}
|
|
|
|
|
|
|
|
|
|
/* Platforms whose sysctl read-cache must be flushed when logging config
 * changes; consumed by _nm_logging_clear_platform_logging_cache(). */
static GSList *sysctl_clear_cache_list;
|
|
|
|
|
|
2019-01-15 16:41:57 +01:00
|
|
|
/* Drop the per-platform cache of previously read sysctl values for every
 * platform registered in sysctl_clear_cache_list, emptying the list.
 * Called by the logging code so that, after a logging-config change, the
 * next reads are logged afresh. */
void
_nm_logging_clear_platform_logging_cache (void)
{
	while (sysctl_clear_cache_list) {
		NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (sysctl_clear_cache_list->data);

		/* Unlink the head first, then destroy its cache. */
		sysctl_clear_cache_list = g_slist_delete_link (sysctl_clear_cache_list, sysctl_clear_cache_list);

		/* Destroying the table frees all SysctlCacheEntry instances. */
		g_hash_table_destroy (priv->sysctl_get_prev_values);
		priv->sysctl_get_prev_values = NULL;
	}
}
|
|
|
|
|
|
2019-02-05 11:11:16 +01:00
|
|
|
/* One cached sysctl read: the last value observed for a path. Entries live
 * in priv->sysctl_get_prev_values (hashed by the leading path pointer, see
 * nm_pstr_hash/nm_pstr_equal) and are chained LRU-style on priv->sysctl_list. */
typedef struct {
	const char *path;    /* points into path_data below; must stay first */
	CList lst;           /* LRU link in priv->sysctl_list (front = newest) */
	char *value;         /* last value read (heap-allocated copy) */
	char path_data[];    /* inline storage for the path string */
} SysctlCacheEntry;
|
|
|
|
|
|
|
|
|
|
/* GDestroyNotify for SysctlCacheEntry: unlink from the LRU list and free
 * the value and the entry itself (path_data is inline in the entry). */
static void
sysctl_cache_entry_free (SysctlCacheEntry *entry)
{
	c_list_unlink_stale (&entry->lst);
	g_free (entry->value);
	g_free (entry);
}
|
|
|
|
|
|
2015-12-15 11:14:34 +01:00
|
|
|
/* Debug-log a sysctl read of @pathid that produced @contents, using a
 * bounded per-platform cache of last-seen values so that only changes
 * (or first reads) are logged verbosely. Maintains an LRU list capped at
 * ~1000 entries. */
static void
_log_dbg_sysctl_get_impl (NMPlatform *platform, const char *pathid, const char *contents)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	SysctlCacheEntry *entry = NULL;

	if (!priv->sysctl_get_prev_values) {
		/* First use: register for global cache clearing and create the
		 * cache (keys are the entries themselves, hashed by their leading
		 * path pointer; the destroy notify frees the whole entry). */
		sysctl_clear_cache_list = g_slist_prepend (sysctl_clear_cache_list, platform);
		c_list_init (&priv->sysctl_list);
		priv->sysctl_get_prev_values = g_hash_table_new_full (nm_pstr_hash,
		                                                      nm_pstr_equal,
		                                                      (GDestroyNotify) sysctl_cache_entry_free,
		                                                      NULL);
	} else
		/* &pathid acts as a "const char *const *" lookup key for nm_pstr_*. */
		entry = g_hash_table_lookup (priv->sysctl_get_prev_values, &pathid);

	if (entry) {
		if (!nm_streq (entry->value, contents)) {
			gs_free char *contents_escaped = g_strescape (contents, NULL);
			gs_free char *prev_value_escaped = g_strescape (entry->value, NULL);

			_LOGD ("sysctl: reading '%s': '%s' (changed from '%s' on last read)", pathid, contents_escaped, prev_value_escaped);
			g_free (entry->value);
			entry->value = g_strdup (contents);
		}
		/* Mark as most recently used. */
		nm_c_list_move_front (&priv->sysctl_list, &entry->lst);
	} else {
		gs_free char *contents_escaped = g_strescape (contents, NULL);
		SysctlCacheEntry *old;
		size_t len;

		/* Allocate entry with the path stored inline after the struct. */
		len = strlen (pathid);
		entry = g_malloc (sizeof (SysctlCacheEntry) + len + 1);
		entry->value = g_strdup (contents);
		entry->path = entry->path_data;
		memcpy (entry->path_data, pathid, len + 1);

		/* Remove oldest entry when the cache becomes too big */
		if (g_hash_table_size (priv->sysctl_get_prev_values) > 1000) {
			old = c_list_last_entry (&priv->sysctl_list, SysctlCacheEntry, lst);
			g_hash_table_remove (priv->sysctl_get_prev_values, old);
		}

		_LOGD ("sysctl: reading '%s': '%s'", pathid, contents_escaped);

		g_hash_table_add (priv->sysctl_get_prev_values, entry);
		c_list_link_front (&priv->sysctl_list, &entry->lst);
	}
}
|
|
|
|
|
|
2016-12-08 15:12:52 +01:00
|
|
|
/* Wrapper that skips the caching/logging work in _log_dbg_sysctl_get_impl()
 * unless debug logging is enabled. */
#define _log_dbg_sysctl_get(platform, pathid, contents) \
	G_STMT_START { \
		if (_LOGD_ENABLED ()) \
			_log_dbg_sysctl_get_impl (platform, pathid, contents); \
	} G_STMT_END
|
|
|
|
|
|
|
|
|
|
/* Read the sysctl/sysfs file @path (absolute, or relative to @dirfd) and
 * return its whitespace-stripped contents as a newly allocated string, or
 * NULL on error. With no dirfd the read happens in the platform's netns. */
static char *
sysctl_get (NMPlatform *platform, const char *pathid, int dirfd, const char *path)
{
	nm_auto_pop_netns NMPNetns *netns = NULL;
	GError *error = NULL;
	char *contents;

	ASSERT_SYSCTL_ARGS (pathid, dirfd, path);

	if (dirfd < 0) {
		if (!nm_platform_netns_push (platform, &netns))
			return NULL;
		/* No separate display id was given; use the path for logging. */
		pathid = path;
	}

	/* Cap the read at 1 MiB; sysctl values are tiny. */
	if (nm_utils_file_get_contents (dirfd, path, 1*1024*1024,
	                                NM_UTILS_FILE_GET_CONTENTS_FLAG_NONE,
	                                &contents, NULL, &error) < 0) {
		/* We assume FAILED means EOPNOTSUP */
		if (   g_error_matches (error, G_FILE_ERROR, G_FILE_ERROR_NOENT)
		    || g_error_matches (error, G_FILE_ERROR, G_FILE_ERROR_NODEV)
		    || g_error_matches (error, G_FILE_ERROR, G_FILE_ERROR_FAILED))
			_LOGD ("error reading %s: %s", pathid, error->message);
		else
			_LOGE ("error reading %s: %s", pathid, error->message);
		g_clear_error (&error);
		return NULL;
	}

	g_strstrip (contents);

	_log_dbg_sysctl_get (platform, pathid, contents);

	return contents;
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2015-12-15 11:14:34 +01:00
|
|
|
|
2017-10-10 18:20:05 +02:00
|
|
|
static NMPlatformKernelSupportFlags
|
|
|
|
|
check_kernel_support (NMPlatform *platform,
|
|
|
|
|
NMPlatformKernelSupportFlags request_flags)
|
2015-10-12 16:07:01 +02:00
|
|
|
{
|
2017-10-10 18:20:05 +02:00
|
|
|
NMPlatformKernelSupportFlags response = 0;
|
|
|
|
|
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
nm_assert (NM_IS_LINUX_PLATFORM (platform));
|
2015-04-14 23:14:06 +02:00
|
|
|
|
2017-10-10 18:20:05 +02:00
|
|
|
if (NM_FLAGS_HAS (request_flags, NM_PLATFORM_KERNEL_SUPPORT_EXTENDED_IFA_FLAGS)) {
|
|
|
|
|
if (_support_kernel_extended_ifa_flags_get ())
|
|
|
|
|
response |= NM_PLATFORM_KERNEL_SUPPORT_EXTENDED_IFA_FLAGS;
|
|
|
|
|
}
|
2015-04-14 23:14:06 +02:00
|
|
|
|
2017-10-10 18:20:05 +02:00
|
|
|
if (NM_FLAGS_HAS (request_flags, NM_PLATFORM_KERNEL_SUPPORT_USER_IPV6LL)) {
|
|
|
|
|
if (_support_user_ipv6ll_get ())
|
|
|
|
|
response |= NM_PLATFORM_KERNEL_SUPPORT_USER_IPV6LL;
|
|
|
|
|
}
|
2015-04-14 23:14:06 +02:00
|
|
|
|
2017-10-11 09:15:04 +02:00
|
|
|
if (NM_FLAGS_HAS (request_flags, NM_PLATFORM_KERNEL_SUPPORT_RTA_PREF)) {
|
|
|
|
|
if (_support_rta_pref_get ())
|
|
|
|
|
response |= NM_PLATFORM_KERNEL_SUPPORT_RTA_PREF;
|
|
|
|
|
}
|
|
|
|
|
|
2017-10-10 18:20:05 +02:00
|
|
|
return response;
|
2015-10-12 16:07:01 +02:00
|
|
|
}
|
2015-04-14 23:14:06 +02:00
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
/* NMPlatform vfunc: flush all pending delayed actions (reads netlink,
 * emits signals). */
static void
process_events (NMPlatform *platform)
{
	delayed_action_handle_all (platform, TRUE);
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2013-03-27 22:23:24 +01:00
|
|
|
|
2016-04-08 12:40:35 +02:00
|
|
|
/* Map a cached object type to the delayed action that refreshes all
 * objects of that type. */
_NM_UTILS_LOOKUP_DEFINE (static, delayed_action_refresh_from_object_type, NMPObjectType, DelayedActionType,
	NM_UTILS_LOOKUP_DEFAULT_NM_ASSERT (DELAYED_ACTION_TYPE_NONE),
	NM_UTILS_LOOKUP_ITEM (NMP_OBJECT_TYPE_LINK,        DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS),
	NM_UTILS_LOOKUP_ITEM (NMP_OBJECT_TYPE_IP4_ADDRESS, DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES),
	NM_UTILS_LOOKUP_ITEM (NMP_OBJECT_TYPE_IP6_ADDRESS, DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES),
	NM_UTILS_LOOKUP_ITEM (NMP_OBJECT_TYPE_IP4_ROUTE,   DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES),
	NM_UTILS_LOOKUP_ITEM (NMP_OBJECT_TYPE_IP6_ROUTE,   DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES),
	NM_UTILS_LOOKUP_ITEM (NMP_OBJECT_TYPE_QDISC,       DELAYED_ACTION_TYPE_REFRESH_ALL_QDISCS),
	NM_UTILS_LOOKUP_ITEM (NMP_OBJECT_TYPE_TFILTER,     DELAYED_ACTION_TYPE_REFRESH_ALL_TFILTERS),
	NM_UTILS_LOOKUP_ITEM_IGNORE_OTHER (),
);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2016-04-08 12:40:35 +02:00
|
|
|
/* Inverse of delayed_action_refresh_from_object_type(): map a refresh-all
 * delayed action back to the object type it refreshes. */
_NM_UTILS_LOOKUP_DEFINE (static, delayed_action_refresh_to_object_type, DelayedActionType, NMPObjectType,
	NM_UTILS_LOOKUP_DEFAULT_NM_ASSERT (NMP_OBJECT_TYPE_UNKNOWN),
	NM_UTILS_LOOKUP_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS,         NMP_OBJECT_TYPE_LINK),
	NM_UTILS_LOOKUP_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES, NMP_OBJECT_TYPE_IP4_ADDRESS),
	NM_UTILS_LOOKUP_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES, NMP_OBJECT_TYPE_IP6_ADDRESS),
	NM_UTILS_LOOKUP_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES,    NMP_OBJECT_TYPE_IP4_ROUTE),
	NM_UTILS_LOOKUP_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES,    NMP_OBJECT_TYPE_IP6_ROUTE),
	NM_UTILS_LOOKUP_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_QDISCS,        NMP_OBJECT_TYPE_QDISC),
	NM_UTILS_LOOKUP_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_TFILTERS,      NMP_OBJECT_TYPE_TFILTER),
	NM_UTILS_LOOKUP_ITEM_IGNORE_OTHER (),
);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
|
|
|
/* Map a refresh-all delayed action (a single flag bit) to its index in the
 * priv->delayed_action.refresh_all_in_progress[] counter array. */
_NM_UTILS_LOOKUP_DEFINE (static, delayed_action_refresh_all_to_idx, DelayedActionType, guint,
	NM_UTILS_LOOKUP_DEFAULT_NM_ASSERT (0),
	NM_UTILS_LOOKUP_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS,         DELAYED_ACTION_IDX_REFRESH_ALL_LINKS),
	NM_UTILS_LOOKUP_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES, DELAYED_ACTION_IDX_REFRESH_ALL_IP4_ADDRESSES),
	NM_UTILS_LOOKUP_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES, DELAYED_ACTION_IDX_REFRESH_ALL_IP6_ADDRESSES),
	NM_UTILS_LOOKUP_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES,    DELAYED_ACTION_IDX_REFRESH_ALL_IP4_ROUTES),
	NM_UTILS_LOOKUP_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES,    DELAYED_ACTION_IDX_REFRESH_ALL_IP6_ROUTES),
	NM_UTILS_LOOKUP_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_QDISCS,        DELAYED_ACTION_IDX_REFRESH_ALL_QDISCS),
	NM_UTILS_LOOKUP_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_TFILTERS,      DELAYED_ACTION_IDX_REFRESH_ALL_TFILTERS),
	NM_UTILS_LOOKUP_ITEM_IGNORE_OTHER (),
);
|
|
|
|
|
|
2016-04-08 12:40:35 +02:00
|
|
|
/* Human-readable name for a single delayed-action type (logging only). */
NM_UTILS_LOOKUP_STR_DEFINE_STATIC (delayed_action_to_string, DelayedActionType,
	NM_UTILS_LOOKUP_DEFAULT_NM_ASSERT ("unknown"),
	NM_UTILS_LOOKUP_STR_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS,         "refresh-all-links"),
	NM_UTILS_LOOKUP_STR_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES, "refresh-all-ip4-addresses"),
	NM_UTILS_LOOKUP_STR_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES, "refresh-all-ip6-addresses"),
	NM_UTILS_LOOKUP_STR_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES,    "refresh-all-ip4-routes"),
	NM_UTILS_LOOKUP_STR_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES,    "refresh-all-ip6-routes"),
	NM_UTILS_LOOKUP_STR_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_QDISCS,        "refresh-all-qdiscs"),
	NM_UTILS_LOOKUP_STR_ITEM (DELAYED_ACTION_TYPE_REFRESH_ALL_TFILTERS,      "refresh-all-tfilters"),
	NM_UTILS_LOOKUP_STR_ITEM (DELAYED_ACTION_TYPE_REFRESH_LINK,              "refresh-link"),
	NM_UTILS_LOOKUP_STR_ITEM (DELAYED_ACTION_TYPE_MASTER_CONNECTED,          "master-connected"),
	NM_UTILS_LOOKUP_STR_ITEM (DELAYED_ACTION_TYPE_READ_NETLINK,              "read-netlink"),
	NM_UTILS_LOOKUP_STR_ITEM (DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE,      "wait-for-nl-response"),
	NM_UTILS_LOOKUP_ITEM_IGNORE (DELAYED_ACTION_TYPE_NONE),
	NM_UTILS_LOOKUP_ITEM_IGNORE (DELAYED_ACTION_TYPE_REFRESH_ALL),
	NM_UTILS_LOOKUP_ITEM_IGNORE (__DELAYED_ACTION_TYPE_MAX),
);
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-14 12:09:50 +01:00
|
|
|
/* Format a delayed action (plus its per-type @user_data payload) into @buf
 * for logging. @user_data is a master/link ifindex for MASTER_CONNECTED and
 * REFRESH_LINK, a DelayedActionWaitForNlResponseData pointer (or NULL for
 * "any") for WAIT_FOR_NL_RESPONSE, and must be NULL otherwise.
 * Returns @buf. */
static const char *
delayed_action_to_string_full (DelayedActionType action_type, gpointer user_data, char *buf, gsize buf_size)
{
	char *buf0 = buf;
	const DelayedActionWaitForNlResponseData *data;

	nm_utils_strbuf_append_str (&buf, &buf_size, delayed_action_to_string (action_type));
	switch (action_type) {
	case DELAYED_ACTION_TYPE_MASTER_CONNECTED:
		nm_utils_strbuf_append (&buf, &buf_size, " (master-ifindex %d)", GPOINTER_TO_INT (user_data));
		break;
	case DELAYED_ACTION_TYPE_REFRESH_LINK:
		nm_utils_strbuf_append (&buf, &buf_size, " (ifindex %d)", GPOINTER_TO_INT (user_data));
		break;
	case DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE:
		data = user_data;

		if (data) {
			/* Remaining time until the wait times out (may be negative). */
			gint64 timeout = data->timeout_abs_ns - nm_utils_get_monotonic_timestamp_ns ();
			char b[255];

			nm_utils_strbuf_append (&buf, &buf_size, " (seq %u, timeout in %s%"G_GINT64_FORMAT".%09"G_GINT64_FORMAT", response-type %d%s%s)",
			                        data->seq_number,
			                        timeout < 0 ? "-" : "",
			                        (timeout < 0 ? -timeout : timeout) / NM_UTILS_NS_PER_SECOND,
			                        (timeout < 0 ? -timeout : timeout) % NM_UTILS_NS_PER_SECOND,
			                        (int) data->response_type,
			                        data->seq_result ? ", " : "",
			                        data->seq_result ? wait_for_nl_response_to_string (data->seq_result, NULL, b, sizeof (b)) : "");
		} else
			nm_utils_strbuf_append_str (&buf, &buf_size, " (any)");
		break;
	default:
		nm_assert (!user_data);
		break;
	}
	return buf0;
}
|
|
|
|
|
|
|
|
|
|
/* Trace-log a delayed-action event. The ""operation concatenation forces
 * @operation to be a string literal. */
#define _LOGt_delayed_action(action_type, user_data, operation) \
	G_STMT_START { \
		char _buf[255]; \
		\
		_LOGt ("delayed-action: %s %s", \
		       ""operation, \
		       delayed_action_to_string_full (action_type, user_data, _buf, sizeof (_buf))); \
	} G_STMT_END
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
|
|
|
static gboolean
|
|
|
|
|
delayed_action_refresh_all_in_progress (NMPlatform *platform, DelayedActionType action_type)
|
|
|
|
|
{
|
|
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
|
|
|
|
|
|
|
|
|
|
nm_assert (nm_utils_is_power_of_two (action_type));
|
|
|
|
|
nm_assert (NM_FLAGS_ANY (action_type, DELAYED_ACTION_TYPE_REFRESH_ALL));
|
|
|
|
|
nm_assert (!NM_FLAGS_ANY (action_type, ~DELAYED_ACTION_TYPE_REFRESH_ALL));
|
|
|
|
|
|
|
|
|
|
if (NM_FLAGS_ANY (priv->delayed_action.flags, action_type))
|
|
|
|
|
return TRUE;
|
|
|
|
|
|
2018-08-02 17:25:57 +02:00
|
|
|
if (priv->delayed_action.refresh_all_in_progress[delayed_action_refresh_all_to_idx (action_type)] > 0)
|
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
|
|
|
return TRUE;
|
|
|
|
|
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
static void
|
|
|
|
|
delayed_action_wait_for_nl_response_complete (NMPlatform *platform,
|
|
|
|
|
guint idx,
|
|
|
|
|
WaitForNlResponseResult seq_result)
|
|
|
|
|
{
|
|
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
|
|
|
|
|
DelayedActionWaitForNlResponseData *data;
|
|
|
|
|
|
|
|
|
|
nm_assert (NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE));
|
|
|
|
|
nm_assert (idx < priv->delayed_action.list_wait_for_nl_response->len);
|
|
|
|
|
nm_assert (seq_result);
|
|
|
|
|
|
|
|
|
|
data = &g_array_index (priv->delayed_action.list_wait_for_nl_response, DelayedActionWaitForNlResponseData, idx);
|
|
|
|
|
|
|
|
|
|
_LOGt_delayed_action (DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE, data, "complete");
|
|
|
|
|
|
2016-04-08 12:25:41 +02:00
|
|
|
if (priv->delayed_action.list_wait_for_nl_response->len <= 1)
|
2015-12-14 14:47:41 +01:00
|
|
|
priv->delayed_action.flags &= ~DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE;
|
2016-04-08 12:25:41 +02:00
|
|
|
if (data->out_seq_result)
|
|
|
|
|
*data->out_seq_result = seq_result;
|
2017-08-16 16:13:24 +02:00
|
|
|
switch (data->response_type) {
|
|
|
|
|
case DELAYED_ACTION_RESPONSE_TYPE_VOID:
|
|
|
|
|
break;
|
|
|
|
|
case DELAYED_ACTION_RESPONSE_TYPE_REFRESH_ALL_IN_PROGRESS:
|
2018-08-02 17:25:57 +02:00
|
|
|
if (data->response.out_refresh_all_in_progress) {
|
|
|
|
|
nm_assert (*data->response.out_refresh_all_in_progress > 0);
|
|
|
|
|
*data->response.out_refresh_all_in_progress -= 1;
|
|
|
|
|
data->response.out_refresh_all_in_progress = NULL;
|
2017-08-16 16:13:24 +02:00
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
case DELAYED_ACTION_RESPONSE_TYPE_ROUTE_GET:
|
|
|
|
|
if (data->response.out_route_get) {
|
|
|
|
|
nm_assert (!*data->response.out_route_get);
|
|
|
|
|
data->response.out_route_get = NULL;
|
|
|
|
|
}
|
|
|
|
|
break;
|
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
|
|
|
}
|
2015-12-14 14:47:41 +01:00
|
|
|
|
2016-04-08 12:25:41 +02:00
|
|
|
g_array_remove_index_fast (priv->delayed_action.list_wait_for_nl_response, idx);
|
2015-12-14 14:47:41 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2018-02-19 13:42:03 +01:00
|
|
|
delayed_action_wait_for_nl_response_complete_check (NMPlatform *platform,
|
|
|
|
|
WaitForNlResponseResult force_result,
|
|
|
|
|
guint32 *out_next_seq_number,
|
|
|
|
|
gint64 *out_next_timeout_abs_ns,
|
|
|
|
|
gint64 *p_now_ns)
|
2015-12-14 14:47:41 +01:00
|
|
|
{
|
|
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
|
2018-02-19 13:42:03 +01:00
|
|
|
guint i;
|
|
|
|
|
guint32 next_seq_number = 0;
|
|
|
|
|
gint64 next_timeout_abs_ns = 0;
|
all: don't use gchar/gshort/gint/glong but C types
We commonly don't use the glib typedefs for char/short/int/long,
but their C types directly.
$ git grep '\<g\(char\|short\|int\|long\|float\|double\)\>' | wc -l
587
$ git grep '\<\(char\|short\|int\|long\|float\|double\)\>' | wc -l
21114
One could argue that using the glib typedefs is preferable in
public API (of our glib based libnm library) or where it clearly
is related to glib, like during
g_object_set (obj, PROPERTY, (gint) value, NULL);
However, that argument does not seem strong, because in practice we don't
follow that argument today, and seldomly use the glib typedefs.
Also, the style guide for this would be hard to formalize, because
"using them where clearly related to a glib" is a very loose suggestion.
Also note that glib typedefs will always just be typedefs of the
underlying C types. There is no danger of glib changing the meaning
of these typedefs (because that would be a major API break of glib).
A simple style guide is instead: don't use these typedefs.
No manual actions, I only ran the bash script:
FILES=($(git ls-files '*.[hc]'))
sed -i \
-e 's/\<g\(char\|short\|int\|long\|float\|double\)\>\( [^ ]\)/\1\2/g' \
-e 's/\<g\(char\|short\|int\|long\|float\|double\)\> /\1 /g' \
-e 's/\<g\(char\|short\|int\|long\|float\|double\)\>/\1/g' \
"${FILES[@]}"
2018-07-11 07:40:19 +02:00
|
|
|
int now_ns = 0;
|
2018-02-19 13:42:03 +01:00
|
|
|
|
|
|
|
|
for (i = 0; i < priv->delayed_action.list_wait_for_nl_response->len; ) {
|
|
|
|
|
const DelayedActionWaitForNlResponseData *data = &g_array_index (priv->delayed_action.list_wait_for_nl_response, DelayedActionWaitForNlResponseData, i);
|
|
|
|
|
|
|
|
|
|
if (data->seq_result)
|
|
|
|
|
delayed_action_wait_for_nl_response_complete (platform, i, data->seq_result);
|
|
|
|
|
else if ( p_now_ns
|
|
|
|
|
&& ((now_ns ?: (now_ns = nm_utils_get_monotonic_timestamp_ns ())) >= data->timeout_abs_ns)) {
|
|
|
|
|
/* the caller can optionally check for timeout by providing a p_now_ns argument. */
|
|
|
|
|
delayed_action_wait_for_nl_response_complete (platform, i, WAIT_FOR_NL_RESPONSE_RESULT_FAILED_TIMEOUT);
|
|
|
|
|
} else if (force_result != WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN)
|
|
|
|
|
delayed_action_wait_for_nl_response_complete (platform, i, force_result);
|
|
|
|
|
else {
|
|
|
|
|
if ( next_seq_number == 0
|
|
|
|
|
|| next_timeout_abs_ns > data->timeout_abs_ns) {
|
|
|
|
|
next_seq_number = data->seq_number;
|
|
|
|
|
next_timeout_abs_ns = data->timeout_abs_ns;
|
|
|
|
|
}
|
|
|
|
|
i++;
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-12-14 14:47:41 +01:00
|
|
|
|
2018-02-19 13:42:03 +01:00
|
|
|
if (force_result != WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN) {
|
|
|
|
|
nm_assert (!NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE));
|
|
|
|
|
nm_assert (priv->delayed_action.list_wait_for_nl_response->len == 0);
|
|
|
|
|
}
|
2016-01-24 18:46:14 +01:00
|
|
|
|
2018-02-19 13:42:03 +01:00
|
|
|
NM_SET_OUT (out_next_seq_number, next_seq_number);
|
|
|
|
|
NM_SET_OUT (out_next_timeout_abs_ns, next_timeout_abs_ns);
|
|
|
|
|
NM_SET_OUT (p_now_ns, now_ns);
|
|
|
|
|
}
|
2016-01-24 18:46:14 +01:00
|
|
|
|
2018-02-19 13:42:03 +01:00
|
|
|
static void
|
|
|
|
|
delayed_action_wait_for_nl_response_complete_all (NMPlatform *platform,
|
|
|
|
|
WaitForNlResponseResult fallback_result)
|
|
|
|
|
{
|
|
|
|
|
delayed_action_wait_for_nl_response_complete_check (platform,
|
|
|
|
|
fallback_result,
|
|
|
|
|
NULL,
|
|
|
|
|
NULL,
|
|
|
|
|
NULL);
|
2015-12-14 14:47:41 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2015-04-06 18:29:36 +02:00
|
|
|
static void
|
|
|
|
|
delayed_action_handle_MASTER_CONNECTED (NMPlatform *platform, int master_ifindex)
|
|
|
|
|
{
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
nm_auto_nmpobj const NMPObject *obj_old = NULL;
|
|
|
|
|
nm_auto_nmpobj const NMPObject *obj_new = NULL;
|
2015-04-06 18:29:36 +02:00
|
|
|
NMPCacheOpsType cache_op;
|
|
|
|
|
|
2017-06-29 11:18:10 +02:00
|
|
|
cache_op = nmp_cache_update_link_master_connected (nm_platform_get_cache (platform), master_ifindex, &obj_old, &obj_new);
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
if (cache_op == NMP_CACHE_OPS_UNCHANGED)
|
|
|
|
|
return;
|
|
|
|
|
cache_on_change (platform, cache_op, obj_old, obj_new);
|
2017-06-29 13:13:54 +02:00
|
|
|
nm_platform_cache_update_emit_signal (platform, cache_op, obj_old, obj_new);
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
delayed_action_handle_REFRESH_LINK (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
2015-12-14 14:47:41 +01:00
|
|
|
do_request_link_no_delayed_actions (platform, ifindex, NULL);
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
delayed_action_handle_REFRESH_ALL (NMPlatform *platform, DelayedActionType flags)
|
2015-04-06 18:29:36 +02:00
|
|
|
{
|
2015-12-14 14:47:41 +01:00
|
|
|
do_request_all_no_delayed_actions (platform, flags);
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
delayed_action_handle_READ_NETLINK (NMPlatform *platform)
|
2015-12-14 14:47:41 +01:00
|
|
|
{
|
2015-12-15 10:40:41 +01:00
|
|
|
event_handler_read_netlink (platform, FALSE);
|
2015-12-14 14:47:41 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
delayed_action_handle_WAIT_FOR_NL_RESPONSE (NMPlatform *platform)
|
2015-04-06 18:29:36 +02:00
|
|
|
{
|
2015-12-15 10:40:41 +01:00
|
|
|
event_handler_read_netlink (platform, TRUE);
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
delayed_action_handle_one (NMPlatform *platform)
|
|
|
|
|
{
|
|
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
gpointer user_data;
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-15 10:55:27 +01:00
|
|
|
if (priv->delayed_action.flags == DELAYED_ACTION_TYPE_NONE)
|
2015-04-06 18:29:36 +02:00
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
|
|
/* First process DELAYED_ACTION_TYPE_MASTER_CONNECTED actions.
|
|
|
|
|
* This type of action is entirely cache-internal and is here to resolve a
|
|
|
|
|
* cache inconsistency. It should be fixed right away. */
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
if (NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_MASTER_CONNECTED)) {
|
|
|
|
|
nm_assert (priv->delayed_action.list_master_connected->len > 0);
|
2015-04-06 18:29:36 +02:00
|
|
|
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
user_data = priv->delayed_action.list_master_connected->pdata[0];
|
|
|
|
|
g_ptr_array_remove_index_fast (priv->delayed_action.list_master_connected, 0);
|
|
|
|
|
if (priv->delayed_action.list_master_connected->len == 0)
|
|
|
|
|
priv->delayed_action.flags &= ~DELAYED_ACTION_TYPE_MASTER_CONNECTED;
|
2016-09-23 15:03:41 +02:00
|
|
|
nm_assert (_nm_utils_ptrarray_find_first ((gconstpointer *) priv->delayed_action.list_master_connected->pdata, priv->delayed_action.list_master_connected->len, user_data) < 0);
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-11-06 12:58:55 +01:00
|
|
|
_LOGt_delayed_action (DELAYED_ACTION_TYPE_MASTER_CONNECTED, user_data, "handle");
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
delayed_action_handle_MASTER_CONNECTED (platform, GPOINTER_TO_INT (user_data));
|
|
|
|
|
return TRUE;
|
|
|
|
|
}
|
|
|
|
|
nm_assert (priv->delayed_action.list_master_connected->len == 0);
|
|
|
|
|
|
|
|
|
|
/* Next we prefer read-netlink, because the buffer size is limited and we want to process events
|
|
|
|
|
* from netlink early. */
|
|
|
|
|
if (NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_READ_NETLINK)) {
|
2015-11-06 12:58:55 +01:00
|
|
|
_LOGt_delayed_action (DELAYED_ACTION_TYPE_READ_NETLINK, NULL, "handle");
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
priv->delayed_action.flags &= ~DELAYED_ACTION_TYPE_READ_NETLINK;
|
|
|
|
|
delayed_action_handle_READ_NETLINK (platform);
|
2015-04-06 18:29:36 +02:00
|
|
|
return TRUE;
|
|
|
|
|
}
|
|
|
|
|
|
2015-12-17 18:24:57 +01:00
|
|
|
if (NM_FLAGS_ANY (priv->delayed_action.flags, DELAYED_ACTION_TYPE_REFRESH_ALL)) {
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
DelayedActionType flags, iflags;
|
2015-04-06 18:29:36 +02:00
|
|
|
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
flags = priv->delayed_action.flags & DELAYED_ACTION_TYPE_REFRESH_ALL;
|
2015-04-06 18:29:36 +02:00
|
|
|
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
priv->delayed_action.flags &= ~DELAYED_ACTION_TYPE_REFRESH_ALL;
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-11-06 12:58:55 +01:00
|
|
|
if (_LOGt_ENABLED ()) {
|
2016-04-07 17:14:03 +02:00
|
|
|
FOR_EACH_DELAYED_ACTION (iflags, flags) {
|
|
|
|
|
_LOGt_delayed_action (iflags, NULL, "handle");
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
}
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
delayed_action_handle_REFRESH_ALL (platform, flags);
|
|
|
|
|
return TRUE;
|
|
|
|
|
}
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-17 18:24:57 +01:00
|
|
|
if (NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_REFRESH_LINK)) {
|
2015-12-14 14:47:41 +01:00
|
|
|
nm_assert (priv->delayed_action.list_refresh_link->len > 0);
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
user_data = priv->delayed_action.list_refresh_link->pdata[0];
|
|
|
|
|
g_ptr_array_remove_index_fast (priv->delayed_action.list_refresh_link, 0);
|
|
|
|
|
if (priv->delayed_action.list_refresh_link->len == 0)
|
|
|
|
|
priv->delayed_action.flags &= ~DELAYED_ACTION_TYPE_REFRESH_LINK;
|
2016-09-23 15:03:41 +02:00
|
|
|
nm_assert (_nm_utils_ptrarray_find_first ((gconstpointer *) priv->delayed_action.list_refresh_link->pdata, priv->delayed_action.list_refresh_link->len, user_data) < 0);
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
_LOGt_delayed_action (DELAYED_ACTION_TYPE_REFRESH_LINK, user_data, "handle");
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
delayed_action_handle_REFRESH_LINK (platform, GPOINTER_TO_INT (user_data));
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
return TRUE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE)) {
|
|
|
|
|
nm_assert (priv->delayed_action.list_wait_for_nl_response->len > 0);
|
|
|
|
|
_LOGt_delayed_action (DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE, NULL, "handle");
|
|
|
|
|
delayed_action_handle_WAIT_FOR_NL_RESPONSE (platform);
|
|
|
|
|
return TRUE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return FALSE;
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
delayed_action_handle_all (NMPlatform *platform, gboolean read_netlink)
|
2015-04-06 18:29:36 +02:00
|
|
|
{
|
|
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
|
|
|
|
|
gboolean any = FALSE;
|
|
|
|
|
|
2015-12-17 18:24:57 +01:00
|
|
|
g_return_val_if_fail (priv->delayed_action.is_handling == 0, FALSE);
|
|
|
|
|
|
2015-04-06 18:29:36 +02:00
|
|
|
priv->delayed_action.is_handling++;
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
if (read_netlink)
|
|
|
|
|
delayed_action_schedule (platform, DELAYED_ACTION_TYPE_READ_NETLINK, NULL);
|
2015-04-06 18:29:36 +02:00
|
|
|
while (delayed_action_handle_one (platform))
|
|
|
|
|
any = TRUE;
|
|
|
|
|
priv->delayed_action.is_handling--;
|
2015-12-14 14:47:41 +01:00
|
|
|
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
cache_prune_all (platform);
|
2015-12-14 14:47:41 +01:00
|
|
|
|
2015-04-06 18:29:36 +02:00
|
|
|
return any;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
delayed_action_schedule (NMPlatform *platform, DelayedActionType action_type, gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
DelayedActionType iflags;
|
|
|
|
|
|
|
|
|
|
nm_assert (action_type != DELAYED_ACTION_TYPE_NONE);
|
|
|
|
|
|
2015-12-14 12:09:50 +01:00
|
|
|
switch (action_type) {
|
|
|
|
|
case DELAYED_ACTION_TYPE_REFRESH_LINK:
|
2016-09-23 15:03:41 +02:00
|
|
|
if (_nm_utils_ptrarray_find_first ((gconstpointer *) priv->delayed_action.list_refresh_link->pdata, priv->delayed_action.list_refresh_link->len, user_data) < 0)
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
g_ptr_array_add (priv->delayed_action.list_refresh_link, user_data);
|
2015-12-14 12:09:50 +01:00
|
|
|
break;
|
|
|
|
|
case DELAYED_ACTION_TYPE_MASTER_CONNECTED:
|
2016-09-23 15:03:41 +02:00
|
|
|
if (_nm_utils_ptrarray_find_first ((gconstpointer *) priv->delayed_action.list_master_connected->pdata, priv->delayed_action.list_master_connected->len, user_data) < 0)
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
g_ptr_array_add (priv->delayed_action.list_master_connected, user_data);
|
2015-12-14 12:09:50 +01:00
|
|
|
break;
|
2015-12-14 14:47:41 +01:00
|
|
|
case DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE:
|
|
|
|
|
g_array_append_vals (priv->delayed_action.list_wait_for_nl_response, user_data, 1);
|
|
|
|
|
break;
|
2015-12-14 12:09:50 +01:00
|
|
|
default:
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
nm_assert (!user_data);
|
2015-12-14 12:09:50 +01:00
|
|
|
nm_assert (!NM_FLAGS_HAS (action_type, DELAYED_ACTION_TYPE_REFRESH_LINK));
|
|
|
|
|
nm_assert (!NM_FLAGS_HAS (action_type, DELAYED_ACTION_TYPE_MASTER_CONNECTED));
|
2015-12-14 14:47:41 +01:00
|
|
|
nm_assert (!NM_FLAGS_HAS (action_type, DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE));
|
2015-12-14 12:09:50 +01:00
|
|
|
break;
|
|
|
|
|
}
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
|
|
|
|
priv->delayed_action.flags |= action_type;
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-11-06 12:58:55 +01:00
|
|
|
if (_LOGt_ENABLED ()) {
|
2016-04-07 17:14:03 +02:00
|
|
|
FOR_EACH_DELAYED_ACTION (iflags, action_type) {
|
|
|
|
|
_LOGt_delayed_action (iflags, user_data, "schedule");
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
}
|
|
|
|
|
}
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
static void
|
|
|
|
|
delayed_action_schedule_WAIT_FOR_NL_RESPONSE (NMPlatform *platform,
|
|
|
|
|
guint32 seq_number,
|
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
|
|
|
WaitForNlResponseResult *out_seq_result,
|
2018-03-09 15:50:16 +01:00
|
|
|
char **out_errmsg,
|
2017-08-16 16:13:24 +02:00
|
|
|
DelayedActionWaitForNlResponseType response_type,
|
|
|
|
|
gpointer response_out_data)
|
2015-12-14 14:47:41 +01:00
|
|
|
{
|
|
|
|
|
DelayedActionWaitForNlResponseData data = {
|
|
|
|
|
.seq_number = seq_number,
|
|
|
|
|
.timeout_abs_ns = nm_utils_get_monotonic_timestamp_ns () + (200 * (NM_UTILS_NS_PER_SECOND / 1000)),
|
|
|
|
|
.out_seq_result = out_seq_result,
|
2018-03-09 15:50:16 +01:00
|
|
|
.out_errmsg = out_errmsg,
|
2017-08-16 16:13:24 +02:00
|
|
|
.response_type = response_type,
|
|
|
|
|
.response.out_data = response_out_data,
|
2015-12-14 14:47:41 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
delayed_action_schedule (platform,
|
|
|
|
|
DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE,
|
|
|
|
|
&data);
|
|
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2015-04-06 18:29:36 +02:00
|
|
|
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
static void
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
cache_prune_one_type (NMPlatform *platform, NMPObjectType obj_type)
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
{
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
NMDedupMultiIter iter;
|
|
|
|
|
const NMPObject *obj;
|
|
|
|
|
NMPCacheOpsType cache_op;
|
|
|
|
|
NMPLookup lookup;
|
2017-06-29 11:18:10 +02:00
|
|
|
NMPCache *cache = nm_platform_get_cache (platform);
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
|
|
|
|
|
nmp_lookup_init_obj_type (&lookup,
|
2017-07-04 11:44:27 +02:00
|
|
|
obj_type);
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
nm_dedup_multi_iter_init (&iter,
|
2017-06-29 11:18:10 +02:00
|
|
|
nmp_cache_lookup (cache,
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
&lookup));
|
|
|
|
|
while (nm_dedup_multi_iter_next (&iter)) {
|
|
|
|
|
if (iter.current->dirty) {
|
|
|
|
|
nm_auto_nmpobj const NMPObject *obj_old = NULL;
|
|
|
|
|
|
core: remove NMDedupMultiBox object and track NMDedupMultiObj instances directly
Implement the reference counting of NMPObject as part of
NMDedupMultiObj and get rid of NMDedupMultiBox.
With this change, the NMPObject is aware in which NMDedupMultiIndex
instance it is tracked.
- this saves an additional GSlice allocation for the NMDedupMultiBox.
- it is immediately known, whether an NMPObject is tracked by a
certain NMDedupMultiIndex or not. This saves an additional hash
lookup.
- previously, when all idx-types cease to reference an NMDedupMultiObj
instance, it was removed. Now, a tracked objects stays in the
NMDedupMultiIndex until it's last reference is deleted. This possibly
extends the lifetime of the object and we may reuse it better.
- it is no longer possible to add one object to more then one
NMDedupMultiIndex instance. As we anyway want to have only one
instance to deduplicate the objects, this is fine.
- the ref-counting implementation is now part of NMDedupMultiObj.
Previously, NMDedupMultiIndex could also track objects that were
not ref-counted. Hoever, the object anyway *must* implement the
NMDedupMultiObj API, so this flexibility is unneeded and was not
used.
- a downside is, that NMPObject grows by one pointer size, even if
it isn't tracked in the NMDedupMultiIndex. But we really want to
put all objects into the index for sharing and deduplication. So
this downside should be acceptable. Still, code like
nmp_object_stackinit*() needs to handle a larger object.
2017-07-02 23:46:06 +02:00
|
|
|
obj = iter.current->obj;
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
_LOGt ("cache-prune: prune %s", nmp_object_to_string (obj, NMP_OBJECT_TO_STRING_ALL, NULL, 0));
|
platform: fix cache to use kernel's notion for equality of routes
Until now, NetworkManager's platform cache for routes used the quadruple
network/plen,metric,ifindex for equaliy. That is not kernel's
understanding of how routes behave. For example, with `ip route append`
you can add two IPv4 routes that only differ by their gateway. To
the previous form of platform cache, these two routes would wrongly
look identical, as the cache could not contain both routes. This also
easily leads to cache-inconsistencies.
Now that we have NM_PLATFORM_IP_ROUTE_CMP_TYPE_ID, fix the route's
compare operator to match kernel's.
Well, not entirely. Kernel understands more properties for routes then
NetworkManager. Some of these properties may also be part of the ID according
to kernel. To NetworkManager such routes would still look identical as
they only differ in a property that is not understood. This can still
cause cache-inconsistencies. The only fix here is to add support for
all these properties in NetworkManager as well. However, it's less serious,
because with this commit we support several of the more important properties.
See also the related bug rh#1337855 for kernel.
Another difficulty is that `ip route replace` and `ip route change`
changes an existing route. The replaced route has the same
NM_PLATFORM_IP_ROUTE_CMP_TYPE_WEAK_ID, but differ in the actual
NM_PLATFORM_IP_ROUTE_CMP_TYPE_ID:
# ip -d -4 route show dev v
# ip monitor route &
# ip route add 192.168.5.0/24 dev v
192.168.5.0/24 dev v scope link
# ip route change 192.168.5.0/24 dev v scope 10
192.168.5.0/24 dev v scope 10
# ip -d -4 route show dev v
unicast 192.168.5.0/24 proto boot scope 10
Note that we only got one RTM_NEWROUTE message, although from NMPCache's
point of view, a new route (with a particular ID) was added and another
route (with a different ID) was deleted. The cumbersome workaround is,
to keep an ordered list of the routes, and figure out which route was
replaced in response to an RTM_NEWROUTE. In absence of bugs, this should
work fine. However, as we only rely on events, we might wrongly
introduce a cache-inconsistancy as well. See the related bug rh#1337860.
Also drop nm_platform_ip4_route_get() and the like. The ID of routes
is complex, so it makes little sense to look up a route directly.
2017-08-02 07:55:05 +02:00
|
|
|
cache_op = nmp_cache_remove (cache, obj, TRUE, TRUE, &obj_old);
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
nm_assert (cache_op == NMP_CACHE_OPS_REMOVED);
|
|
|
|
|
cache_on_change (platform, cache_op, obj_old, NULL);
|
2017-06-29 13:13:54 +02:00
|
|
|
nm_platform_cache_update_emit_signal (platform, cache_op, obj_old, NULL);
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
}
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
cache_prune_all (NMPlatform *platform)
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
{
|
|
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
DelayedActionType iflags, action_type;
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
action_type = DELAYED_ACTION_TYPE_REFRESH_ALL;
|
|
|
|
|
FOR_EACH_DELAYED_ACTION (iflags, action_type) {
|
|
|
|
|
bool *p = &priv->pruning[delayed_action_refresh_all_to_idx (iflags)];
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
if (*p) {
|
|
|
|
|
*p = FALSE;
|
|
|
|
|
cache_prune_one_type (platform, delayed_action_refresh_to_object_type (iflags));
|
|
|
|
|
}
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-04-06 18:29:36 +02:00
|
|
|
static void
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
cache_on_change (NMPlatform *platform,
|
|
|
|
|
NMPCacheOpsType cache_op,
|
|
|
|
|
const NMPObject *obj_old,
|
|
|
|
|
const NMPObject *obj_new)
|
2015-04-06 18:29:36 +02:00
|
|
|
{
|
|
|
|
|
const NMPClass *klass;
|
2015-10-27 10:25:22 +01:00
|
|
|
char str_buf[sizeof (_nm_utils_to_string_buffer)];
|
|
|
|
|
char str_buf2[sizeof (_nm_utils_to_string_buffer)];
|
2017-06-29 11:18:10 +02:00
|
|
|
NMPCache *cache = nm_platform_get_cache (platform);
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2017-06-29 11:18:10 +02:00
|
|
|
ASSERT_nmp_cache_ops (cache, cache_op, obj_old, obj_new);
|
2017-08-05 15:14:44 +02:00
|
|
|
nm_assert (cache_op != NMP_CACHE_OPS_UNCHANGED);
|
2015-04-06 18:29:36 +02:00
|
|
|
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
klass = obj_old ? NMP_OBJECT_GET_CLASS (obj_old) : NMP_OBJECT_GET_CLASS (obj_new);
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-11-06 12:58:55 +01:00
|
|
|
_LOGt ("update-cache-%s: %s: %s%s%s",
|
2015-04-06 18:29:36 +02:00
|
|
|
klass->obj_type_name,
|
2017-06-23 15:46:40 +02:00
|
|
|
(cache_op == NMP_CACHE_OPS_UPDATED
|
2015-04-06 18:29:36 +02:00
|
|
|
? "UPDATE"
|
2017-06-23 15:46:40 +02:00
|
|
|
: (cache_op == NMP_CACHE_OPS_REMOVED
|
2015-04-06 18:29:36 +02:00
|
|
|
? "REMOVE"
|
2017-06-23 15:46:40 +02:00
|
|
|
: (cache_op == NMP_CACHE_OPS_ADDED) ? "ADD" : "???")),
|
|
|
|
|
(cache_op != NMP_CACHE_OPS_ADDED
|
|
|
|
|
? nmp_object_to_string (obj_old, NMP_OBJECT_TO_STRING_ALL, str_buf2, sizeof (str_buf2))
|
|
|
|
|
: nmp_object_to_string (obj_new, NMP_OBJECT_TO_STRING_ALL, str_buf2, sizeof (str_buf2))),
|
|
|
|
|
(cache_op == NMP_CACHE_OPS_UPDATED) ? " -> " : "",
|
|
|
|
|
(cache_op == NMP_CACHE_OPS_UPDATED
|
|
|
|
|
? nmp_object_to_string (obj_new, NMP_OBJECT_TO_STRING_ALL, str_buf, sizeof (str_buf))
|
2015-04-06 18:29:36 +02:00
|
|
|
: ""));
|
|
|
|
|
|
|
|
|
|
switch (klass->obj_type) {
|
2015-06-19 16:24:18 +02:00
|
|
|
case NMP_OBJECT_TYPE_LINK:
|
2015-04-06 18:29:36 +02:00
|
|
|
{
|
|
|
|
|
/* check whether changing a slave link can cause a master link (bridge or bond) to go up/down */
|
2017-06-23 15:46:40 +02:00
|
|
|
if ( obj_old
|
2017-06-29 11:18:10 +02:00
|
|
|
&& nmp_cache_link_connected_needs_toggle_by_ifindex (cache, obj_old->link.master, obj_new, obj_old))
|
2017-06-23 15:46:40 +02:00
|
|
|
delayed_action_schedule (platform, DELAYED_ACTION_TYPE_MASTER_CONNECTED, GINT_TO_POINTER (obj_old->link.master));
|
|
|
|
|
if ( obj_new
|
|
|
|
|
&& (!obj_old || obj_old->link.master != obj_new->link.master)
|
2017-06-29 11:18:10 +02:00
|
|
|
&& nmp_cache_link_connected_needs_toggle_by_ifindex (cache, obj_new->link.master, obj_new, obj_old))
|
2017-06-23 15:46:40 +02:00
|
|
|
delayed_action_schedule (platform, DELAYED_ACTION_TYPE_MASTER_CONNECTED, GINT_TO_POINTER (obj_new->link.master));
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
{
|
|
|
|
|
/* check whether we are about to change a master link that needs toggling connected state. */
|
2017-06-23 15:46:40 +02:00
|
|
|
if ( obj_new /* <-- nonsensical, make coverity happy */
|
2017-06-29 11:18:10 +02:00
|
|
|
&& nmp_cache_link_connected_needs_toggle (cache, obj_new, obj_new, obj_old))
|
2017-06-23 15:46:40 +02:00
|
|
|
delayed_action_schedule (platform, DELAYED_ACTION_TYPE_MASTER_CONNECTED, GINT_TO_POINTER (obj_new->link.ifindex));
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
{
|
|
|
|
|
int ifindex = 0;
|
|
|
|
|
|
2017-11-15 20:36:35 +01:00
|
|
|
/* if we remove a link (from netlink), we must refresh the addresses, routes, qdiscs and tfilters */
|
2017-06-23 15:46:40 +02:00
|
|
|
if ( cache_op == NMP_CACHE_OPS_REMOVED
|
|
|
|
|
&& obj_old /* <-- nonsensical, make coverity happy */)
|
|
|
|
|
ifindex = obj_old->link.ifindex;
|
|
|
|
|
else if ( cache_op == NMP_CACHE_OPS_UPDATED
|
|
|
|
|
&& obj_old && obj_new /* <-- nonsensical, make coverity happy */
|
|
|
|
|
&& !obj_new->_link.netlink.is_in_netlink
|
|
|
|
|
&& obj_new->_link.netlink.is_in_netlink != obj_old->_link.netlink.is_in_netlink)
|
|
|
|
|
ifindex = obj_new->link.ifindex;
|
2015-04-06 18:29:36 +02:00
|
|
|
|
|
|
|
|
if (ifindex > 0) {
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
delayed_action_schedule (platform,
|
|
|
|
|
DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES |
|
|
|
|
|
DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES |
|
|
|
|
|
DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES |
|
2017-11-15 20:36:35 +01:00
|
|
|
DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES |
|
2017-11-15 20:36:35 +01:00
|
|
|
DELAYED_ACTION_TYPE_REFRESH_ALL_QDISCS |
|
|
|
|
|
DELAYED_ACTION_TYPE_REFRESH_ALL_TFILTERS,
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
NULL);
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
}
|
2015-09-14 15:05:00 +02:00
|
|
|
{
|
|
|
|
|
int ifindex = -1;
|
|
|
|
|
|
|
|
|
|
/* removal of a link could be caused by moving the link to another netns.
|
|
|
|
|
* In this case, we potentially have to update other links that have this link as parent.
|
2015-11-26 12:02:29 +01:00
|
|
|
* Currently, kernel misses to sent us a notification in this case
|
|
|
|
|
* (https://bugzilla.redhat.com/show_bug.cgi?id=1262908). */
|
2015-09-14 15:05:00 +02:00
|
|
|
|
2017-06-23 15:46:40 +02:00
|
|
|
if ( cache_op == NMP_CACHE_OPS_REMOVED
|
|
|
|
|
&& obj_old /* <-- nonsensical, make coverity happy */
|
|
|
|
|
&& obj_old->_link.netlink.is_in_netlink)
|
|
|
|
|
ifindex = obj_old->link.ifindex;
|
|
|
|
|
else if ( cache_op == NMP_CACHE_OPS_UPDATED
|
|
|
|
|
&& obj_old && obj_new /* <-- nonsensical, make coverity happy */
|
|
|
|
|
&& obj_old->_link.netlink.is_in_netlink
|
|
|
|
|
&& !obj_new->_link.netlink.is_in_netlink)
|
|
|
|
|
ifindex = obj_new->link.ifindex;
|
2015-09-14 15:05:00 +02:00
|
|
|
|
|
|
|
|
if (ifindex > 0) {
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
NMPLookup lookup;
|
|
|
|
|
NMDedupMultiIter iter;
|
|
|
|
|
const NMPlatformLink *l;
|
|
|
|
|
|
2017-07-04 11:44:27 +02:00
|
|
|
nmp_lookup_init_obj_type (&lookup, NMP_OBJECT_TYPE_LINK);
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
nmp_cache_iter_for_each_link (&iter,
|
2017-06-29 11:18:10 +02:00
|
|
|
nmp_cache_lookup (cache, &lookup),
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
&l) {
|
|
|
|
|
if (l->parent == ifindex)
|
|
|
|
|
delayed_action_schedule (platform, DELAYED_ACTION_TYPE_REFRESH_LINK, GINT_TO_POINTER (l->ifindex));
|
2015-09-14 15:05:00 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-04-06 18:29:36 +02:00
|
|
|
{
|
|
|
|
|
/* if a link goes down, we must refresh routes */
|
2017-06-23 15:46:40 +02:00
|
|
|
if ( cache_op == NMP_CACHE_OPS_UPDATED
|
|
|
|
|
&& obj_old && obj_new /* <-- nonsensical, make coverity happy */
|
|
|
|
|
&& obj_old->_link.netlink.is_in_netlink
|
|
|
|
|
&& obj_new->_link.netlink.is_in_netlink
|
|
|
|
|
&& ( ( NM_FLAGS_HAS (obj_old->link.n_ifi_flags, IFF_UP)
|
|
|
|
|
&& !NM_FLAGS_HAS (obj_new->link.n_ifi_flags, IFF_UP))
|
|
|
|
|
|| ( NM_FLAGS_HAS (obj_old->link.n_ifi_flags, IFF_LOWER_UP)
|
|
|
|
|
&& !NM_FLAGS_HAS (obj_new->link.n_ifi_flags, IFF_LOWER_UP)))) {
|
2016-04-15 20:53:15 +02:00
|
|
|
/* FIXME: I suspect that IFF_LOWER_UP must not be considered, and I
|
|
|
|
|
* think kernel does send RTM_DELROUTE events for IPv6 routes, so
|
|
|
|
|
* we might not need to refresh IPv6 routes. */
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
delayed_action_schedule (platform,
|
|
|
|
|
DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES |
|
|
|
|
|
DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES,
|
|
|
|
|
NULL);
|
|
|
|
|
}
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
2017-06-23 15:46:40 +02:00
|
|
|
if ( NM_IN_SET (cache_op, NMP_CACHE_OPS_ADDED, NMP_CACHE_OPS_UPDATED)
|
|
|
|
|
&& (obj_new && obj_new->_link.netlink.is_in_netlink)
|
|
|
|
|
&& (!obj_old || !obj_old->_link.netlink.is_in_netlink))
|
2015-11-26 15:48:32 +01:00
|
|
|
{
|
2018-04-05 15:10:39 +02:00
|
|
|
gboolean re_request_link = FALSE;
|
2018-04-05 15:14:10 +02:00
|
|
|
const NMPlatformLnkTun *lnk_tun;
|
2018-04-05 15:10:39 +02:00
|
|
|
|
|
|
|
|
if ( !obj_new->_link.netlink.lnk
|
|
|
|
|
&& NM_IN_SET (obj_new->link.type, NM_LINK_TYPE_GRE,
|
2018-06-26 10:45:35 +02:00
|
|
|
NM_LINK_TYPE_GRETAP,
|
2018-04-05 15:10:39 +02:00
|
|
|
NM_LINK_TYPE_IP6TNL,
|
2018-06-26 12:06:43 +02:00
|
|
|
NM_LINK_TYPE_IP6GRE,
|
|
|
|
|
NM_LINK_TYPE_IP6GRETAP,
|
2018-04-05 15:10:39 +02:00
|
|
|
NM_LINK_TYPE_INFINIBAND,
|
|
|
|
|
NM_LINK_TYPE_MACVLAN,
|
|
|
|
|
NM_LINK_TYPE_MACVLAN,
|
|
|
|
|
NM_LINK_TYPE_SIT,
|
2018-04-05 11:35:55 +02:00
|
|
|
NM_LINK_TYPE_TUN,
|
2018-04-05 15:10:39 +02:00
|
|
|
NM_LINK_TYPE_VLAN,
|
|
|
|
|
NM_LINK_TYPE_VXLAN)) {
|
2015-11-26 15:48:32 +01:00
|
|
|
/* certain link-types also come with a IFLA_INFO_DATA/lnk_data. It may happen that
|
|
|
|
|
* kernel didn't send this notification, thus when we first learn about a link
|
|
|
|
|
* that lacks an lnk_data we re-request it again.
|
|
|
|
|
*
|
|
|
|
|
* For example https://bugzilla.redhat.com/show_bug.cgi?id=1284001 */
|
2018-04-05 15:10:39 +02:00
|
|
|
re_request_link = TRUE;
|
2018-04-05 15:14:10 +02:00
|
|
|
} else if ( obj_new->link.type == NM_LINK_TYPE_TUN
|
|
|
|
|
&& obj_new->_link.netlink.lnk
|
|
|
|
|
&& (lnk_tun = &(obj_new->_link.netlink.lnk)->lnk_tun)
|
|
|
|
|
&& !lnk_tun->persist
|
|
|
|
|
&& lnk_tun->pi
|
|
|
|
|
&& !lnk_tun->vnet_hdr
|
|
|
|
|
&& !lnk_tun->multi_queue
|
|
|
|
|
&& !lnk_tun->owner_valid
|
|
|
|
|
&& !lnk_tun->group_valid) {
|
|
|
|
|
/* kernel has/had a know issue that the first notification for TUN device would
|
|
|
|
|
* be sent with invalid parameters. The message looks like that kind, so refetch
|
|
|
|
|
* it. */
|
|
|
|
|
re_request_link = TRUE;
|
2018-04-05 15:10:39 +02:00
|
|
|
} else if ( obj_new->link.type == NM_LINK_TYPE_VETH
|
|
|
|
|
&& obj_new->link.parent == 0) {
|
2015-11-26 12:02:29 +01:00
|
|
|
/* the initial notification when adding a veth pair can lack the parent/IFLA_LINK
|
|
|
|
|
* (https://bugzilla.redhat.com/show_bug.cgi?id=1285827).
|
|
|
|
|
* Request it again. */
|
2018-04-05 15:10:39 +02:00
|
|
|
re_request_link = TRUE;
|
|
|
|
|
} else if ( obj_new->link.type == NM_LINK_TYPE_ETHERNET
|
|
|
|
|
&& obj_new->link.addr.len == 0) {
|
2016-01-26 21:32:07 +01:00
|
|
|
/* Due to a kernel bug, we sometimes receive spurious NEWLINK
|
|
|
|
|
* messages after a wifi interface has disappeared. Since the
|
|
|
|
|
* link is not present anymore we can't determine its type and
|
|
|
|
|
* thus it will show up as a Ethernet one, with no address
|
|
|
|
|
* specified. Request the link again to check if it really
|
|
|
|
|
* exists. https://bugzilla.redhat.com/show_bug.cgi?id=1302037
|
|
|
|
|
*/
|
2018-04-05 15:10:39 +02:00
|
|
|
re_request_link = TRUE;
|
|
|
|
|
}
|
|
|
|
|
if (re_request_link) {
|
2016-01-26 21:32:07 +01:00
|
|
|
delayed_action_schedule (platform,
|
|
|
|
|
DELAYED_ACTION_TYPE_REFRESH_LINK,
|
2017-06-23 15:46:40 +02:00
|
|
|
GINT_TO_POINTER (obj_new->link.ifindex));
|
2016-01-26 21:32:07 +01:00
|
|
|
}
|
2015-11-26 15:48:32 +01:00
|
|
|
}
|
2015-04-06 18:29:36 +02:00
|
|
|
{
|
|
|
|
|
/* on enslave/release, we also refresh the master. */
|
|
|
|
|
int ifindex1 = 0, ifindex2 = 0;
|
|
|
|
|
gboolean changed_master, changed_connected;
|
|
|
|
|
|
2017-06-23 15:46:40 +02:00
|
|
|
changed_master = (obj_new && obj_new->_link.netlink.is_in_netlink && obj_new->link.master > 0 ? obj_new->link.master : 0)
|
|
|
|
|
!= (obj_old && obj_old->_link.netlink.is_in_netlink && obj_old->link.master > 0 ? obj_old->link.master : 0);
|
|
|
|
|
changed_connected = (obj_new && obj_new->_link.netlink.is_in_netlink ? NM_FLAGS_HAS (obj_new->link.n_ifi_flags, IFF_LOWER_UP) : 2)
|
|
|
|
|
!= (obj_old && obj_old->_link.netlink.is_in_netlink ? NM_FLAGS_HAS (obj_old->link.n_ifi_flags, IFF_LOWER_UP) : 2);
|
2015-04-06 18:29:36 +02:00
|
|
|
|
|
|
|
|
if (changed_master || changed_connected) {
|
2017-06-23 15:46:40 +02:00
|
|
|
ifindex1 = (obj_old && obj_old->_link.netlink.is_in_netlink && obj_old->link.master > 0) ? obj_old->link.master : 0;
|
|
|
|
|
ifindex2 = (obj_new && obj_new->_link.netlink.is_in_netlink && obj_new->link.master > 0) ? obj_new->link.master : 0;
|
2015-04-06 18:29:36 +02:00
|
|
|
|
|
|
|
|
if (ifindex1 > 0)
|
|
|
|
|
delayed_action_schedule (platform, DELAYED_ACTION_TYPE_REFRESH_LINK, GINT_TO_POINTER (ifindex1));
|
|
|
|
|
if (ifindex2 > 0 && ifindex1 != ifindex2)
|
|
|
|
|
delayed_action_schedule (platform, DELAYED_ACTION_TYPE_REFRESH_LINK, GINT_TO_POINTER (ifindex2));
|
|
|
|
|
}
|
platform: cope differently with spurious RTM_DELLINK message when unslaving bridge-slave
Unslaving from a bridge causes a wrong RTM_DELLINK event for
the former slave.
# ip link add dummy0 type dummy
# ip link add bridge0 type bridge
# ip link set bridge0 up
# ip link set dummy0 master bridge0
# ip monitor link &
# ip link set dummy0 nomaster
18: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop master bridge0 state DOWN group default
link/ether 76:44:5f:b9:38:02 brd ff:ff:ff:ff:ff:ff
18: dummy0: <BROADCAST,NOARP> mtu 1500 master bridge0 state DOWN
link/ether 76:44:5f:b9:38:02
Deleted 18: dummy0: <BROADCAST,NOARP> mtu 1500 master bridge0 state DOWN
link/ether 76:44:5f:b9:38:02
18: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default
link/ether 76:44:5f:b9:38:02 brd ff:ff:ff:ff:ff:ff
19: bridge0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
19: bridge0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
Previously, during do_request_link() we would remember the link that is
about to be requested (delayed_deletion) and delay processing a new
RTM_DELLINK message until the end of do_request_link() -- and possibly
forget about about the deletion, if RTM_DELLINK was followed by a
RTM_NEWLINK.
However, this hack does not catch the case where an external command
unslaves the link.
Instead just accept the wrong event and raise a "removed" signal right
away. This brings the cache in an externally visible, wrong state that
will be fixed by a following "added" signal.
Still do that because working around the kernel bug is complicated. Also,
we already might emit wrong "added" signals for devices that are already
removed. As a consequence, a user should not consider the platform signals
until all events are processed.
Listeners to that signal should accept that added/removed link changes
can be wrong and should preferably handle them idly, when the events
have settled.
It can even be worse, that a RTM_DELLINK is not fixed by a following
RTM_NEWLINK:
...
# ip link set dummy0 nomaster
36: dummy0: <BROADCAST,NOARP> mtu 1500 qdisc noop master bridge0 state DOWN
link/ether e2:f2:20:98:3a:be brd ff:ff:ff:ff:ff:ff
36: dummy0: <BROADCAST,NOARP> mtu 1500 master bridge0 state DOWN
link/ether e2:f2:20:98:3a:be
Deleted 36: dummy0: <BROADCAST,NOARP> mtu 1500 master bridge0 state DOWN
link/ether e2:f2:20:98:3a:be
37: bridge0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
37: bridge0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
So, when a slave is deleted, we have to refetch it too.
https://bugzilla.redhat.com/show_bug.cgi?id=1285719
2015-11-27 11:20:58 +01:00
|
|
|
}
|
2015-04-06 18:29:36 +02:00
|
|
|
break;
|
2015-06-19 16:24:18 +02:00
|
|
|
case NMP_OBJECT_TYPE_IP4_ADDRESS:
|
|
|
|
|
case NMP_OBJECT_TYPE_IP6_ADDRESS:
|
2015-04-06 18:29:36 +02:00
|
|
|
{
|
|
|
|
|
/* Address deletion is sometimes accompanied by route deletion. We need to
|
|
|
|
|
* check all routes belonging to the same interface. */
|
2017-06-23 15:46:40 +02:00
|
|
|
if (cache_op == NMP_CACHE_OPS_REMOVED) {
|
2015-04-06 18:29:36 +02:00
|
|
|
delayed_action_schedule (platform,
|
2015-06-19 16:24:18 +02:00
|
|
|
(klass->obj_type == NMP_OBJECT_TYPE_IP4_ADDRESS)
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
? DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES
|
|
|
|
|
: DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES,
|
|
|
|
|
NULL);
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
}
|
2016-04-10 11:21:50 +02:00
|
|
|
break;
|
2015-04-06 18:29:36 +02:00
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2015-04-25 14:53:39 +02:00
|
|
|
|
2017-08-16 16:13:24 +02:00
|
|
|
static guint32
|
|
|
|
|
_nlh_seq_next_get (NMLinuxPlatformPrivate *priv)
|
|
|
|
|
{
|
2018-03-09 17:44:17 +01:00
|
|
|
/* generate a new sequence number, but never return zero.
|
|
|
|
|
* Wrapping numbers are not a problem, because we don't rely
|
|
|
|
|
* on strictly increasing sequence numbers. */
|
|
|
|
|
return (++priv->nlh_seq_next) ?: (++priv->nlh_seq_next);
|
2017-08-16 16:13:24 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
 * _nl_send_nlmsghdr:
 * @platform: the #NMPlatform instance whose netlink socket (priv->nlh)
 *   is used for sending.
 * @nlhdr: a prepared netlink message header. This function fills in the
 *   sequence number, the local port-id (only if @nlhdr has none yet) and
 *   forces the %NLM_F_REQUEST and %NLM_F_ACK flags.
 * @out_seq_result: (allow-none): where the result for the assigned
 *   sequence number gets reported once the response is processed.
 * @out_errmsg: (allow-none): where an error message string for the
 *   response may be returned.
 * @response_type: the kind of response that is expected for this request.
 * @response_out_data: (allow-none): response-type specific output location.
 *
 * Sends the raw @nlhdr via sendmsg() and registers a delayed action that
 * waits for the kernel's answer matching the assigned sequence number.
 *
 * Returns: 0 on success or a negative errno.
 */
static int
_nl_send_nlmsghdr (NMPlatform *platform,
                   struct nlmsghdr *nlhdr,
                   WaitForNlResponseResult *out_seq_result,
                   char **out_errmsg,
                   DelayedActionWaitForNlResponseType response_type,
                   gpointer response_out_data)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	guint32 seq;
	int errsv;

	nm_assert (nlhdr);

	/* Assign a fresh, non-zero sequence number so the kernel's response
	 * can later be matched to this request. */
	seq = _nlh_seq_next_get (priv);
	nlhdr->nlmsg_seq = seq;

	{
		struct sockaddr_nl nladdr = {
			.nl_family = AF_NETLINK,
		};
		struct iovec iov = {
			.iov_base = nlhdr,
			.iov_len = nlhdr->nlmsg_len
		};
		struct msghdr msg = {
			.msg_name = &nladdr,
			.msg_namelen = sizeof (nladdr),
			.msg_iov = &iov,
			.msg_iovlen = 1,
		};
		int try_count;

		/* Stamp our own port-id unless the caller already set one. */
		if (!nlhdr->nlmsg_pid)
			nlhdr->nlmsg_pid = nl_socket_get_local_port (priv->nlh);
		nlhdr->nlmsg_flags |= (NLM_F_REQUEST | NLM_F_ACK);

		/* Retry a bounded number of times if sendmsg() gets interrupted
		 * by a signal.
		 *
		 * NOTE(review): errsv first holds the (narrowed) sendmsg() return
		 * value and only becomes an errno inside the failure branch; only
		 * the sign of the return value is inspected. */
		try_count = 0;
again:
		errsv = sendmsg (nl_socket_get_fd (priv->nlh), &msg, 0);
		if (errsv < 0) {
			errsv = errno;
			if (errsv == EINTR && try_count++ < 100)
				goto again;
			_LOGD ("netlink: nl-send-nlmsghdr: failed sending message: %s (%d)", nm_strerror_native (errsv), errsv);
			return -nm_errno_from_native (errsv);
		}
	}

	/* The message is out; arrange for the response with this seq number
	 * to be picked up when processing delayed actions. */
	delayed_action_schedule_WAIT_FOR_NL_RESPONSE (platform, seq, out_seq_result, out_errmsg,
	                                              response_type, response_out_data);
	return 0;
}
|
2015-12-11 18:20:54 +01:00
|
|
|
|
2017-08-16 16:13:24 +02:00
|
|
|
/**
 * _nl_send_nlmsg:
 * @platform: the #NMPlatform instance whose netlink socket (priv->nlh)
 *   is used for sending.
 * @nlmsg: the libnl message to send; its header gets a fresh sequence
 *   number assigned before sending.
 * @out_seq_result: (allow-none): where the result for the assigned
 *   sequence number gets reported once the response is processed.
 * @out_errmsg: (allow-none): where an error message string for the
 *   response may be returned.
 * @response_type: the kind of response that is expected for this request.
 * @response_out_data: (allow-none): response-type specific output location.
 *
 * Like _nl_send_nlmsghdr(), but sends a full libnl #nl_msg via
 * nl_send_auto() and registers a delayed action waiting for the response.
 *
 * Returns: 0 on success, or a negative libnl3 error code (beware, it's not an errno).
 */
static int
_nl_send_nlmsg (NMPlatform *platform,
                struct nl_msg *nlmsg,
                WaitForNlResponseResult *out_seq_result,
                char **out_errmsg,
                DelayedActionWaitForNlResponseType response_type,
                gpointer response_out_data)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	struct nlmsghdr *nlhdr;
	guint32 seq;
	int nle;

	/* Assign a fresh, non-zero sequence number before sending, so the
	 * kernel's response can be matched to this request. */
	nlhdr = nlmsg_hdr (nlmsg);
	seq = _nlh_seq_next_get (priv);
	nlhdr->nlmsg_seq = seq;

	nle = nl_send_auto (priv->nlh, nlmsg);
	if (nle < 0) {
		_LOGD ("netlink: nl-send-nlmsg: failed sending message: %s (%d)", nm_strerror (nle), nle);
		return nle;
	}

	/* Arrange for the response with this seq number to be picked up when
	 * processing delayed actions. */
	delayed_action_schedule_WAIT_FOR_NL_RESPONSE (platform, seq, out_seq_result, out_errmsg,
	                                              response_type, response_out_data);
	return 0;
}
|
|
|
|
|
|
|
|
|
|
/* Request a refresh of one link (identified by @ifindex and/or @name)
 * from kernel by sending an RTM_GETLINK request. Any resulting delayed
 * actions are only scheduled, not handled -- see do_request_link() for
 * the variant that also processes them.
 *
 * An empty @name is treated like %NULL; at least one of a positive
 * @ifindex or a non-empty @name is required. */
static void
do_request_link_no_delayed_actions (NMPlatform *platform, int ifindex, const char *name)
{
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
	int nle;

	if (name && !name[0])
		name = NULL;

	g_return_if_fail (ifindex > 0 || name);

	_LOGD ("do-request-link: %d %s", ifindex, name ?: "");

	if (ifindex > 0) {
		const NMDedupMultiEntry *entry;

		/* Mark the cached entry dirty before re-requesting: if the link
		 * no longer exists, the entry stays dirty and can be pruned. */
		entry = nmp_cache_lookup_entry_link (nm_platform_get_cache (platform), ifindex);
		if (entry) {
			priv->pruning[DELAYED_ACTION_IDX_REFRESH_ALL_LINKS] = TRUE;
			nm_dedup_multi_entry_set_dirty (entry, TRUE);
		}
	}

	/* Drain already-pending netlink events first, so the answer to our
	 * request is not mixed up with stale notifications. */
	event_handler_read_netlink (platform, FALSE);

	nlmsg = _nl_msg_new_link (RTM_GETLINK,
	                          0,
	                          ifindex,
	                          name,
	                          0,
	                          0);
	if (nlmsg) {
		nle = _nl_send_nlmsg (platform, nlmsg, NULL, NULL, DELAYED_ACTION_RESPONSE_TYPE_VOID, NULL);
		if (nle < 0) {
			/* NOTE(review): this logs -nle (positive) while the sibling
			 * _nl_send_nlmsg() logs nle un-negated -- presumably intentional
			 * to print a positive error number; confirm desired convention. */
			_LOGE ("do-request-link: %d %s: failed sending netlink request \"%s\" (%d)",
			       ifindex, name ?: "",
			       nm_strerror (nle), -nle);
			return;
		}
	}
}
|
2015-04-06 18:29:36 +02:00
|
|
|
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
/* Refresh a single link (by @ifindex and/or @name) from kernel and then
 * immediately handle all delayed actions that the refresh scheduled. */
static void
do_request_link (NMPlatform *platform, int ifindex, const char *name)
{
	do_request_link_no_delayed_actions (platform, ifindex, name);
	delayed_action_handle_all (platform, FALSE);
}
|
2015-04-06 18:29:36 +02:00
|
|
|
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
static void
|
2015-12-14 14:47:41 +01:00
|
|
|
do_request_all_no_delayed_actions (NMPlatform *platform, DelayedActionType action_type)
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
{
|
|
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
|
|
|
|
|
DelayedActionType iflags;
|
2015-04-06 18:29:36 +02:00
|
|
|
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
nm_assert (!NM_FLAGS_ANY (action_type, ~DELAYED_ACTION_TYPE_REFRESH_ALL));
|
|
|
|
|
action_type &= DELAYED_ACTION_TYPE_REFRESH_ALL;
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2016-04-07 17:14:03 +02:00
|
|
|
FOR_EACH_DELAYED_ACTION (iflags, action_type) {
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
priv->pruning[delayed_action_refresh_all_to_idx (iflags)] = TRUE;
|
2017-06-29 11:18:10 +02:00
|
|
|
nmp_cache_dirty_set_all (nm_platform_get_cache (platform),
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
delayed_action_refresh_to_object_type (iflags));
|
2016-04-07 17:14:03 +02:00
|
|
|
}
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2016-04-07 17:14:03 +02:00
|
|
|
FOR_EACH_DELAYED_ACTION (iflags, action_type) {
|
|
|
|
|
NMPObjectType obj_type = delayed_action_refresh_to_object_type (iflags);
|
|
|
|
|
const NMPClass *klass = nmp_class_from_type (obj_type);
|
|
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
|
|
|
|
int nle;
|
2018-08-02 17:25:57 +02:00
|
|
|
int *out_refresh_all_in_progress;
|
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that multiple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle of processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
|
|
|
|
2018-08-02 17:25:57 +02:00
|
|
|
out_refresh_all_in_progress = &priv->delayed_action.refresh_all_in_progress[delayed_action_refresh_all_to_idx (iflags)];
|
|
|
|
|
nm_assert (*out_refresh_all_in_progress >= 0);
|
|
|
|
|
*out_refresh_all_in_progress += 1;
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2016-04-07 17:14:03 +02:00
|
|
|
/* clear any delayed action that request a refresh of this object type. */
|
|
|
|
|
priv->delayed_action.flags &= ~iflags;
|
|
|
|
|
_LOGt_delayed_action (iflags, NULL, "handle (do-request-all)");
|
|
|
|
|
if (obj_type == NMP_OBJECT_TYPE_LINK) {
|
|
|
|
|
priv->delayed_action.flags &= ~DELAYED_ACTION_TYPE_REFRESH_LINK;
|
|
|
|
|
g_ptr_array_set_size (priv->delayed_action.list_refresh_link, 0);
|
|
|
|
|
_LOGt_delayed_action (DELAYED_ACTION_TYPE_REFRESH_LINK, NULL, "clear (do-request-all)");
|
|
|
|
|
}
|
2015-10-20 09:27:16 +02:00
|
|
|
|
2016-04-07 17:14:03 +02:00
|
|
|
event_handler_read_netlink (platform, FALSE);
|
2015-10-20 09:27:16 +02:00
|
|
|
|
2016-04-07 17:14:03 +02:00
|
|
|
/* reimplement
|
|
|
|
|
* nl_rtgen_request (sk, klass->rtm_gettype, klass->addr_family, NLM_F_DUMP);
|
|
|
|
|
* because we need the sequence number.
|
|
|
|
|
*/
|
|
|
|
|
nlmsg = nlmsg_alloc_simple (klass->rtm_gettype, NLM_F_DUMP);
|
|
|
|
|
|
2019-02-19 16:35:59 +01:00
|
|
|
switch (klass->obj_type) {
|
|
|
|
|
case NMP_OBJECT_TYPE_QDISC:
|
|
|
|
|
case NMP_OBJECT_TYPE_TFILTER:
|
|
|
|
|
{
|
|
|
|
|
const struct tcmsg tcmsg = {
|
|
|
|
|
.tcm_family = AF_UNSPEC,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
nle = nlmsg_append_struct (nlmsg, &tcmsg);
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
{
|
|
|
|
|
const struct rtgenmsg gmsg = {
|
|
|
|
|
.rtgen_family = klass->addr_family,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
nle = nlmsg_append_struct (nlmsg, &gmsg);
|
|
|
|
|
}
|
|
|
|
|
break;
|
2017-11-15 20:36:35 +01:00
|
|
|
}
|
2019-02-20 14:53:27 +01:00
|
|
|
|
2016-04-07 17:14:03 +02:00
|
|
|
if (nle < 0)
|
2019-02-20 14:53:27 +01:00
|
|
|
goto next_after_fail;
|
2016-04-07 17:14:03 +02:00
|
|
|
|
2019-02-20 14:53:27 +01:00
|
|
|
if (_nl_send_nlmsg (platform,
|
|
|
|
|
nlmsg,
|
|
|
|
|
NULL,
|
|
|
|
|
NULL,
|
|
|
|
|
DELAYED_ACTION_RESPONSE_TYPE_REFRESH_ALL_IN_PROGRESS,
|
|
|
|
|
out_refresh_all_in_progress) < 0)
|
|
|
|
|
goto next_after_fail;
|
|
|
|
|
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
next_after_fail:
|
|
|
|
|
nm_assert (*out_refresh_all_in_progress > 0);
|
|
|
|
|
*out_refresh_all_in_progress -= 1;
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
2015-12-14 14:47:41 +01:00
|
|
|
}
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
static void
|
|
|
|
|
do_request_one_type (NMPlatform *platform, NMPObjectType obj_type)
|
|
|
|
|
{
|
|
|
|
|
do_request_all_no_delayed_actions (platform, delayed_action_refresh_from_object_type (obj_type));
|
|
|
|
|
delayed_action_handle_all (platform, FALSE);
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
|
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that multiple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle of processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
|
|
|
static void
|
|
|
|
|
event_seq_check_refresh_all (NMPlatform *platform, guint32 seq_number)
|
|
|
|
|
{
|
|
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
|
|
|
|
|
DelayedActionWaitForNlResponseData *data;
|
|
|
|
|
guint i;
|
|
|
|
|
|
|
|
|
|
if (NM_IN_SET (seq_number, 0, priv->nlh_seq_last_seen))
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
if (NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE)) {
|
|
|
|
|
nm_assert (priv->delayed_action.list_wait_for_nl_response->len > 0);
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < priv->delayed_action.list_wait_for_nl_response->len; i++) {
|
|
|
|
|
data = &g_array_index (priv->delayed_action.list_wait_for_nl_response, DelayedActionWaitForNlResponseData, i);
|
|
|
|
|
|
2017-08-16 16:13:24 +02:00
|
|
|
if ( data->response_type == DELAYED_ACTION_RESPONSE_TYPE_REFRESH_ALL_IN_PROGRESS
|
2018-08-02 17:25:57 +02:00
|
|
|
&& data->response.out_refresh_all_in_progress
|
2017-08-16 16:13:24 +02:00
|
|
|
&& data->seq_number == priv->nlh_seq_last_seen) {
|
2018-08-02 17:25:57 +02:00
|
|
|
*data->response.out_refresh_all_in_progress -= 1;
|
|
|
|
|
data->response.out_refresh_all_in_progress = NULL;
|
2017-08-16 16:13:24 +02:00
|
|
|
break;
|
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that multiple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle of processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
priv->nlh_seq_last_seen = seq_number;
|
|
|
|
|
}
|
|
|
|
|
|
2015-12-13 10:03:22 +01:00
|
|
|
static void
|
2018-03-09 15:50:16 +01:00
|
|
|
event_seq_check (NMPlatform *platform, guint32 seq_number, WaitForNlResponseResult seq_result, const char *msg)
|
2015-04-06 18:29:36 +02:00
|
|
|
{
|
|
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
|
2015-12-14 14:47:41 +01:00
|
|
|
DelayedActionWaitForNlResponseData *data;
|
|
|
|
|
guint i;
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
if (seq_number == 0)
|
2015-12-13 10:03:22 +01:00
|
|
|
return;
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
if (NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE)) {
|
|
|
|
|
nm_assert (priv->delayed_action.list_wait_for_nl_response->len > 0);
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < priv->delayed_action.list_wait_for_nl_response->len; i++) {
|
|
|
|
|
data = &g_array_index (priv->delayed_action.list_wait_for_nl_response, DelayedActionWaitForNlResponseData, i);
|
|
|
|
|
|
|
|
|
|
if (data->seq_number == seq_number) {
|
|
|
|
|
/* We potentially receive many parts partial responses for the same sequence number.
|
|
|
|
|
* Thus, we only remember the result, and collect it later. */
|
|
|
|
|
if (data->seq_result < 0) {
|
2018-03-09 15:50:16 +01:00
|
|
|
/* we already saw an error for this sequence number.
|
2015-12-14 14:47:41 +01:00
|
|
|
* Preserve it. */
|
|
|
|
|
} else if ( seq_result != WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_UNKNOWN
|
|
|
|
|
|| data->seq_result == WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN)
|
|
|
|
|
data->seq_result = seq_result;
|
2018-03-09 15:50:16 +01:00
|
|
|
if (data->out_errmsg && !*data->out_errmsg)
|
|
|
|
|
*data->out_errmsg = g_strdup (msg);
|
2015-12-14 14:47:41 +01:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2018-08-26 19:45:22 +02:00
|
|
|
#if NM_MORE_LOGGING
|
2015-12-14 14:47:41 +01:00
|
|
|
if (seq_number != priv->nlh_seq_last_handled)
|
|
|
|
|
_LOGt ("netlink: recvmsg: unwaited sequence number %u", seq_number);
|
|
|
|
|
priv->nlh_seq_last_handled = seq_number;
|
2016-04-07 21:16:51 +02:00
|
|
|
#endif
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
}
|
|
|
|
|
|
2015-12-13 10:37:40 +01:00
|
|
|
static void
|
2016-01-24 18:46:14 +01:00
|
|
|
event_valid_msg (NMPlatform *platform, struct nl_msg *msg, gboolean handle_events)
|
2013-03-27 22:23:24 +01:00
|
|
|
{
|
2017-08-16 16:13:24 +02:00
|
|
|
NMLinuxPlatformPrivate *priv;
|
2015-10-20 14:43:31 +02:00
|
|
|
nm_auto_nmpobj NMPObject *obj = NULL;
|
2015-12-12 22:11:33 +01:00
|
|
|
NMPCacheOpsType cache_op;
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
struct nlmsghdr *msghdr;
|
2017-08-04 09:53:24 +02:00
|
|
|
char buf_nlmsghdr[400];
|
2015-10-12 16:07:01 +02:00
|
|
|
gboolean id_only = FALSE;
|
2017-06-29 11:18:10 +02:00
|
|
|
NMPCache *cache = nm_platform_get_cache (platform);
|
2017-08-05 15:14:44 +02:00
|
|
|
gboolean is_dump;
|
2013-03-27 22:23:24 +01:00
|
|
|
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
msghdr = nlmsg_hdr (msg);
|
2014-01-07 17:21:12 +01:00
|
|
|
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
if ( _support_kernel_extended_ifa_flags_still_undecided ()
|
|
|
|
|
&& msghdr->nlmsg_type == RTM_NEWADDR)
|
2015-05-06 11:55:02 +02:00
|
|
|
_support_kernel_extended_ifa_flags_detect (msg);
|
2014-01-07 17:21:12 +01:00
|
|
|
|
2016-01-24 18:46:14 +01:00
|
|
|
if (!handle_events)
|
|
|
|
|
return;
|
|
|
|
|
|
2015-10-12 16:07:01 +02:00
|
|
|
if (NM_IN_SET (msghdr->nlmsg_type, RTM_DELLINK, RTM_DELADDR, RTM_DELROUTE)) {
|
|
|
|
|
/* The event notifies about a deleted object. We don't need to initialize all
|
|
|
|
|
* fields of the object. */
|
|
|
|
|
id_only = TRUE;
|
|
|
|
|
}
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2017-06-29 11:18:10 +02:00
|
|
|
obj = nmp_object_new_from_nl (platform, cache, msg, id_only);
|
2015-10-12 16:07:01 +02:00
|
|
|
if (!obj) {
|
2017-08-04 09:53:24 +02:00
|
|
|
_LOGT ("event-notification: %s: ignore",
|
2018-02-16 10:40:33 +01:00
|
|
|
nl_nlmsghdr_to_str (msghdr, buf_nlmsghdr, sizeof (buf_nlmsghdr)));
|
2015-12-13 10:37:40 +01:00
|
|
|
return;
|
2015-08-05 18:20:00 +02:00
|
|
|
}
|
platform: avoid storing unknown netlink object types (bgo #742928)
Testing WWAN connections through a Nokia Series 40 phone, addresses of family
AF_PHONET end up triggering an assert() in object_has_ifindex(), just because
object_type_from_nl_object() only handles AF_INET and AF_INET6 address.
In order to avoid this kind of problems, we'll try to make sure that the object
caches kept by NM only store known object types.
(fixup by dcbw to use cached passed to cache_remove_unknown())
https://bugzilla.gnome.org/show_bug.cgi?id=742928
Connect: ppp0 <--> /dev/ttyACM0
nm-pppd-plugin-Message: nm-ppp-plugin: (nm_phasechange): status 5 / phase 'establish'
NetworkManager[27434]: <info> (ppp0): new Generic device (driver: 'unknown' ifindex: 12)
NetworkManager[27434]: <info> (ppp0): exported as /org/freedesktop/NetworkManager/Devices/4
[Thread 0x7ffff1ecf700 (LWP 27439) exited]
NetworkManager[27434]: <info> (ttyACM0): device state change: ip-config -> deactivating (reason 'user-requested') [70 110 39]
Terminating on signal 15
nm-pppd-plugin-Message: nm-ppp-plugin: (nm_phasechange): status 10 / phase 'terminate'
**
NetworkManager:ERROR:platform/nm-linux-platform.c:1534:object_has_ifindex: code should not be reached
Program received signal SIGABRT, Aborted.
0x00007ffff4692a97 in raise () from /usr/lib/libc.so.6
(gdb) bt
#0 0x00007ffff4692a97 in raise () from /usr/lib/libc.so.6
#1 0x00007ffff4693e6a in abort () from /usr/lib/libc.so.6
#2 0x00007ffff4c8d7f5 in g_assertion_message () from /usr/lib/libglib-2.0.so.0
#3 0x00007ffff4c8d88a in g_assertion_message_expr () from /usr/lib/libglib-2.0.so.0
#4 0x0000000000472b91 in object_has_ifindex (object=0x8a8320, ifindex=12) at platform/nm-linux-platform.c:1534
#5 0x0000000000472bec in check_cache_items (platform=0x7fe8a0, cache=0x7fda30, ifindex=12) at platform/nm-linux-platform.c:1549
#6 0x0000000000472de3 in announce_object (platform=0x7fe8a0, object=0x8a8c30, change_type=NM_PLATFORM_SIGNAL_REMOVED, reason=NM_PLATFORM_REASON_EXTERNAL) at platform/nm-linux-platform.c:1617
#7 0x0000000000473dd2 in event_notification (msg=0x8a7970, user_data=0x7fe8a0) at platform/nm-linux-platform.c:1992
#8 0x00007ffff5ee14de in nl_recvmsgs_report () from /usr/lib/libnl-3.so.200
#9 0x00007ffff5ee1849 in nl_recvmsgs () from /usr/lib/libnl-3.so.200
#10 0x00000000004794df in event_handler (channel=0x7fc930, io_condition=G_IO_IN, user_data=0x7fe8a0) at platform/nm-linux-platform.c:4152
#11 0x00007ffff4c6791d in g_main_context_dispatch () from /usr/lib/libglib-2.0.so.0
#12 0x00007ffff4c67cf8 in ?? () from /usr/lib/libglib-2.0.so.0
#13 0x00007ffff4c68022 in g_main_loop_run () from /usr/lib/libglib-2.0.so.0
#14 0x00000000004477ee in main (argc=1, argv=0x7fffffffeaa8) at main.c:447
(gdb) fr 4
#4 0x0000000000472b91 in object_has_ifindex (object=0x8a8320, ifindex=12) at platform/nm-linux-platform.c:1534
1534 g_assert_not_reached ();
2015-01-15 09:18:07 +01:00
|
|
|
|
2017-08-05 15:14:44 +02:00
|
|
|
switch (msghdr->nlmsg_type) {
|
|
|
|
|
case RTM_NEWADDR:
|
|
|
|
|
case RTM_NEWLINK:
|
|
|
|
|
case RTM_NEWROUTE:
|
2017-11-15 20:36:35 +01:00
|
|
|
case RTM_NEWQDISC:
|
2017-11-15 20:36:35 +01:00
|
|
|
case RTM_NEWTFILTER:
|
2017-08-05 15:14:44 +02:00
|
|
|
is_dump = delayed_action_refresh_all_in_progress (platform,
|
|
|
|
|
delayed_action_refresh_from_object_type (NMP_OBJECT_GET_TYPE (obj)));
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
is_dump = FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
_LOGT ("event-notification: %s%s: %s",
|
2018-02-16 10:40:33 +01:00
|
|
|
nl_nlmsghdr_to_str (msghdr, buf_nlmsghdr, sizeof (buf_nlmsghdr)),
|
2017-08-05 15:14:44 +02:00
|
|
|
is_dump ? ", in-dump" : "",
|
2017-08-04 09:53:24 +02:00
|
|
|
nmp_object_to_string (obj,
|
|
|
|
|
id_only ? NMP_OBJECT_TO_STRING_ID : NMP_OBJECT_TO_STRING_PUBLIC,
|
|
|
|
|
NULL, 0));
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
{
|
|
|
|
|
nm_auto_nmpobj const NMPObject *obj_old = NULL;
|
|
|
|
|
nm_auto_nmpobj const NMPObject *obj_new = NULL;
|
|
|
|
|
|
|
|
|
|
switch (msghdr->nlmsg_type) {
|
|
|
|
|
|
|
|
|
|
case RTM_NEWLINK:
|
|
|
|
|
case RTM_NEWADDR:
|
|
|
|
|
case RTM_GETLINK:
|
2017-11-15 20:36:35 +01:00
|
|
|
case RTM_NEWQDISC:
|
2017-11-15 20:36:35 +01:00
|
|
|
case RTM_NEWTFILTER:
|
2017-08-05 15:14:44 +02:00
|
|
|
cache_op = nmp_cache_update_netlink (cache, obj, is_dump, &obj_old, &obj_new);
|
2017-07-31 09:51:45 +02:00
|
|
|
if (cache_op != NMP_CACHE_OPS_UNCHANGED) {
|
|
|
|
|
cache_on_change (platform, cache_op, obj_old, obj_new);
|
|
|
|
|
nm_platform_cache_update_emit_signal (platform, cache_op, obj_old, obj_new);
|
|
|
|
|
}
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
break;
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
platform: fix cache to use kernel's notion for equality of routes
Until now, NetworkManager's platform cache for routes used the quadruple
network/plen,metric,ifindex for equaliy. That is not kernel's
understanding of how routes behave. For example, with `ip route append`
you can add two IPv4 routes that only differ by their gateway. To
the previous form of platform cache, these two routes would wrongly
look identical, as the cache could not contain both routes. This also
easily leads to cache-inconsistencies.
Now that we have NM_PLATFORM_IP_ROUTE_CMP_TYPE_ID, fix the route's
compare operator to match kernel's.
Well, not entirely. Kernel understands more properties for routes then
NetworkManager. Some of these properties may also be part of the ID according
to kernel. To NetworkManager such routes would still look identical as
they only differ in a property that is not understood. This can still
cause cache-inconsistencies. The only fix here is to add support for
all these properties in NetworkManager as well. However, it's less serious,
because with this commit we support several of the more important properties.
See also the related bug rh#1337855 for kernel.
Another difficulty is that `ip route replace` and `ip route change`
changes an existing route. The replaced route has the same
NM_PLATFORM_IP_ROUTE_CMP_TYPE_WEAK_ID, but differ in the actual
NM_PLATFORM_IP_ROUTE_CMP_TYPE_ID:
# ip -d -4 route show dev v
# ip monitor route &
# ip route add 192.168.5.0/24 dev v
192.168.5.0/24 dev v scope link
# ip route change 192.168.5.0/24 dev v scope 10
192.168.5.0/24 dev v scope 10
# ip -d -4 route show dev v
unicast 192.168.5.0/24 proto boot scope 10
Note that we only got one RTM_NEWROUTE message, although from NMPCache's
point of view, a new route (with a particular ID) was added and another
route (with a different ID) was deleted. The cumbersome workaround is,
to keep an ordered list of the routes, and figure out which route was
replaced in response to an RTM_NEWROUTE. In absence of bugs, this should
work fine. However, as we only rely on events, we might wrongly
introduce a cache-inconsistancy as well. See the related bug rh#1337860.
Also drop nm_platform_ip4_route_get() and the like. The ID of routes
is complex, so it makes little sense to look up a route directly.
2017-08-02 07:55:05 +02:00
|
|
|
case RTM_NEWROUTE: {
|
|
|
|
|
nm_auto_nmpobj const NMPObject *obj_replace = NULL;
|
|
|
|
|
gboolean resync_required = FALSE;
|
|
|
|
|
gboolean only_dirty = FALSE;
|
2018-03-05 14:26:15 +01:00
|
|
|
gboolean is_ipv6;
|
platform: fix cache to use kernel's notion for equality of routes
Until now, NetworkManager's platform cache for routes used the quadruple
network/plen,metric,ifindex for equaliy. That is not kernel's
understanding of how routes behave. For example, with `ip route append`
you can add two IPv4 routes that only differ by their gateway. To
the previous form of platform cache, these two routes would wrongly
look identical, as the cache could not contain both routes. This also
easily leads to cache-inconsistencies.
Now that we have NM_PLATFORM_IP_ROUTE_CMP_TYPE_ID, fix the route's
compare operator to match kernel's.
Well, not entirely. Kernel understands more properties for routes then
NetworkManager. Some of these properties may also be part of the ID according
to kernel. To NetworkManager such routes would still look identical as
they only differ in a property that is not understood. This can still
cause cache-inconsistencies. The only fix here is to add support for
all these properties in NetworkManager as well. However, it's less serious,
because with this commit we support several of the more important properties.
See also the related bug rh#1337855 for kernel.
Another difficulty is that `ip route replace` and `ip route change`
changes an existing route. The replaced route has the same
NM_PLATFORM_IP_ROUTE_CMP_TYPE_WEAK_ID, but differ in the actual
NM_PLATFORM_IP_ROUTE_CMP_TYPE_ID:
# ip -d -4 route show dev v
# ip monitor route &
# ip route add 192.168.5.0/24 dev v
192.168.5.0/24 dev v scope link
# ip route change 192.168.5.0/24 dev v scope 10
192.168.5.0/24 dev v scope 10
# ip -d -4 route show dev v
unicast 192.168.5.0/24 proto boot scope 10
Note that we only got one RTM_NEWROUTE message, although from NMPCache's
point of view, a new route (with a particular ID) was added and another
route (with a different ID) was deleted. The cumbersome workaround is,
to keep an ordered list of the routes, and figure out which route was
replaced in response to an RTM_NEWROUTE. In absence of bugs, this should
work fine. However, as we only rely on events, we might wrongly
introduce a cache-inconsistancy as well. See the related bug rh#1337860.
Also drop nm_platform_ip4_route_get() and the like. The ID of routes
is complex, so it makes little sense to look up a route directly.
2017-08-02 07:55:05 +02:00
|
|
|
|
2018-03-05 14:26:15 +01:00
|
|
|
/* IPv4 routes that are a response to RTM_GETROUTE must have
|
|
|
|
|
* the cloned flag while IPv6 routes don't have to. */
|
|
|
|
|
is_ipv6 = NMP_OBJECT_GET_TYPE (obj) == NMP_OBJECT_TYPE_IP6_ROUTE;
|
|
|
|
|
if (is_ipv6 || NM_FLAGS_HAS (obj->ip_route.r_rtm_flags, RTM_F_CLONED)) {
|
|
|
|
|
nm_assert (is_ipv6 || !nmp_object_is_alive (obj));
|
2017-08-16 16:13:24 +02:00
|
|
|
priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
|
|
|
|
|
if (NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE)) {
|
|
|
|
|
guint i;
|
|
|
|
|
|
|
|
|
|
nm_assert (priv->delayed_action.list_wait_for_nl_response->len > 0);
|
|
|
|
|
for (i = 0; i < priv->delayed_action.list_wait_for_nl_response->len; i++) {
|
|
|
|
|
DelayedActionWaitForNlResponseData *data = &g_array_index (priv->delayed_action.list_wait_for_nl_response, DelayedActionWaitForNlResponseData, i);
|
|
|
|
|
|
|
|
|
|
if ( data->response_type == DELAYED_ACTION_RESPONSE_TYPE_ROUTE_GET
|
|
|
|
|
&& data->response.out_route_get) {
|
|
|
|
|
nm_assert (!*data->response.out_route_get);
|
|
|
|
|
if (data->seq_number == nlmsg_hdr (msg)->nlmsg_seq) {
|
|
|
|
|
*data->response.out_route_get = nmp_object_clone (obj, FALSE);
|
|
|
|
|
data->response.out_route_get = NULL;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
platform: fix cache to use kernel's notion for equality of routes
Until now, NetworkManager's platform cache for routes used the quadruple
network/plen,metric,ifindex for equaliy. That is not kernel's
understanding of how routes behave. For example, with `ip route append`
you can add two IPv4 routes that only differ by their gateway. To
the previous form of platform cache, these two routes would wrongly
look identical, as the cache could not contain both routes. This also
easily leads to cache-inconsistencies.
Now that we have NM_PLATFORM_IP_ROUTE_CMP_TYPE_ID, fix the route's
compare operator to match kernel's.
Well, not entirely. Kernel understands more properties for routes then
NetworkManager. Some of these properties may also be part of the ID according
to kernel. To NetworkManager such routes would still look identical as
they only differ in a property that is not understood. This can still
cause cache-inconsistencies. The only fix here is to add support for
all these properties in NetworkManager as well. However, it's less serious,
because with this commit we support several of the more important properties.
See also the related bug rh#1337855 for kernel.
Another difficulty is that `ip route replace` and `ip route change`
changes an existing route. The replaced route has the same
NM_PLATFORM_IP_ROUTE_CMP_TYPE_WEAK_ID, but differ in the actual
NM_PLATFORM_IP_ROUTE_CMP_TYPE_ID:
# ip -d -4 route show dev v
# ip monitor route &
# ip route add 192.168.5.0/24 dev v
192.168.5.0/24 dev v scope link
# ip route change 192.168.5.0/24 dev v scope 10
192.168.5.0/24 dev v scope 10
# ip -d -4 route show dev v
unicast 192.168.5.0/24 proto boot scope 10
Note that we only got one RTM_NEWROUTE message, although from NMPCache's
point of view, a new route (with a particular ID) was added and another
route (with a different ID) was deleted. The cumbersome workaround is,
to keep an ordered list of the routes, and figure out which route was
replaced in response to an RTM_NEWROUTE. In absence of bugs, this should
work fine. However, as we only rely on events, we might wrongly
introduce a cache-inconsistancy as well. See the related bug rh#1337860.
Also drop nm_platform_ip4_route_get() and the like. The ID of routes
is complex, so it makes little sense to look up a route directly.
2017-08-02 07:55:05 +02:00
|
|
|
cache_op = nmp_cache_update_netlink_route (cache,
|
|
|
|
|
obj,
|
|
|
|
|
is_dump,
|
|
|
|
|
msghdr->nlmsg_flags,
|
|
|
|
|
&obj_old,
|
|
|
|
|
&obj_new,
|
|
|
|
|
&obj_replace,
|
|
|
|
|
&resync_required);
|
|
|
|
|
if (cache_op != NMP_CACHE_OPS_UNCHANGED) {
|
|
|
|
|
if (obj_replace) {
|
|
|
|
|
const NMDedupMultiEntry *entry_replace;
|
|
|
|
|
|
|
|
|
|
/* we found an object that is to be replaced by the RTM_NEWROUTE message.
|
|
|
|
|
* While we invoke the signal, the platform cache might change and invalidate
|
|
|
|
|
* the findings. Mitigate that (for the most part), by marking the entry as
|
|
|
|
|
* dirty and only delete @obj_replace if it is still dirty afterwards.
|
|
|
|
|
*
|
|
|
|
|
* Yes, there is a tiny tiny chance for still getting it wrong. But in practice,
|
|
|
|
|
* the signal handlers do not cause to call the platform again, so the cache
|
|
|
|
|
* is not really changing. -- if they would, it would anyway be dangerous to overflow
|
|
|
|
|
* the stack and it's not ensured that the processing of netlink messages is
|
|
|
|
|
* reentrant (maybe it is).
|
|
|
|
|
*/
|
|
|
|
|
entry_replace = nmp_cache_lookup_entry (cache, obj_replace);
|
|
|
|
|
nm_assert (entry_replace && entry_replace->obj == obj_replace);
|
|
|
|
|
nm_dedup_multi_entry_set_dirty (entry_replace, TRUE);
|
|
|
|
|
only_dirty = TRUE;
|
|
|
|
|
}
|
|
|
|
|
cache_on_change (platform, cache_op, obj_old, obj_new);
|
|
|
|
|
nm_platform_cache_update_emit_signal (platform, cache_op, obj_old, obj_new);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (obj_replace) {
|
|
|
|
|
/* the RTM_NEWROUTE message indicates that another route was replaced.
|
|
|
|
|
* Remove it now. */
|
|
|
|
|
cache_op = nmp_cache_remove (cache, obj_replace, TRUE, only_dirty, NULL);
|
|
|
|
|
if (cache_op != NMP_CACHE_OPS_UNCHANGED) {
|
|
|
|
|
nm_assert (cache_op == NMP_CACHE_OPS_REMOVED);
|
|
|
|
|
cache_on_change (platform, cache_op, obj_replace, NULL);
|
|
|
|
|
nm_platform_cache_update_emit_signal (platform, cache_op, obj_replace, NULL);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (resync_required) {
|
|
|
|
|
/* we'd like to avoid such resyncs as they are expensive and we should only rely on the
|
|
|
|
|
* netlink events. This needs investigation. */
|
|
|
|
|
_LOGT ("schedule resync of routes after RTM_NEWROUTE");
|
|
|
|
|
delayed_action_schedule (platform,
|
|
|
|
|
delayed_action_refresh_from_object_type (NMP_OBJECT_GET_TYPE (obj)),
|
|
|
|
|
NULL);
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
case RTM_DELLINK:
|
|
|
|
|
case RTM_DELADDR:
|
|
|
|
|
case RTM_DELROUTE:
|
2017-11-15 20:36:35 +01:00
|
|
|
case RTM_DELQDISC:
|
2017-11-15 20:36:35 +01:00
|
|
|
case RTM_DELTFILTER:
|
2017-06-29 11:18:10 +02:00
|
|
|
cache_op = nmp_cache_remove_netlink (cache, obj, &obj_old, &obj_new);
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
if (cache_op != NMP_CACHE_OPS_UNCHANGED) {
|
|
|
|
|
cache_on_change (platform, cache_op, obj_old, obj_new);
|
2017-06-29 13:13:54 +02:00
|
|
|
nm_platform_cache_update_emit_signal (platform, cache_op, obj_old, obj_new);
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
}
|
2013-03-27 22:23:24 +01:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2013-03-27 22:23:24 +01:00
|
|
|
|
2018-11-05 12:35:40 +01:00
|
|
|
static int
|
2015-12-14 17:16:13 +01:00
|
|
|
do_add_link_with_lookup (NMPlatform *platform,
|
|
|
|
|
NMLinkType link_type,
|
|
|
|
|
const char *name,
|
|
|
|
|
struct nl_msg *nlmsg,
|
|
|
|
|
const NMPlatformLink **out_link)
|
2015-04-06 18:29:36 +02:00
|
|
|
{
|
2015-12-14 17:16:13 +01:00
|
|
|
const NMPObject *obj = NULL;
|
|
|
|
|
WaitForNlResponseResult seq_result = WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN;
|
2018-03-09 15:50:16 +01:00
|
|
|
gs_free char *errmsg = NULL;
|
2015-04-06 18:29:36 +02:00
|
|
|
int nle;
|
2015-12-14 17:16:13 +01:00
|
|
|
char s_buf[256];
|
2017-06-29 11:18:10 +02:00
|
|
|
NMPCache *cache = nm_platform_get_cache (platform);
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-15 10:40:41 +01:00
|
|
|
event_handler_read_netlink (platform, FALSE);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2018-03-09 15:50:16 +01:00
|
|
|
nle = _nl_send_nlmsg (platform, nlmsg, &seq_result, &errmsg, DELAYED_ACTION_RESPONSE_TYPE_VOID, NULL);
|
2015-04-06 18:29:36 +02:00
|
|
|
if (nle < 0) {
|
2015-12-14 17:16:13 +01:00
|
|
|
_LOGE ("do-add-link[%s/%s]: failed sending netlink request \"%s\" (%d)",
|
2015-10-20 09:27:16 +02:00
|
|
|
name,
|
|
|
|
|
nm_link_type_to_string (link_type),
|
2018-12-22 13:35:57 +01:00
|
|
|
nm_strerror (nle), -nle);
|
2017-08-21 16:11:31 +02:00
|
|
|
NM_SET_OUT (out_link, NULL);
|
2018-11-05 12:35:40 +01:00
|
|
|
return nle;
|
2015-10-20 09:27:16 +02:00
|
|
|
}
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-14 17:16:13 +01:00
|
|
|
delayed_action_handle_all (platform, FALSE);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2015-12-14 17:16:13 +01:00
|
|
|
nm_assert (seq_result);
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-14 17:16:13 +01:00
|
|
|
_NMLOG (seq_result == WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK
|
|
|
|
|
? LOGL_DEBUG
|
2017-08-21 16:11:31 +02:00
|
|
|
: LOGL_WARN,
|
2015-12-14 17:16:13 +01:00
|
|
|
"do-add-link[%s/%s]: %s",
|
|
|
|
|
name,
|
|
|
|
|
nm_link_type_to_string (link_type),
|
2018-03-09 15:50:16 +01:00
|
|
|
wait_for_nl_response_to_string (seq_result, errmsg, s_buf, sizeof (s_buf)));
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2017-08-21 16:11:31 +02:00
|
|
|
if (out_link) {
|
2017-06-29 11:18:10 +02:00
|
|
|
obj = nmp_cache_lookup_link_full (cache, 0, name, FALSE, link_type, NULL, NULL);
|
2017-08-21 16:11:31 +02:00
|
|
|
*out_link = NMP_OBJECT_CAST_LINK (obj);
|
2015-12-14 17:16:13 +01:00
|
|
|
}
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2018-11-05 12:35:40 +01:00
|
|
|
return wait_for_nl_response_to_nmerr (seq_result);
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
static int
|
2017-08-21 18:02:08 +02:00
|
|
|
do_add_addrroute (NMPlatform *platform,
|
|
|
|
|
const NMPObject *obj_id,
|
|
|
|
|
struct nl_msg *nlmsg,
|
|
|
|
|
gboolean suppress_netlink_failure)
|
2015-04-06 18:29:36 +02:00
|
|
|
{
|
2015-12-14 17:16:13 +01:00
|
|
|
WaitForNlResponseResult seq_result = WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN;
|
2018-03-09 15:50:16 +01:00
|
|
|
gs_free char *errmsg = NULL;
|
2015-04-06 18:29:36 +02:00
|
|
|
int nle;
|
2015-12-14 17:16:13 +01:00
|
|
|
char s_buf[256];
|
2015-04-06 18:29:36 +02:00
|
|
|
|
|
|
|
|
nm_assert (NM_IN_SET (NMP_OBJECT_GET_TYPE (obj_id),
|
2015-06-19 16:24:18 +02:00
|
|
|
NMP_OBJECT_TYPE_IP4_ADDRESS, NMP_OBJECT_TYPE_IP6_ADDRESS,
|
|
|
|
|
NMP_OBJECT_TYPE_IP4_ROUTE, NMP_OBJECT_TYPE_IP6_ROUTE));
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-15 10:40:41 +01:00
|
|
|
event_handler_read_netlink (platform, FALSE);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2018-03-09 15:50:16 +01:00
|
|
|
nle = _nl_send_nlmsg (platform, nlmsg, &seq_result, &errmsg, DELAYED_ACTION_RESPONSE_TYPE_VOID, NULL);
|
2015-04-06 18:29:36 +02:00
|
|
|
if (nle < 0) {
|
2015-10-20 09:27:16 +02:00
|
|
|
_LOGE ("do-add-%s[%s]: failure sending netlink request \"%s\" (%d)",
|
2015-04-06 18:29:36 +02:00
|
|
|
NMP_OBJECT_GET_CLASS (obj_id)->obj_type_name,
|
2015-10-20 09:27:16 +02:00
|
|
|
nmp_object_to_string (obj_id, NMP_OBJECT_TO_STRING_ID, NULL, 0),
|
2018-12-22 13:35:57 +01:00
|
|
|
nm_strerror (nle), -nle);
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return -NME_PL_NETLINK;
|
2015-10-20 09:27:16 +02:00
|
|
|
}
|
|
|
|
|
|
2015-12-14 17:16:13 +01:00
|
|
|
delayed_action_handle_all (platform, FALSE);
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-14 17:16:13 +01:00
|
|
|
nm_assert (seq_result);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2017-08-21 18:02:08 +02:00
|
|
|
_NMLOG (( seq_result == WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK
|
|
|
|
|
|| ( suppress_netlink_failure
|
|
|
|
|
&& seq_result < 0))
|
2015-12-14 17:16:13 +01:00
|
|
|
? LOGL_DEBUG
|
2017-08-21 15:33:57 +02:00
|
|
|
: LOGL_WARN,
|
2015-12-14 17:16:13 +01:00
|
|
|
"do-add-%s[%s]: %s",
|
|
|
|
|
NMP_OBJECT_GET_CLASS (obj_id)->obj_type_name,
|
|
|
|
|
nmp_object_to_string (obj_id, NMP_OBJECT_TO_STRING_ID, NULL, 0),
|
2018-03-09 15:50:16 +01:00
|
|
|
wait_for_nl_response_to_string (seq_result, errmsg, s_buf, sizeof (s_buf)));
|
2015-12-14 17:16:13 +01:00
|
|
|
|
2017-08-21 15:33:57 +02:00
|
|
|
if (NMP_OBJECT_GET_TYPE (obj_id) == NMP_OBJECT_TYPE_IP6_ADDRESS) {
|
|
|
|
|
/* In rare cases, the object is not yet ready as we received the ACK from
|
|
|
|
|
* kernel. Need to refetch.
|
|
|
|
|
*
|
|
|
|
|
* We want to safe the expensive refetch, thus we look first into the cache
|
|
|
|
|
* whether the object exists.
|
|
|
|
|
*
|
|
|
|
|
* rh#1484434 */
|
platform: refetch IPv6 address if still present after deletion
After commit 5a69b27a64a0 ("platform: let platform operations only
consider kernel response") the platform only relies on kernel messages
and doesn't check if a deleted object is gone from the cache. For IPv6
addresses it can happen that the RTM_DELADDR comes after the ack, and
this causes random failures in test /address/ipv6/general-2:
[10.8009] platform: address: deleting IPv6 address 2001:db8:a:b:1:2:3:4/64, ifindex 12 dev nm-test-device
[10.8009] platform-linux: delayed-action: schedule wait-for-nl-response (seq 55, timeout in 0.199999680, response-type 0)
[10.8009] platform-linux: delayed-action: handle wait-for-nl-response (any)
[10.8009] platform-linux: netlink: recvmsg: new message (2), flags 0x0100, seq 55
[10.8009] platform-linux: delayed-action: complete wait-for-nl-response (seq 55, timeout in 0.199980533, response-type 0, success)
[10.8009] platform-linux: do-delete-ip6-address[12: 2001:db8:a:b:1:2:3:4]: success
**
NetworkManager:ERROR:src/platform/tests/test-common.c:1127:_ip_address_del: assertion failed: (external_command)
Use the same workaround in place for the addition of IPv6 addresses,
i.e. refetch the object if the address is still present after the ack.
2017-09-07 09:23:42 +02:00
|
|
|
if (!nmp_cache_lookup_obj (nm_platform_get_cache (platform), obj_id))
|
2017-08-21 15:33:57 +02:00
|
|
|
do_request_one_type (platform, NMP_OBJECT_GET_TYPE (obj_id));
|
2015-12-15 13:37:32 +01:00
|
|
|
}
|
2015-04-06 18:29:36 +02:00
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return wait_for_nl_response_to_nmerr (seq_result);
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
2015-10-20 09:27:16 +02:00
|
|
|
do_delete_object (NMPlatform *platform, const NMPObject *obj_id, struct nl_msg *nlmsg)
|
2015-04-06 18:29:36 +02:00
|
|
|
{
|
2015-12-14 18:42:29 +01:00
|
|
|
WaitForNlResponseResult seq_result = WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN;
|
2018-03-09 15:50:16 +01:00
|
|
|
gs_free char *errmsg = NULL;
|
2015-04-06 18:29:36 +02:00
|
|
|
int nle;
|
2015-12-14 18:42:29 +01:00
|
|
|
char s_buf[256];
|
2017-08-21 16:11:31 +02:00
|
|
|
gboolean success;
|
2015-12-14 18:42:29 +01:00
|
|
|
const char *log_detail = "";
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-15 10:40:41 +01:00
|
|
|
event_handler_read_netlink (platform, FALSE);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2018-03-09 15:50:16 +01:00
|
|
|
nle = _nl_send_nlmsg (platform, nlmsg, &seq_result, &errmsg, DELAYED_ACTION_RESPONSE_TYPE_VOID, NULL);
|
2015-10-20 09:27:16 +02:00
|
|
|
if (nle < 0) {
|
|
|
|
|
_LOGE ("do-delete-%s[%s]: failure sending netlink request \"%s\" (%d)",
|
|
|
|
|
NMP_OBJECT_GET_CLASS (obj_id)->obj_type_name,
|
|
|
|
|
nmp_object_to_string (obj_id, NMP_OBJECT_TO_STRING_ID, NULL, 0),
|
2018-12-22 13:35:57 +01:00
|
|
|
nm_strerror (nle), -nle);
|
2017-08-21 16:11:31 +02:00
|
|
|
return FALSE;
|
2015-10-20 09:27:16 +02:00
|
|
|
}
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-14 18:42:29 +01:00
|
|
|
delayed_action_handle_all (platform, FALSE);
|
2015-10-20 09:27:16 +02:00
|
|
|
|
2015-12-14 18:42:29 +01:00
|
|
|
nm_assert (seq_result);
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2017-08-21 16:11:31 +02:00
|
|
|
success = TRUE;
|
2015-12-14 18:42:29 +01:00
|
|
|
if (seq_result == WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK) {
|
|
|
|
|
/* ok */
|
|
|
|
|
} else if (NM_IN_SET (-((int) seq_result), ESRCH, ENOENT))
|
|
|
|
|
log_detail = ", meaning the object was already removed";
|
|
|
|
|
else if ( NM_IN_SET (-((int) seq_result), ENXIO)
|
|
|
|
|
&& NM_IN_SET (NMP_OBJECT_GET_TYPE (obj_id), NMP_OBJECT_TYPE_IP6_ADDRESS)) {
|
|
|
|
|
/* On RHEL7 kernel, deleting a non existing address fails with ENXIO */
|
|
|
|
|
log_detail = ", meaning the address was already removed";
|
|
|
|
|
} else if ( NM_IN_SET (-((int) seq_result), EADDRNOTAVAIL)
|
|
|
|
|
&& NM_IN_SET (NMP_OBJECT_GET_TYPE (obj_id), NMP_OBJECT_TYPE_IP4_ADDRESS, NMP_OBJECT_TYPE_IP6_ADDRESS))
|
|
|
|
|
log_detail = ", meaning the address was already removed";
|
|
|
|
|
else
|
|
|
|
|
success = FALSE;
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2017-08-21 16:11:31 +02:00
|
|
|
_NMLOG (success ? LOGL_DEBUG : LOGL_WARN,
|
2015-12-14 18:42:29 +01:00
|
|
|
"do-delete-%s[%s]: %s%s",
|
|
|
|
|
NMP_OBJECT_GET_CLASS (obj_id)->obj_type_name,
|
|
|
|
|
nmp_object_to_string (obj_id, NMP_OBJECT_TO_STRING_ID, NULL, 0),
|
2018-03-09 15:50:16 +01:00
|
|
|
wait_for_nl_response_to_string (seq_result, errmsg, s_buf, sizeof (s_buf)),
|
2015-12-14 18:42:29 +01:00
|
|
|
log_detail);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2018-01-08 13:51:53 +01:00
|
|
|
if (NM_IN_SET (NMP_OBJECT_GET_TYPE (obj_id),
|
|
|
|
|
NMP_OBJECT_TYPE_IP6_ADDRESS,
|
|
|
|
|
NMP_OBJECT_TYPE_QDISC,
|
|
|
|
|
NMP_OBJECT_TYPE_TFILTER)) {
|
platform: refetch IPv6 address if still present after deletion
After commit 5a69b27a64a0 ("platform: let platform operations only
consider kernel response") the platform only relies on kernel messages
and doesn't check if a deleted object is gone from the cache. For IPv6
addresses it can happen that the RTM_DELADDR comes after the ack, and
this causes random failures in test /address/ipv6/general-2:
[10.8009] platform: address: deleting IPv6 address 2001:db8:a:b:1:2:3:4/64, ifindex 12 dev nm-test-device
[10.8009] platform-linux: delayed-action: schedule wait-for-nl-response (seq 55, timeout in 0.199999680, response-type 0)
[10.8009] platform-linux: delayed-action: handle wait-for-nl-response (any)
[10.8009] platform-linux: netlink: recvmsg: new message (2), flags 0x0100, seq 55
[10.8009] platform-linux: delayed-action: complete wait-for-nl-response (seq 55, timeout in 0.199980533, response-type 0, success)
[10.8009] platform-linux: do-delete-ip6-address[12: 2001:db8:a:b:1:2:3:4]: success
**
NetworkManager:ERROR:src/platform/tests/test-common.c:1127:_ip_address_del: assertion failed: (external_command)
Use the same workaround in place for the addition of IPv6 addresses,
i.e. refetch the object if the address is still present after the ack.
2017-09-07 09:23:42 +02:00
|
|
|
/* In rare cases, the object is still there after we receive the ACK from
|
|
|
|
|
* kernel. Need to refetch.
|
|
|
|
|
*
|
|
|
|
|
* We want to safe the expensive refetch, thus we look first into the cache
|
|
|
|
|
* whether the object exists.
|
|
|
|
|
*
|
|
|
|
|
* rh#1484434 */
|
|
|
|
|
if (nmp_cache_lookup_obj (nm_platform_get_cache (platform), obj_id))
|
|
|
|
|
do_request_one_type (platform, NMP_OBJECT_GET_TYPE (obj_id));
|
|
|
|
|
}
|
|
|
|
|
|
2017-08-21 16:11:31 +02:00
|
|
|
return success;
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
static int
|
2017-10-23 13:26:14 +02:00
|
|
|
do_change_link (NMPlatform *platform,
|
|
|
|
|
ChangeLinkType change_link_type,
|
|
|
|
|
int ifindex,
|
|
|
|
|
struct nl_msg *nlmsg,
|
|
|
|
|
const ChangeLinkData *data)
|
2015-04-06 18:29:36 +02:00
|
|
|
{
|
2016-03-08 13:02:09 +01:00
|
|
|
nm_auto_pop_netns NMPNetns *netns = NULL;
|
2015-04-06 18:29:36 +02:00
|
|
|
int nle;
|
2017-10-23 13:26:14 +02:00
|
|
|
WaitForNlResponseResult seq_result = WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN;
|
2018-03-09 15:50:16 +01:00
|
|
|
gs_free char *errmsg = NULL;
|
2017-10-23 13:26:14 +02:00
|
|
|
char s_buf[256];
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
int result = 0;
|
2017-10-23 13:26:14 +02:00
|
|
|
NMLogLevel log_level = LOGL_DEBUG;
|
|
|
|
|
const char *log_result = "failure";
|
|
|
|
|
const char *log_detail = "";
|
|
|
|
|
gs_free char *log_detail_free = NULL;
|
|
|
|
|
const NMPObject *obj_cache;
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2017-10-23 13:26:14 +02:00
|
|
|
if (!nm_platform_netns_push (platform, &netns)) {
|
|
|
|
|
log_level = LOGL_ERR;
|
|
|
|
|
log_detail = ", failure to change network namespace";
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
2016-03-08 13:02:09 +01:00
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
retry:
|
2018-03-09 15:50:16 +01:00
|
|
|
nle = _nl_send_nlmsg (platform, nlmsg, &seq_result, &errmsg, DELAYED_ACTION_RESPONSE_TYPE_VOID, NULL);
|
2015-10-20 09:27:16 +02:00
|
|
|
if (nle < 0) {
|
2017-10-23 13:26:14 +02:00
|
|
|
log_level = LOGL_ERR;
|
|
|
|
|
log_detail_free = g_strdup_printf (", failure sending netlink request: %s (%d)",
|
2018-12-22 13:35:57 +01:00
|
|
|
nm_strerror (nle), -nle);
|
2017-10-23 13:26:14 +02:00
|
|
|
log_detail = log_detail_free;
|
|
|
|
|
goto out;
|
2015-10-20 09:27:16 +02:00
|
|
|
}
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2015-12-14 19:18:35 +01:00
|
|
|
/* always refetch the link after changing it. There seems to be issues
|
|
|
|
|
* and we sometimes lack events. Nuke it from the orbit... */
|
|
|
|
|
delayed_action_schedule (platform, DELAYED_ACTION_TYPE_REFRESH_LINK, GINT_TO_POINTER (ifindex));
|
|
|
|
|
|
|
|
|
|
delayed_action_handle_all (platform, FALSE);
|
|
|
|
|
|
|
|
|
|
nm_assert (seq_result);
|
|
|
|
|
|
|
|
|
|
if ( NM_IN_SET (-((int) seq_result), EOPNOTSUPP)
|
2015-10-20 09:27:16 +02:00
|
|
|
&& nlmsg_hdr (nlmsg)->nlmsg_type == RTM_NEWLINK) {
|
|
|
|
|
nlmsg_hdr (nlmsg)->nlmsg_type = RTM_SETLINK;
|
|
|
|
|
goto retry;
|
|
|
|
|
}
|
2017-10-23 13:25:54 +02:00
|
|
|
|
2015-12-14 19:18:35 +01:00
|
|
|
if (seq_result == WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK) {
|
|
|
|
|
log_result = "success";
|
|
|
|
|
} else if (NM_IN_SET (-((int) seq_result), EEXIST, EADDRINUSE)) {
|
|
|
|
|
/* */
|
|
|
|
|
} else if (NM_IN_SET (-((int) seq_result), ESRCH, ENOENT)) {
|
|
|
|
|
log_detail = ", firmware not found";
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
result = -NME_PL_NO_FIRMWARE;
|
2017-10-23 12:48:49 +02:00
|
|
|
} else if ( NM_IN_SET (-((int) seq_result), ERANGE)
|
|
|
|
|
&& change_link_type == CHANGE_LINK_TYPE_SET_MTU) {
|
|
|
|
|
log_detail = ", setting MTU to requested size is not possible";
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
result = -NME_PL_CANT_SET_MTU;
|
2017-10-23 13:17:21 +02:00
|
|
|
} else if ( NM_IN_SET (-((int) seq_result), ENFILE)
|
|
|
|
|
&& change_link_type == CHANGE_LINK_TYPE_SET_ADDRESS
|
|
|
|
|
&& (obj_cache = nmp_cache_lookup_link (nm_platform_get_cache (platform), ifindex))
|
|
|
|
|
&& obj_cache->link.addr.len == data->set_address.length
|
|
|
|
|
&& memcmp (obj_cache->link.addr.data, data->set_address.address, data->set_address.length) == 0) {
|
|
|
|
|
/* workaround ENFILE which may be wrongly returned (bgo #770456).
|
|
|
|
|
* If the MAC address is as expected, assume success? */
|
|
|
|
|
log_result = "success";
|
|
|
|
|
log_detail = " (assume success changing address)";
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
result = 0;
|
2016-07-05 10:41:18 +02:00
|
|
|
} else if (NM_IN_SET (-((int) seq_result), ENODEV)) {
|
|
|
|
|
log_level = LOGL_DEBUG;
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
result = -NME_PL_NOT_FOUND;
|
2018-08-15 18:27:06 +02:00
|
|
|
} else if (-((int) seq_result) == EAFNOSUPPORT) {
|
|
|
|
|
log_level = LOGL_DEBUG;
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
result = -NME_PL_OPNOTSUPP;
|
2015-12-14 19:18:35 +01:00
|
|
|
} else {
|
2017-08-21 16:11:31 +02:00
|
|
|
log_level = LOGL_WARN;
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
result = -NME_UNSPEC;
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
2017-10-23 13:26:14 +02:00
|
|
|
|
|
|
|
|
out:
|
2015-12-14 19:18:35 +01:00
|
|
|
_NMLOG (log_level,
|
|
|
|
|
"do-change-link[%d]: %s changing link: %s%s",
|
|
|
|
|
ifindex,
|
|
|
|
|
log_result,
|
2018-03-09 15:50:16 +01:00
|
|
|
wait_for_nl_response_to_string (seq_result, errmsg, s_buf, sizeof (s_buf)),
|
2015-12-14 19:18:35 +01:00
|
|
|
log_detail);
|
|
|
|
|
return result;
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
|
2018-11-05 12:35:40 +01:00
|
|
|
static int
|
2014-09-18 12:53:19 -05:00
|
|
|
link_add (NMPlatform *platform,
|
|
|
|
|
const char *name,
|
|
|
|
|
NMLinkType type,
|
2017-05-26 14:00:29 +02:00
|
|
|
const char *veth_peer,
|
2014-09-18 12:53:19 -05:00
|
|
|
const void *address,
|
|
|
|
|
size_t address_len,
|
2015-12-09 15:13:57 +01:00
|
|
|
const NMPlatformLink **out_link)
|
2013-03-27 22:23:24 +01:00
|
|
|
{
|
2015-10-20 09:27:16 +02:00
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
2013-05-09 10:51:27 -05:00
|
|
|
|
2013-04-26 21:20:57 +02:00
|
|
|
if (type == NM_LINK_TYPE_BOND) {
|
|
|
|
|
/* When the kernel loads the bond module, either via explicit modprobe
|
|
|
|
|
* or automatically in response to creating a bond master, it will also
|
|
|
|
|
* create a 'bond0' interface. Since the bond we're about to create may
|
|
|
|
|
* or may not be named 'bond0' prevent potential confusion about a bond
|
|
|
|
|
* that the user didn't want by telling the bonding module not to create
|
|
|
|
|
* bond0 automatically.
|
|
|
|
|
*/
|
|
|
|
|
if (!g_file_test ("/sys/class/net/bonding_masters", G_FILE_TEST_EXISTS))
|
2016-03-04 09:02:45 +01:00
|
|
|
(void) nm_utils_modprobe (NULL, TRUE, "bonding", "max_bonds=0", NULL);
|
2013-04-26 21:20:57 +02:00
|
|
|
}
|
|
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
nlmsg = _nl_msg_new_link (RTM_NEWLINK,
|
2016-04-08 15:05:35 +02:00
|
|
|
NLM_F_CREATE | NLM_F_EXCL,
|
2015-10-20 09:27:16 +02:00
|
|
|
0,
|
|
|
|
|
name,
|
2015-11-02 14:27:22 +01:00
|
|
|
0,
|
2015-10-20 09:27:16 +02:00
|
|
|
0);
|
|
|
|
|
if (!nlmsg)
|
2018-11-05 12:35:40 +01:00
|
|
|
return -NME_UNSPEC;
|
2014-05-13 18:13:52 +02:00
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
if (address && address_len)
|
|
|
|
|
NLA_PUT (nlmsg, IFLA_ADDRESS, address_len, address);
|
2014-05-13 18:13:52 +02:00
|
|
|
|
2017-05-26 14:00:29 +02:00
|
|
|
if (!_nl_msg_new_link_set_linkinfo (nlmsg, type, veth_peer))
|
2018-11-05 12:35:40 +01:00
|
|
|
return -NME_UNSPEC;
|
2014-09-18 12:53:19 -05:00
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
return do_add_link_with_lookup (platform, type, name, nlmsg, out_link);
|
|
|
|
|
nla_put_failure:
|
2018-11-05 12:35:40 +01:00
|
|
|
g_return_val_if_reached (-NME_BUG);
|
2013-03-27 22:23:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
link_delete (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
2015-10-20 09:27:16 +02:00
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
2015-10-26 09:19:15 +01:00
|
|
|
NMPObject obj_id;
|
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
|
|
|
const NMPObject *obj;
|
2013-07-26 17:03:39 +02:00
|
|
|
|
2017-06-29 11:18:10 +02:00
|
|
|
obj = nmp_cache_lookup_link (nm_platform_get_cache (platform), ifindex);
|
platform: drop nm_platform_get_error()
For NMPlatform instances we had an error reporting mechanism
which stores the last error reason in a private field. Later we
would check it via nm_platform_get_error().
Remove this. It was not used much, and it is not a great way
to report errors.
One problem is that at the point where the error happens, you don't
know whether anybody cares about an error code. So, you add code to set
the error reason because somebody *might* need it (but in realitiy, almost
no caller cares).
Also, we tested this functionality which is hardly used in non-testing code.
While this was a burden to maintain in the tests, it was likely still buggy
because there were no real use-cases, beside the tests.
Then, sometimes platform functions call each other which might overwrite the
error reason. So, every function must be cautious to preserve/set
the error reason according to it's own meaning. This can involve storing
the error code, calling another function, and restoring it afterwards.
This is harder to get right compared to a "return-error-code" pattern, where
every function manages its error code independently.
It is better to return the error reason whenever due. For that we already
have our common glib patterns
(1) gboolean fcn (...);
(2) gboolean fcn (..., GError **error);
In few cases, we need more details then a #gboolean, but don't want
to bother constructing a #GError. Then we should do instead:
(3) NMPlatformError fcn (...);
2015-06-15 17:58:36 +02:00
|
|
|
if (!obj || !obj->_link.netlink.is_in_netlink)
|
2013-07-26 17:03:39 +02:00
|
|
|
return FALSE;
|
|
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
nlmsg = _nl_msg_new_link (RTM_DELLINK,
|
|
|
|
|
0,
|
|
|
|
|
ifindex,
|
|
|
|
|
NULL,
|
2015-11-02 14:27:22 +01:00
|
|
|
0,
|
2015-10-20 09:27:16 +02:00
|
|
|
0);
|
|
|
|
|
|
2015-10-26 09:19:15 +01:00
|
|
|
nmp_object_stackinit_id_link (&obj_id, ifindex);
|
2015-10-20 09:27:16 +02:00
|
|
|
return do_delete_object (platform, &obj_id, nlmsg);
|
2013-03-27 22:23:24 +01:00
|
|
|
}
|
|
|
|
|
|
2014-02-11 13:58:00 +01:00
|
|
|
static gboolean
|
|
|
|
|
link_refresh (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
2015-12-14 14:47:41 +01:00
|
|
|
do_request_link (platform, ifindex, NULL);
|
2017-07-04 12:49:47 +02:00
|
|
|
return !!nm_platform_link_get_obj (platform, ifindex, TRUE);
|
2014-02-11 13:58:00 +01:00
|
|
|
}
|
|
|
|
|
|
2018-02-08 15:17:23 +01:00
|
|
|
/* Request a fresh dump of all kernel objects of @obj_type, refreshing the
 * platform cache. Thin wrapper around do_request_one_type(). */
static void
refresh_all (NMPlatform *platform, NMPObjectType obj_type)
{
	do_request_one_type (platform, obj_type);
}
|
|
|
|
|
|
2016-03-08 13:02:58 +01:00
|
|
|
static gboolean
|
|
|
|
|
link_set_netns (NMPlatform *platform,
|
|
|
|
|
int ifindex,
|
|
|
|
|
int netns_fd)
|
|
|
|
|
{
|
|
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
|
|
|
|
|
|
|
|
|
nlmsg = _nl_msg_new_link (RTM_NEWLINK,
|
|
|
|
|
0,
|
|
|
|
|
ifindex,
|
|
|
|
|
NULL,
|
|
|
|
|
0,
|
|
|
|
|
0);
|
|
|
|
|
if (!nlmsg)
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
|
|
NLA_PUT (nlmsg, IFLA_NET_NS_FD, 4, &netns_fd);
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return (do_change_link (platform, CHANGE_LINK_TYPE_UNSPEC, ifindex, nlmsg, NULL) >= 0);
|
2016-03-08 13:02:58 +01:00
|
|
|
|
|
|
|
|
nla_put_failure:
|
|
|
|
|
g_return_val_if_reached (FALSE);
|
|
|
|
|
}
|
|
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
static int
|
2015-11-02 14:27:22 +01:00
|
|
|
link_change_flags (NMPlatform *platform,
|
|
|
|
|
int ifindex,
|
|
|
|
|
unsigned flags_mask,
|
|
|
|
|
unsigned flags_set)
|
2013-03-27 22:23:24 +01:00
|
|
|
{
|
2015-10-20 09:27:16 +02:00
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
2015-11-02 14:27:22 +01:00
|
|
|
char s_flags[100];
|
2013-03-27 22:23:24 +01:00
|
|
|
|
2015-11-02 14:27:22 +01:00
|
|
|
_LOGD ("link: change %d: flags: set 0x%x/0x%x ([%s] / [%s])",
|
|
|
|
|
ifindex,
|
|
|
|
|
flags_set,
|
|
|
|
|
flags_mask,
|
|
|
|
|
nm_platform_link_flags2str (flags_set, s_flags, sizeof (s_flags)),
|
|
|
|
|
nm_platform_link_flags2str (flags_mask, NULL, 0));
|
2014-03-05 10:56:16 +01:00
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
nlmsg = _nl_msg_new_link (RTM_NEWLINK,
|
|
|
|
|
0,
|
|
|
|
|
ifindex,
|
|
|
|
|
NULL,
|
2015-11-02 14:27:22 +01:00
|
|
|
flags_mask,
|
|
|
|
|
flags_set);
|
2015-10-20 09:27:16 +02:00
|
|
|
if (!nlmsg)
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return -NME_UNSPEC;
|
2017-10-23 13:17:21 +02:00
|
|
|
return do_change_link (platform, CHANGE_LINK_TYPE_UNSPEC, ifindex, nlmsg, NULL);
|
2013-03-27 22:23:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
2015-06-15 17:41:27 +02:00
|
|
|
link_set_up (NMPlatform *platform, int ifindex, gboolean *out_no_firmware)
|
2013-03-27 22:23:24 +01:00
|
|
|
{
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
int r;
|
2015-06-15 17:41:27 +02:00
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
r = link_change_flags (platform, ifindex, IFF_UP, IFF_UP);
|
|
|
|
|
NM_SET_OUT (out_no_firmware, (r == -NME_PL_NO_FIRMWARE));
|
|
|
|
|
return r >= 0;
|
2013-03-27 22:23:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
link_set_down (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return (link_change_flags (platform, ifindex, IFF_UP, 0) >= 0);
|
2013-03-27 22:23:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
link_set_arp (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return (link_change_flags (platform, ifindex, IFF_NOARP, 0) >= 0);
|
2013-03-27 22:23:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
link_set_noarp (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return (link_change_flags (platform, ifindex, IFF_NOARP, IFF_NOARP) >= 0);
|
2013-03-27 22:23:24 +01:00
|
|
|
}
|
|
|
|
|
|
2015-06-15 14:41:35 +02:00
|
|
|
static const char *
|
|
|
|
|
link_get_udi (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
2017-07-04 12:49:47 +02:00
|
|
|
const NMPObject *obj = nm_platform_link_get_obj (platform, ifindex, TRUE);
|
2015-06-15 14:41:35 +02:00
|
|
|
|
|
|
|
|
if ( !obj
|
|
|
|
|
|| !obj->_link.netlink.is_in_netlink
|
|
|
|
|
|| !obj->_link.udev.device)
|
|
|
|
|
return NULL;
|
2017-03-12 15:54:02 +01:00
|
|
|
return udev_device_get_syspath (obj->_link.udev.device);
|
2015-06-15 14:41:35 +02:00
|
|
|
}
|
|
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
static int
|
2014-07-24 15:57:08 -05:00
|
|
|
link_set_user_ipv6ll_enabled (NMPlatform *platform, int ifindex, gboolean enabled)
|
|
|
|
|
{
|
2015-10-20 09:27:16 +02:00
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
|
|
|
|
guint8 mode = enabled ? NM_IN6_ADDR_GEN_MODE_NONE : NM_IN6_ADDR_GEN_MODE_EUI64;
|
2014-07-24 15:57:08 -05:00
|
|
|
|
platform: cleanup detecting kernel support for IFA_FLAGS and IPv6LL
- cache the result in NMPlatformPrivate. No need to call the virtual
function every time. The result is not ever going to change.
- if we are unable to detect support, assume support. Those features
were added quite a while ago to kernel, we should default to "support".
Note, that we detect support based on the presence of the absence of
certain netlink flags. That means, we will still detect no support.
The only moment when we actually use the fallback value, is when we
didn't encounter an RTM_NEWADDR or AF_INET6-IFLA_AF_SPEC message yet,
which would be very unusual, because we fill the cache initially and
usually will have some addresses there.
- for no strong reason, track "undetected" as numerical value zero,
and "support"/"no-support" as 1/-1. We already did that previously for
_support_user_ipv6ll, so this just unifies the implementations.
The minor reason is that this puts @_support_user_ipv6ll to the BSS
section and allows us to omit initializing priv->check_support_user_ipv6ll_cached
in platforms constructor.
- detect _support_kernel_extended_ifa_flags also based on IPv4
RTM_NEWADDR messages. Originally, extended flags were added for IPv6,
and later to IPv4 as well. Once we see an IPv4 message with IFA_FLAGS,
we know we have support.
2017-08-16 11:58:57 +02:00
|
|
|
_LOGD ("link: change %d: user-ipv6ll: set IPv6 address generation mode to %s",
|
|
|
|
|
ifindex,
|
|
|
|
|
nm_platform_link_inet6_addrgenmode2str (mode, NULL, 0));
|
|
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
if (!_support_user_ipv6ll_get ()) {
|
|
|
|
|
_LOGD ("link: change %d: user-ipv6ll: not supported", ifindex);
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return -NME_PL_OPNOTSUPP;
|
2014-07-24 15:57:08 -05:00
|
|
|
}
|
2015-10-20 09:27:16 +02:00
|
|
|
|
|
|
|
|
nlmsg = _nl_msg_new_link (RTM_NEWLINK,
|
|
|
|
|
0,
|
|
|
|
|
ifindex,
|
|
|
|
|
NULL,
|
2015-11-02 14:27:22 +01:00
|
|
|
0,
|
|
|
|
|
0);
|
2015-10-20 09:27:16 +02:00
|
|
|
if ( !nlmsg
|
2016-04-30 16:48:32 +02:00
|
|
|
|| !_nl_msg_new_link_set_afspec (nlmsg, mode, NULL))
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
g_return_val_if_reached (-NME_BUG);
|
2016-04-30 16:48:32 +02:00
|
|
|
|
2017-10-23 13:17:21 +02:00
|
|
|
return do_change_link (platform, CHANGE_LINK_TYPE_UNSPEC, ifindex, nlmsg, NULL);
|
2016-04-30 16:48:32 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
link_set_token (NMPlatform *platform, int ifindex, NMUtilsIPv6IfaceId iid)
|
|
|
|
|
{
|
|
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
2018-11-26 16:49:51 +01:00
|
|
|
char sbuf[NM_UTILS_INET_ADDRSTRLEN];
|
2016-04-30 16:48:32 +02:00
|
|
|
|
|
|
|
|
_LOGD ("link: change %d: token: set IPv6 address generation token to %s",
|
2018-11-26 16:49:51 +01:00
|
|
|
ifindex, nm_utils_inet6_interface_identifier_to_token (iid, sbuf));
|
2016-04-30 16:48:32 +02:00
|
|
|
|
|
|
|
|
nlmsg = _nl_msg_new_link (RTM_NEWLINK, 0, ifindex, NULL, 0, 0);
|
|
|
|
|
if (!nlmsg || !_nl_msg_new_link_set_afspec (nlmsg, -1, &iid))
|
2015-12-14 19:18:35 +01:00
|
|
|
g_return_val_if_reached (FALSE);
|
2015-10-20 09:27:16 +02:00
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return (do_change_link (platform, CHANGE_LINK_TYPE_UNSPEC, ifindex, nlmsg, NULL) >= 0);
|
2014-07-24 15:57:08 -05:00
|
|
|
}
|
|
|
|
|
|
2013-05-20 15:38:54 -03:00
|
|
|
static gboolean
|
|
|
|
|
link_supports_carrier_detect (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
nm_auto_pop_netns NMPNetns *netns = NULL;
|
2013-05-20 15:38:54 -03:00
|
|
|
|
2016-02-19 01:06:28 +01:00
|
|
|
if (!nm_platform_netns_push (platform, &netns))
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
2013-05-20 15:38:54 -03:00
|
|
|
/* We use netlink for the actual carrier detection, but netlink can't tell
|
|
|
|
|
* us whether the device actually supports carrier detection in the first
|
|
|
|
|
* place. We assume any device that does implements one of these two APIs.
|
|
|
|
|
*/
|
2016-12-11 22:46:14 +01:00
|
|
|
return nmp_utils_ethtool_supports_carrier_detect (ifindex) || nmp_utils_mii_supports_carrier_detect (ifindex);
|
2013-03-27 22:53:55 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
link_supports_vlans (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
nm_auto_pop_netns NMPNetns *netns = NULL;
|
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
|
|
|
const NMPObject *obj;
|
|
|
|
|
|
2017-07-04 12:49:47 +02:00
|
|
|
obj = nm_platform_link_get_obj (platform, ifindex, TRUE);
|
2013-03-27 22:53:55 +01:00
|
|
|
|
2013-04-17 12:30:09 +02:00
|
|
|
/* Only ARPHRD_ETHER links can possibly support VLANs. */
|
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
|
|
|
if (!obj || obj->link.arptype != ARPHRD_ETHER)
|
2013-03-27 22:53:55 +01:00
|
|
|
return FALSE;
|
|
|
|
|
|
2016-02-19 01:06:28 +01:00
|
|
|
if (!nm_platform_netns_push (platform, &netns))
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
2016-12-11 22:46:14 +01:00
|
|
|
return nmp_utils_ethtool_supports_vlans (ifindex);
|
2013-03-27 22:53:55 +01:00
|
|
|
}
|
|
|
|
|
|
2017-04-14 23:03:33 +02:00
|
|
|
static gboolean
|
|
|
|
|
link_supports_sriov (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
|
|
|
|
nm_auto_pop_netns NMPNetns *netns = NULL;
|
|
|
|
|
nm_auto_close int dirfd = -1;
|
|
|
|
|
char ifname[IFNAMSIZ];
|
|
|
|
|
int total = -1;
|
|
|
|
|
|
|
|
|
|
if (!nm_platform_netns_push (platform, &netns))
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
|
|
dirfd = nm_platform_sysctl_open_netdir (platform, ifindex, ifname);
|
|
|
|
|
if (dirfd < 0)
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
|
|
total = nm_platform_sysctl_get_int32 (platform,
|
|
|
|
|
NMP_SYSCTL_PATHID_NETDIR (dirfd,
|
|
|
|
|
ifname,
|
|
|
|
|
"device/sriov_totalvfs"),
|
|
|
|
|
-1);
|
|
|
|
|
|
|
|
|
|
return total > 0;
|
|
|
|
|
}
|
|
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
static int
|
2013-03-27 22:53:55 +01:00
|
|
|
link_set_address (NMPlatform *platform, int ifindex, gconstpointer address, size_t length)
|
|
|
|
|
{
|
2015-10-20 09:27:16 +02:00
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
2017-10-23 13:17:21 +02:00
|
|
|
const ChangeLinkData d = {
|
|
|
|
|
.set_address = {
|
|
|
|
|
.address = address,
|
|
|
|
|
.length = length,
|
|
|
|
|
},
|
|
|
|
|
};
|
2013-03-27 22:53:55 +01:00
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
if (!address || !length)
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
g_return_val_if_reached (-NME_BUG);
|
2013-03-27 22:53:55 +01:00
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
nlmsg = _nl_msg_new_link (RTM_NEWLINK,
|
|
|
|
|
0,
|
|
|
|
|
ifindex,
|
|
|
|
|
NULL,
|
2015-11-02 14:27:22 +01:00
|
|
|
0,
|
|
|
|
|
0);
|
2015-10-20 09:27:16 +02:00
|
|
|
if (!nlmsg)
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
g_return_val_if_reached (-NME_BUG);
|
2015-10-20 09:27:16 +02:00
|
|
|
|
|
|
|
|
NLA_PUT (nlmsg, IFLA_ADDRESS, length, address);
|
|
|
|
|
|
2017-10-23 13:17:21 +02:00
|
|
|
return do_change_link (platform, CHANGE_LINK_TYPE_SET_ADDRESS, ifindex, nlmsg, &d);
|
2015-10-20 09:27:16 +02:00
|
|
|
nla_put_failure:
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
g_return_val_if_reached (-NME_BUG);
|
2013-03-27 22:53:55 +01:00
|
|
|
}
|
|
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
static int
|
2017-07-03 10:10:34 +02:00
|
|
|
link_set_name (NMPlatform *platform, int ifindex, const char *name)
|
|
|
|
|
{
|
|
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
|
|
|
|
|
|
|
|
|
nlmsg = _nl_msg_new_link (RTM_NEWLINK,
|
|
|
|
|
0,
|
|
|
|
|
ifindex,
|
|
|
|
|
NULL,
|
|
|
|
|
0,
|
|
|
|
|
0);
|
|
|
|
|
if (!nlmsg)
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
g_return_val_if_reached (-NME_BUG);
|
2017-07-03 10:10:34 +02:00
|
|
|
|
|
|
|
|
NLA_PUT (nlmsg, IFLA_IFNAME, strlen (name) + 1, name);
|
|
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return (do_change_link (platform, CHANGE_LINK_TYPE_UNSPEC, ifindex, nlmsg, NULL) >= 0);
|
2017-07-03 10:10:34 +02:00
|
|
|
nla_put_failure:
|
|
|
|
|
g_return_val_if_reached (FALSE);
|
|
|
|
|
}
|
|
|
|
|
|
2014-10-03 17:37:26 -05:00
|
|
|
static gboolean
|
|
|
|
|
link_get_permanent_address (NMPlatform *platform,
|
|
|
|
|
int ifindex,
|
|
|
|
|
guint8 *buf,
|
|
|
|
|
size_t *length)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
nm_auto_pop_netns NMPNetns *netns = NULL;
|
|
|
|
|
|
|
|
|
|
if (!nm_platform_netns_push (platform, &netns))
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
2016-12-11 22:46:14 +01:00
|
|
|
return nmp_utils_ethtool_get_permanent_address (ifindex, buf, length);
|
2014-10-03 17:37:26 -05:00
|
|
|
}
|
|
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
static int
|
2013-04-15 21:48:12 +02:00
|
|
|
link_set_mtu (NMPlatform *platform, int ifindex, guint32 mtu)
|
|
|
|
|
{
|
2015-10-20 09:27:16 +02:00
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
2013-04-15 21:48:12 +02:00
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
nlmsg = _nl_msg_new_link (RTM_NEWLINK,
|
|
|
|
|
0,
|
|
|
|
|
ifindex,
|
|
|
|
|
NULL,
|
2015-11-02 14:27:22 +01:00
|
|
|
0,
|
|
|
|
|
0);
|
2015-10-20 09:27:16 +02:00
|
|
|
if (!nlmsg)
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
|
|
NLA_PUT_U32 (nlmsg, IFLA_MTU, mtu);
|
|
|
|
|
|
2017-10-23 15:16:50 +02:00
|
|
|
return do_change_link (platform, CHANGE_LINK_TYPE_SET_MTU, ifindex, nlmsg, NULL);
|
2015-10-20 09:27:16 +02:00
|
|
|
nla_put_failure:
|
|
|
|
|
g_return_val_if_reached (FALSE);
|
2013-04-15 21:48:12 +02:00
|
|
|
}
|
|
|
|
|
|
2017-04-14 23:03:33 +02:00
|
|
|
static gboolean
|
2018-05-23 14:11:14 +02:00
|
|
|
link_set_sriov_params (NMPlatform *platform,
|
|
|
|
|
int ifindex,
|
|
|
|
|
guint num_vfs,
|
2018-12-04 14:09:50 +01:00
|
|
|
NMTernary autoprobe)
|
2017-04-14 23:03:33 +02:00
|
|
|
{
|
|
|
|
|
nm_auto_pop_netns NMPNetns *netns = NULL;
|
|
|
|
|
nm_auto_close int dirfd = -1;
|
2018-12-12 14:17:24 +01:00
|
|
|
int current_autoprobe;
|
|
|
|
|
guint total;
|
|
|
|
|
gint64 current_num;
|
2017-04-14 23:03:33 +02:00
|
|
|
char ifname[IFNAMSIZ];
|
|
|
|
|
char buf[64];
|
2019-01-31 13:29:21 +01:00
|
|
|
int errsv;
|
2017-04-14 23:03:33 +02:00
|
|
|
|
|
|
|
|
if (!nm_platform_netns_push (platform, &netns))
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
|
|
dirfd = nm_platform_sysctl_open_netdir (platform, ifindex, ifname);
|
|
|
|
|
if (!dirfd)
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
2018-05-23 14:11:14 +02:00
|
|
|
total = nm_platform_sysctl_get_int_checked (platform,
|
|
|
|
|
NMP_SYSCTL_PATHID_NETDIR (dirfd,
|
|
|
|
|
ifname,
|
|
|
|
|
"device/sriov_totalvfs"),
|
|
|
|
|
10, 0, G_MAXUINT, 0);
|
|
|
|
|
if (errno)
|
2017-04-14 23:03:33 +02:00
|
|
|
return FALSE;
|
|
|
|
|
if (num_vfs > total) {
|
|
|
|
|
_LOGW ("link: %d only supports %u VFs (requested %u)", ifindex, total, num_vfs);
|
|
|
|
|
num_vfs = total;
|
|
|
|
|
}
|
|
|
|
|
|
2018-05-23 14:11:14 +02:00
|
|
|
/*
|
|
|
|
|
* Take special care when setting new values:
|
|
|
|
|
* - don't touch anything if the right values are already set
|
|
|
|
|
* - to change the number of VFs or autoprobe we need to destroy existing VFs
|
|
|
|
|
* - the autoprobe setting is irrelevant when numvfs is zero
|
|
|
|
|
*/
|
|
|
|
|
current_num = nm_platform_sysctl_get_int_checked (platform,
|
|
|
|
|
NMP_SYSCTL_PATHID_NETDIR (dirfd,
|
|
|
|
|
ifname,
|
|
|
|
|
"device/sriov_numvfs"),
|
2018-12-12 14:17:24 +01:00
|
|
|
10, 0, G_MAXUINT, -1);
|
2018-05-23 14:11:14 +02:00
|
|
|
current_autoprobe = nm_platform_sysctl_get_int_checked (platform,
|
|
|
|
|
NMP_SYSCTL_PATHID_NETDIR (dirfd,
|
|
|
|
|
ifname,
|
|
|
|
|
"device/sriov_drivers_autoprobe"),
|
2018-12-12 14:17:24 +01:00
|
|
|
10, 0, 1, -1);
|
2018-05-23 14:11:14 +02:00
|
|
|
if ( current_num == num_vfs
|
2018-12-04 14:09:50 +01:00
|
|
|
&& (autoprobe == NM_TERNARY_DEFAULT || current_autoprobe == autoprobe))
|
2017-04-14 23:03:33 +02:00
|
|
|
return TRUE;
|
|
|
|
|
|
2018-05-23 14:11:14 +02:00
|
|
|
if (current_num != 0) {
|
|
|
|
|
/* We need to destroy all other VFs before changing any value */
|
2017-04-14 23:03:33 +02:00
|
|
|
if (!nm_platform_sysctl_set (NM_PLATFORM_GET,
|
|
|
|
|
NMP_SYSCTL_PATHID_NETDIR (dirfd,
|
|
|
|
|
ifname,
|
|
|
|
|
"device/sriov_numvfs"),
|
|
|
|
|
"0")) {
|
2019-01-31 13:29:21 +01:00
|
|
|
errsv = errno;
|
2019-01-31 17:22:18 +01:00
|
|
|
_LOGW ("link: couldn't reset SR-IOV num_vfs: %s", nm_strerror_native (errsv));
|
2017-04-14 23:03:33 +02:00
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-05-23 14:11:14 +02:00
|
|
|
if (num_vfs == 0)
|
|
|
|
|
return TRUE;
|
|
|
|
|
|
2018-12-04 14:09:50 +01:00
|
|
|
if ( NM_IN_SET (autoprobe, NM_TERNARY_TRUE, NM_TERNARY_FALSE)
|
2018-05-23 14:11:14 +02:00
|
|
|
&& current_autoprobe != autoprobe
|
|
|
|
|
&& !nm_platform_sysctl_set (NM_PLATFORM_GET,
|
|
|
|
|
NMP_SYSCTL_PATHID_NETDIR (dirfd,
|
|
|
|
|
ifname,
|
|
|
|
|
"device/sriov_drivers_autoprobe"),
|
2018-12-04 14:09:50 +01:00
|
|
|
nm_sprintf_buf (buf, "%d", (int) autoprobe))) {
|
2019-01-31 13:29:21 +01:00
|
|
|
errsv = errno;
|
2019-01-31 17:22:18 +01:00
|
|
|
_LOGW ("link: couldn't set SR-IOV drivers-autoprobe to %d: %s", (int) autoprobe, nm_strerror_native (errsv));
|
2018-05-23 14:11:14 +02:00
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
2017-04-14 23:03:33 +02:00
|
|
|
if (!nm_platform_sysctl_set (NM_PLATFORM_GET,
|
|
|
|
|
NMP_SYSCTL_PATHID_NETDIR (dirfd,
|
|
|
|
|
ifname,
|
|
|
|
|
"device/sriov_numvfs"),
|
2018-05-23 14:11:14 +02:00
|
|
|
nm_sprintf_buf (buf, "%u", num_vfs))) {
|
2019-01-31 13:29:21 +01:00
|
|
|
errsv = errno;
|
2019-01-31 17:22:18 +01:00
|
|
|
_LOGW ("link: couldn't set SR-IOV num_vfs to %d: %s", num_vfs, nm_strerror_native (errsv));
|
2017-04-14 23:03:33 +02:00
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return TRUE;
|
|
|
|
|
}
|
|
|
|
|
|
2018-05-23 14:33:24 +02:00
|
|
|
static gboolean
|
|
|
|
|
link_set_sriov_vfs (NMPlatform *platform, int ifindex, const NMPlatformVF *const *vfs)
|
|
|
|
|
{
|
|
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
|
|
|
|
struct nlattr *list, *info, *vlan_list;
|
|
|
|
|
guint i;
|
|
|
|
|
|
|
|
|
|
nlmsg = _nl_msg_new_link (RTM_NEWLINK,
|
|
|
|
|
0,
|
|
|
|
|
ifindex,
|
|
|
|
|
NULL,
|
|
|
|
|
0,
|
|
|
|
|
0);
|
|
|
|
|
if (!nlmsg)
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
g_return_val_if_reached (-NME_BUG);
|
2018-05-23 14:33:24 +02:00
|
|
|
|
|
|
|
|
if (!(list = nla_nest_start (nlmsg, IFLA_VFINFO_LIST)))
|
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
|
|
for (i = 0; vfs[i]; i++) {
|
|
|
|
|
const NMPlatformVF *vf = vfs[i];
|
|
|
|
|
|
|
|
|
|
if (!(info = nla_nest_start (nlmsg, IFLA_VF_INFO)))
|
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
|
|
if (vf->spoofchk >= 0) {
|
|
|
|
|
struct _ifla_vf_setting ivs = { 0 };
|
|
|
|
|
|
|
|
|
|
ivs.vf = vf->index;
|
|
|
|
|
ivs.setting = vf->spoofchk;
|
|
|
|
|
NLA_PUT (nlmsg, IFLA_VF_SPOOFCHK, sizeof (ivs), &ivs);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (vf->trust >= 0) {
|
|
|
|
|
struct _ifla_vf_setting ivs = { 0 };
|
|
|
|
|
|
|
|
|
|
ivs.vf = vf->index;
|
|
|
|
|
ivs.setting = vf->trust;
|
|
|
|
|
NLA_PUT (nlmsg, IFLA_VF_TRUST, sizeof (ivs), &ivs);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (vf->mac.len) {
|
|
|
|
|
struct ifla_vf_mac ivm = { 0 };
|
|
|
|
|
|
|
|
|
|
ivm.vf = vf->index;
|
|
|
|
|
memcpy (ivm.mac, vf->mac.data, vf->mac.len);
|
|
|
|
|
NLA_PUT (nlmsg, IFLA_VF_MAC, sizeof (ivm), &ivm);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (vf->min_tx_rate || vf->max_tx_rate) {
|
|
|
|
|
struct _ifla_vf_rate ivr = { 0 };
|
|
|
|
|
|
|
|
|
|
ivr.vf = vf->index;
|
|
|
|
|
ivr.min_tx_rate = vf->min_tx_rate;
|
|
|
|
|
ivr.max_tx_rate = vf->max_tx_rate;
|
|
|
|
|
NLA_PUT (nlmsg, IFLA_VF_RATE, sizeof (ivr), &ivr);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Kernel only supports one VLAN per VF now. If this
|
|
|
|
|
* changes in the future, we need to figure out how to
|
|
|
|
|
* clear existing VLANs and set new ones in one message
|
|
|
|
|
* with the new API.*/
|
|
|
|
|
if (vf->num_vlans > 1) {
|
|
|
|
|
_LOGW ("multiple VLANs per VF are not supported at the moment");
|
|
|
|
|
return FALSE;
|
|
|
|
|
} else {
|
|
|
|
|
struct _ifla_vf_vlan_info ivvi = { 0 };
|
|
|
|
|
|
|
|
|
|
if (!(vlan_list = nla_nest_start (nlmsg, IFLA_VF_VLAN_LIST)))
|
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
|
|
ivvi.vf = vf->index;
|
|
|
|
|
if (vf->num_vlans == 1) {
|
|
|
|
|
ivvi.vlan = vf->vlans[0].id;
|
|
|
|
|
ivvi.qos = vf->vlans[0].qos;
|
|
|
|
|
ivvi.vlan_proto = htons (vf->vlans[0].proto_ad ? ETH_P_8021AD : ETH_P_8021Q);
|
|
|
|
|
} else {
|
|
|
|
|
/* Clear existing VLAN */
|
|
|
|
|
ivvi.vlan = 0;
|
|
|
|
|
ivvi.qos = 0;
|
|
|
|
|
ivvi.vlan_proto = htons (ETH_P_8021Q);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
NLA_PUT (nlmsg, IFLA_VF_VLAN_INFO, sizeof (ivvi), &ivvi);
|
|
|
|
|
nla_nest_end (nlmsg, vlan_list);
|
|
|
|
|
}
|
|
|
|
|
nla_nest_end (nlmsg, info);
|
|
|
|
|
}
|
|
|
|
|
nla_nest_end (nlmsg, list);
|
|
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return (do_change_link (platform, CHANGE_LINK_TYPE_UNSPEC, ifindex, nlmsg, NULL) >= 0);
|
2018-05-23 14:33:24 +02:00
|
|
|
nla_put_failure:
|
|
|
|
|
g_return_val_if_reached (FALSE);
|
|
|
|
|
}
|
|
|
|
|
|
2013-10-11 14:59:26 -04:00
|
|
|
static char *
|
|
|
|
|
link_get_physical_port_id (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
2016-12-09 12:47:17 +01:00
|
|
|
nm_auto_close int dirfd = -1;
|
|
|
|
|
char ifname_verified[IFNAMSIZ];
|
2013-10-11 14:59:26 -04:00
|
|
|
|
2016-12-09 12:47:17 +01:00
|
|
|
dirfd = nm_platform_sysctl_open_netdir (platform, ifindex, ifname_verified);
|
|
|
|
|
if (dirfd < 0)
|
2013-10-11 14:59:26 -04:00
|
|
|
return NULL;
|
2016-12-09 12:47:17 +01:00
|
|
|
return sysctl_get (platform, NMP_SYSCTL_PATHID_NETDIR (dirfd, ifname_verified, "phys_port_id"));
|
2013-10-11 14:59:26 -04:00
|
|
|
}
|
|
|
|
|
|
2015-03-24 12:35:36 -05:00
|
|
|
static guint
|
|
|
|
|
link_get_dev_id (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
2016-12-09 12:47:17 +01:00
|
|
|
nm_auto_close int dirfd = -1;
|
|
|
|
|
char ifname_verified[IFNAMSIZ];
|
2015-03-24 12:35:36 -05:00
|
|
|
|
2016-12-09 12:47:17 +01:00
|
|
|
dirfd = nm_platform_sysctl_open_netdir (platform, ifindex, ifname_verified);
|
|
|
|
|
if (dirfd < 0)
|
2015-03-24 12:35:36 -05:00
|
|
|
return 0;
|
2016-12-09 12:47:17 +01:00
|
|
|
return nm_platform_sysctl_get_int_checked (platform,
|
|
|
|
|
NMP_SYSCTL_PATHID_NETDIR (dirfd, ifname_verified, "dev_id"),
|
|
|
|
|
16, 0, G_MAXUINT16, 0);
|
2015-03-24 12:35:36 -05:00
|
|
|
}
|
|
|
|
|
|
2018-06-27 13:57:13 +02:00
|
|
|
/* Create a VLAN device named @name on top of the @parent link with the
 * given @vlan_id and @vlan_flags, via RTM_NEWLINK (NLM_F_CREATE|NLM_F_EXCL).
 *
 * @out_link: (out) (optional): on success, the platform cache entry of the
 *   newly created link.
 *
 * Returns: TRUE on success, FALSE on failure. */
static gboolean
vlan_add (NMPlatform *platform,
          const char *name,
          int parent,
          int vlan_id,
          guint32 vlan_flags,
          const NMPlatformLink **out_link)
{
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;

	/* NMVlanFlags values are passed through to the kernel unchanged; these
	 * asserts pin the 1:1 correspondence with the kernel's VLAN_FLAG_*. */
	G_STATIC_ASSERT (NM_VLAN_FLAG_REORDER_HEADERS == (guint32) VLAN_FLAG_REORDER_HDR);
	G_STATIC_ASSERT (NM_VLAN_FLAG_GVRP == (guint32) VLAN_FLAG_GVRP);
	G_STATIC_ASSERT (NM_VLAN_FLAG_LOOSE_BINDING == (guint32) VLAN_FLAG_LOOSE_BINDING);
	G_STATIC_ASSERT (NM_VLAN_FLAG_MVRP == (guint32) VLAN_FLAG_MVRP);

	/* drop any bits outside the supported flag set */
	vlan_flags &= (guint32) NM_VLAN_FLAGS_ALL;

	nlmsg = _nl_msg_new_link (RTM_NEWLINK,
	                          NLM_F_CREATE | NLM_F_EXCL,
	                          0,
	                          name,
	                          0,
	                          0);
	if (!nlmsg)
		return FALSE;

	/* NLA_PUT_U32() jumps to nla_put_failure on error. */
	NLA_PUT_U32 (nlmsg, IFLA_LINK, parent);

	/* mask NM_VLAN_FLAGS_ALL: set all supported flags to the requested values */
	if (!_nl_msg_new_link_set_linkinfo_vlan (nlmsg,
	                                         vlan_id,
	                                         NM_VLAN_FLAGS_ALL,
	                                         vlan_flags,
	                                         NULL,
	                                         0,
	                                         NULL,
	                                         0))
		return FALSE;

	return (do_add_link_with_lookup (platform, NM_LINK_TYPE_VLAN, name, nlmsg, out_link) >= 0);
nla_put_failure:
	g_return_val_if_reached (FALSE);
}
|
|
|
|
|
|
2018-06-27 13:57:13 +02:00
|
|
|
/* Create a GRE or GRETAP tunnel device named @name with the settings from
 * @props (props->is_tap selects "gretap" over "gre"), via RTM_NEWLINK
 * (NLM_F_CREATE|NLM_F_EXCL).
 *
 * @out_link: (out) (optional): on success, the platform cache entry of the
 *   newly created link.
 *
 * Returns: TRUE on success, FALSE on failure. */
static gboolean
link_gre_add (NMPlatform *platform,
              const char *name,
              const NMPlatformLnkGre *props,
              const NMPlatformLink **out_link)
{
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
	struct nlattr *info;
	struct nlattr *data;

	nlmsg = _nl_msg_new_link (RTM_NEWLINK,
	                          NLM_F_CREATE | NLM_F_EXCL,
	                          0,
	                          name,
	                          0,
	                          0);
	if (!nlmsg)
		return FALSE;

	if (!(info = nla_nest_start (nlmsg, IFLA_LINKINFO)))
		goto nla_put_failure;

	/* NLA_PUT*() macros jump to nla_put_failure on error. */
	NLA_PUT_STRING (nlmsg, IFLA_INFO_KIND, props->is_tap ? "gretap" : "gre");

	if (!(data = nla_nest_start (nlmsg, IFLA_INFO_DATA)))
		goto nla_put_failure;

	if (props->parent_ifindex)
		NLA_PUT_U32 (nlmsg, IFLA_GRE_LINK, props->parent_ifindex);
	NLA_PUT_U32 (nlmsg, IFLA_GRE_LOCAL, props->local);
	NLA_PUT_U32 (nlmsg, IFLA_GRE_REMOTE, props->remote);
	NLA_PUT_U8 (nlmsg, IFLA_GRE_TTL, props->ttl);
	NLA_PUT_U8 (nlmsg, IFLA_GRE_TOS, props->tos);
	NLA_PUT_U8 (nlmsg, IFLA_GRE_PMTUDISC, !!props->path_mtu_discovery);
	/* keys and flags are carried in network byte order on the wire */
	NLA_PUT_U32 (nlmsg, IFLA_GRE_IKEY, htonl (props->input_key));
	NLA_PUT_U32 (nlmsg, IFLA_GRE_OKEY, htonl (props->output_key));
	NLA_PUT_U16 (nlmsg, IFLA_GRE_IFLAGS, htons (props->input_flags));
	NLA_PUT_U16 (nlmsg, IFLA_GRE_OFLAGS, htons (props->output_flags));

	nla_nest_end (nlmsg, data);
	nla_nest_end (nlmsg, info);

	return (do_add_link_with_lookup (platform,
	                                 props->is_tap ? NM_LINK_TYPE_GRETAP : NM_LINK_TYPE_GRE,
	                                 name, nlmsg, out_link) >= 0);
nla_put_failure:
	g_return_val_if_reached (FALSE);
}
|
|
|
|
|
|
2018-06-27 13:57:13 +02:00
|
|
|
/* Create an IPv6 ("ip6tnl") tunnel device named @name with the settings
 * from @props, via RTM_NEWLINK (NLM_F_CREATE|NLM_F_EXCL). For the GRE
 * flavor of NMPlatformLnkIp6Tnl see link_ip6gre_add().
 *
 * @out_link: (out) (optional): on success, the platform cache entry of the
 *   newly created link.
 *
 * Returns: TRUE on success, FALSE on failure. */
static gboolean
link_ip6tnl_add (NMPlatform *platform,
                 const char *name,
                 const NMPlatformLnkIp6Tnl *props,
                 const NMPlatformLink **out_link)
{
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
	struct nlattr *info;
	struct nlattr *data;
	guint32 flowinfo;

	/* ip6gre/ip6gretap variants are handled by link_ip6gre_add() */
	g_return_val_if_fail (!props->is_gre, FALSE);

	nlmsg = _nl_msg_new_link (RTM_NEWLINK,
	                          NLM_F_CREATE | NLM_F_EXCL,
	                          0,
	                          name,
	                          0,
	                          0);
	if (!nlmsg)
		return FALSE;

	if (!(info = nla_nest_start (nlmsg, IFLA_LINKINFO)))
		goto nla_put_failure;

	/* NLA_PUT*() macros jump to nla_put_failure on error. */
	NLA_PUT_STRING (nlmsg, IFLA_INFO_KIND, "ip6tnl");

	if (!(data = nla_nest_start (nlmsg, IFLA_INFO_DATA)))
		goto nla_put_failure;

	if (props->parent_ifindex)
		NLA_PUT_U32 (nlmsg, IFLA_IPTUN_LINK, props->parent_ifindex);

	/* only emit endpoint addresses that are not :: (unspecified) */
	if (memcmp (&props->local, &in6addr_any, sizeof (in6addr_any)))
		NLA_PUT (nlmsg, IFLA_IPTUN_LOCAL, sizeof (props->local), &props->local);
	if (memcmp (&props->remote, &in6addr_any, sizeof (in6addr_any)))
		NLA_PUT (nlmsg, IFLA_IPTUN_REMOTE, sizeof (props->remote), &props->remote);

	NLA_PUT_U8 (nlmsg, IFLA_IPTUN_TTL, props->ttl);
	NLA_PUT_U8 (nlmsg, IFLA_IPTUN_ENCAP_LIMIT, props->encap_limit);

	/* pack flow label and traffic class into the IPv6 flowinfo word,
	 * sent in network byte order */
	flowinfo = props->flow_label & IP6_FLOWINFO_FLOWLABEL_MASK;
	flowinfo |=   (props->tclass << IP6_FLOWINFO_TCLASS_SHIFT)
	            & IP6_FLOWINFO_TCLASS_MASK;
	NLA_PUT_U32 (nlmsg, IFLA_IPTUN_FLOWINFO, htonl (flowinfo));
	NLA_PUT_U8 (nlmsg, IFLA_IPTUN_PROTO, props->proto);
	NLA_PUT_U32 (nlmsg, IFLA_IPTUN_FLAGS, props->flags);

	nla_nest_end (nlmsg, data);
	nla_nest_end (nlmsg, info);

	return (do_add_link_with_lookup (platform, NM_LINK_TYPE_IP6TNL, name, nlmsg, out_link) >= 0);
nla_put_failure:
	g_return_val_if_reached (FALSE);
}
|
|
|
|
|
|
2018-06-26 12:06:43 +02:00
|
|
|
/* Create an IPv6 GRE ("ip6gre") or GRETAP ("ip6gretap") tunnel device named
 * @name with the settings from @props (props->is_tap selects the TAP
 * flavor), via RTM_NEWLINK (NLM_F_CREATE|NLM_F_EXCL).
 *
 * @out_link: (out) (optional): on success, the platform cache entry of the
 *   newly created link.
 *
 * Returns: TRUE on success, FALSE on failure. */
static gboolean
link_ip6gre_add (NMPlatform *platform,
                 const char *name,
                 const NMPlatformLnkIp6Tnl *props,
                 const NMPlatformLink **out_link)
{
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
	struct nlattr *info;
	struct nlattr *data;
	guint32 flowinfo;

	/* plain ip6tnl devices are handled by link_ip6tnl_add() */
	g_return_val_if_fail (props->is_gre, FALSE);

	nlmsg = _nl_msg_new_link (RTM_NEWLINK,
	                          NLM_F_CREATE | NLM_F_EXCL,
	                          0,
	                          name,
	                          0,
	                          0);
	if (!nlmsg)
		return FALSE;

	if (!(info = nla_nest_start (nlmsg, IFLA_LINKINFO)))
		goto nla_put_failure;

	/* NLA_PUT*() macros jump to nla_put_failure on error. */
	NLA_PUT_STRING (nlmsg, IFLA_INFO_KIND, props->is_tap ? "ip6gretap" : "ip6gre");

	if (!(data = nla_nest_start (nlmsg, IFLA_INFO_DATA)))
		goto nla_put_failure;

	if (props->parent_ifindex)
		NLA_PUT_U32 (nlmsg, IFLA_GRE_LINK, props->parent_ifindex);

	/* keys and flags are carried in network byte order on the wire */
	NLA_PUT_U32 (nlmsg, IFLA_GRE_IKEY, htonl (props->input_key));
	NLA_PUT_U32 (nlmsg, IFLA_GRE_OKEY, htonl (props->output_key));
	NLA_PUT_U16 (nlmsg, IFLA_GRE_IFLAGS, htons (props->input_flags));
	NLA_PUT_U16 (nlmsg, IFLA_GRE_OFLAGS, htons (props->output_flags));

	/* only emit endpoint addresses that are not :: (unspecified) */
	if (memcmp (&props->local, &in6addr_any, sizeof (in6addr_any)))
		NLA_PUT (nlmsg, IFLA_GRE_LOCAL, sizeof (props->local), &props->local);
	if (memcmp (&props->remote, &in6addr_any, sizeof (in6addr_any)))
		NLA_PUT (nlmsg, IFLA_GRE_REMOTE, sizeof (props->remote), &props->remote);

	NLA_PUT_U8 (nlmsg, IFLA_GRE_TTL, props->ttl);
	NLA_PUT_U8 (nlmsg, IFLA_GRE_ENCAP_LIMIT, props->encap_limit);

	/* pack flow label and traffic class into the IPv6 flowinfo word,
	 * sent in network byte order */
	flowinfo = props->flow_label & IP6_FLOWINFO_FLOWLABEL_MASK;
	flowinfo |=   (props->tclass << IP6_FLOWINFO_TCLASS_SHIFT)
	            & IP6_FLOWINFO_TCLASS_MASK;
	NLA_PUT_U32 (nlmsg, IFLA_GRE_FLOWINFO, htonl (flowinfo));
	NLA_PUT_U32 (nlmsg, IFLA_GRE_FLAGS, props->flags);

	nla_nest_end (nlmsg, data);
	nla_nest_end (nlmsg, info);

	return (do_add_link_with_lookup (platform,
	                                 props->is_tap ? NM_LINK_TYPE_IP6GRETAP : NM_LINK_TYPE_IP6GRE,
	                                 name, nlmsg, out_link) >= 0);
nla_put_failure:
	g_return_val_if_reached (FALSE);
}
|
|
|
|
|
|
2018-06-27 13:57:13 +02:00
|
|
|
/* Create an IPIP tunnel device named @name with the settings from @props,
 * via RTM_NEWLINK (NLM_F_CREATE|NLM_F_EXCL).
 *
 * @out_link: (out) (optional): on success, the platform cache entry of the
 *   newly created link.
 *
 * Returns: TRUE on success, FALSE on failure. */
static gboolean
link_ipip_add (NMPlatform *platform,
               const char *name,
               const NMPlatformLnkIpIp *props,
               const NMPlatformLink **out_link)
{
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
	struct nlattr *info;
	struct nlattr *data;

	nlmsg = _nl_msg_new_link (RTM_NEWLINK,
	                          NLM_F_CREATE | NLM_F_EXCL,
	                          0,
	                          name,
	                          0,
	                          0);
	if (!nlmsg)
		return FALSE;

	if (!(info = nla_nest_start (nlmsg, IFLA_LINKINFO)))
		goto nla_put_failure;

	/* NLA_PUT*() macros jump to nla_put_failure on error. */
	NLA_PUT_STRING (nlmsg, IFLA_INFO_KIND, "ipip");

	if (!(data = nla_nest_start (nlmsg, IFLA_INFO_DATA)))
		goto nla_put_failure;

	if (props->parent_ifindex)
		NLA_PUT_U32 (nlmsg, IFLA_IPTUN_LINK, props->parent_ifindex);
	NLA_PUT_U32 (nlmsg, IFLA_IPTUN_LOCAL, props->local);
	NLA_PUT_U32 (nlmsg, IFLA_IPTUN_REMOTE, props->remote);
	NLA_PUT_U8 (nlmsg, IFLA_IPTUN_TTL, props->ttl);
	NLA_PUT_U8 (nlmsg, IFLA_IPTUN_TOS, props->tos);
	NLA_PUT_U8 (nlmsg, IFLA_IPTUN_PMTUDISC, !!props->path_mtu_discovery);

	nla_nest_end (nlmsg, data);
	nla_nest_end (nlmsg, info);

	return (do_add_link_with_lookup (platform, NM_LINK_TYPE_IPIP, name, nlmsg, out_link) >= 0);
nla_put_failure:
	g_return_val_if_reached (FALSE);
}
|
|
|
|
|
|
2018-06-27 13:57:13 +02:00
|
|
|
/* Create a MACsec device named @name on top of the @parent link with the
 * settings from @props, via RTM_NEWLINK (NLM_F_CREATE|NLM_F_EXCL).
 *
 * @out_link: (out) (optional): on success, the platform cache entry of the
 *   newly created link.
 *
 * Returns: TRUE on success, FALSE on failure. */
static gboolean
link_macsec_add (NMPlatform *platform,
                 const char *name,
                 int parent,
                 const NMPlatformLnkMacsec *props,
                 const NMPlatformLink **out_link)
{
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
	struct nlattr *info;
	struct nlattr *data;

	nlmsg = _nl_msg_new_link (RTM_NEWLINK,
	                          NLM_F_CREATE | NLM_F_EXCL,
	                          0,
	                          name,
	                          0,
	                          0);
	if (!nlmsg)
		return FALSE;

	/* NLA_PUT*() macros jump to nla_put_failure on error. */
	NLA_PUT_U32 (nlmsg, IFLA_LINK, parent);

	if (!(info = nla_nest_start (nlmsg, IFLA_LINKINFO)))
		goto nla_put_failure;

	NLA_PUT_STRING (nlmsg, IFLA_INFO_KIND, "macsec");

	if (!(data = nla_nest_start (nlmsg, IFLA_INFO_DATA)))
		goto nla_put_failure;

	/* NOTE(review): a non-zero icv_length always emits the fixed value 16,
	 * not props->icv_length itself — presumably only 16-byte ICVs are
	 * supported; confirm against NMPlatformLnkMacsec's contract. */
	if (props->icv_length)
		NLA_PUT_U8 (nlmsg, IFLA_MACSEC_ICV_LEN, 16);
	if (props->cipher_suite)
		NLA_PUT_U64 (nlmsg, IFLA_MACSEC_CIPHER_SUITE, props->cipher_suite);
	/* the replay window is only meaningful with replay protection on */
	if (props->replay_protect)
		NLA_PUT_U32 (nlmsg, IFLA_MACSEC_WINDOW, props->window);

	/* the SCI is carried in big-endian byte order */
	NLA_PUT_U64 (nlmsg, IFLA_MACSEC_SCI, htobe64 (props->sci));
	NLA_PUT_U8 (nlmsg, IFLA_MACSEC_ENCODING_SA, props->encoding_sa);
	NLA_PUT_U8 (nlmsg, IFLA_MACSEC_ENCRYPT, props->encrypt);
	NLA_PUT_U8 (nlmsg, IFLA_MACSEC_PROTECT, props->protect);
	NLA_PUT_U8 (nlmsg, IFLA_MACSEC_INC_SCI, props->include_sci);
	NLA_PUT_U8 (nlmsg, IFLA_MACSEC_ES, props->es);
	NLA_PUT_U8 (nlmsg, IFLA_MACSEC_SCB, props->scb);
	NLA_PUT_U8 (nlmsg, IFLA_MACSEC_REPLAY_PROTECT, props->replay_protect);
	NLA_PUT_U8 (nlmsg, IFLA_MACSEC_VALIDATION, props->validation);

	nla_nest_end (nlmsg, data);
	nla_nest_end (nlmsg, info);

	return (do_add_link_with_lookup (platform,
	                                 NM_LINK_TYPE_MACSEC,
	                                 name, nlmsg, out_link) >= 0);
nla_put_failure:
	g_return_val_if_reached (FALSE);
}
|
|
|
|
|
|
2018-06-27 13:57:13 +02:00
|
|
|
/* Create a MACVLAN or MACVTAP device named @name on top of the @parent
 * link with the settings from @props (props->tap selects "macvtap" over
 * "macvlan"), via RTM_NEWLINK (NLM_F_CREATE|NLM_F_EXCL).
 *
 * @out_link: (out) (optional): on success, the platform cache entry of the
 *   newly created link.
 *
 * Returns: TRUE on success, FALSE on failure. */
static gboolean
link_macvlan_add (NMPlatform *platform,
                  const char *name,
                  int parent,
                  const NMPlatformLnkMacvlan *props,
                  const NMPlatformLink **out_link)
{
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
	struct nlattr *info;
	struct nlattr *data;

	nlmsg = _nl_msg_new_link (RTM_NEWLINK,
	                          NLM_F_CREATE | NLM_F_EXCL,
	                          0,
	                          name,
	                          0,
	                          0);
	if (!nlmsg)
		return FALSE;

	/* NLA_PUT*() macros jump to nla_put_failure on error. */
	NLA_PUT_U32 (nlmsg, IFLA_LINK, parent);

	if (!(info = nla_nest_start (nlmsg, IFLA_LINKINFO)))
		goto nla_put_failure;

	NLA_PUT_STRING (nlmsg, IFLA_INFO_KIND, props->tap ? "macvtap" : "macvlan");

	if (!(data = nla_nest_start (nlmsg, IFLA_INFO_DATA)))
		goto nla_put_failure;

	NLA_PUT_U32 (nlmsg, IFLA_MACVLAN_MODE, props->mode);
	NLA_PUT_U16 (nlmsg, IFLA_MACVLAN_FLAGS, props->no_promisc ? MACVLAN_FLAG_NOPROMISC : 0);

	nla_nest_end (nlmsg, data);
	nla_nest_end (nlmsg, info);

	return (do_add_link_with_lookup (platform,
	                                 props->tap ? NM_LINK_TYPE_MACVTAP : NM_LINK_TYPE_MACVLAN,
	                                 name, nlmsg, out_link) >= 0);
nla_put_failure:
	g_return_val_if_reached (FALSE);
}
|
|
|
|
|
|
2018-06-27 13:57:13 +02:00
|
|
|
/* Create a SIT (IPv6-in-IPv4) tunnel device named @name with the settings
 * from @props, via RTM_NEWLINK (NLM_F_CREATE|NLM_F_EXCL).
 *
 * @out_link: (out) (optional): on success, the platform cache entry of the
 *   newly created link.
 *
 * Returns: TRUE on success, FALSE on failure. */
static gboolean
link_sit_add (NMPlatform *platform,
              const char *name,
              const NMPlatformLnkSit *props,
              const NMPlatformLink **out_link)
{
	nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
	struct nlattr *info;
	struct nlattr *data;

	nlmsg = _nl_msg_new_link (RTM_NEWLINK,
	                          NLM_F_CREATE | NLM_F_EXCL,
	                          0,
	                          name,
	                          0,
	                          0);
	if (!nlmsg)
		return FALSE;

	if (!(info = nla_nest_start (nlmsg, IFLA_LINKINFO)))
		goto nla_put_failure;

	/* NLA_PUT*() macros jump to nla_put_failure on error. */
	NLA_PUT_STRING (nlmsg, IFLA_INFO_KIND, "sit");

	if (!(data = nla_nest_start (nlmsg, IFLA_INFO_DATA)))
		goto nla_put_failure;

	if (props->parent_ifindex)
		NLA_PUT_U32 (nlmsg, IFLA_IPTUN_LINK, props->parent_ifindex);
	NLA_PUT_U32 (nlmsg, IFLA_IPTUN_LOCAL, props->local);
	NLA_PUT_U32 (nlmsg, IFLA_IPTUN_REMOTE, props->remote);
	NLA_PUT_U8 (nlmsg, IFLA_IPTUN_TTL, props->ttl);
	NLA_PUT_U8 (nlmsg, IFLA_IPTUN_TOS, props->tos);
	NLA_PUT_U8 (nlmsg, IFLA_IPTUN_PMTUDISC, !!props->path_mtu_discovery);

	nla_nest_end (nlmsg, data);
	nla_nest_end (nlmsg, info);

	return (do_add_link_with_lookup (platform, NM_LINK_TYPE_SIT, name, nlmsg, out_link) >= 0);
nla_put_failure:
	g_return_val_if_reached (FALSE);
}
|
|
|
|
|
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel versions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
static gboolean
|
|
|
|
|
link_tun_add (NMPlatform *platform,
|
|
|
|
|
const char *name,
|
|
|
|
|
const NMPlatformLnkTun *props,
|
2018-04-05 18:23:43 +02:00
|
|
|
const NMPlatformLink **out_link,
|
|
|
|
|
int *out_fd)
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel vesions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
{
|
|
|
|
|
const NMPObject *obj;
|
|
|
|
|
struct ifreq ifr = { };
|
|
|
|
|
nm_auto_close int fd = -1;
|
|
|
|
|
|
2018-04-05 18:23:43 +02:00
|
|
|
nm_assert (NM_IN_SET (props->type, IFF_TAP, IFF_TUN));
|
|
|
|
|
nm_assert (props->persist || out_fd);
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel vesions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
|
|
|
|
|
fd = open ("/dev/net/tun", O_RDWR | O_CLOEXEC);
|
|
|
|
|
if (fd < 0)
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
|
|
nm_utils_ifname_cpy (ifr.ifr_name, name);
|
|
|
|
|
ifr.ifr_flags = ((short) props->type)
|
|
|
|
|
| ((short) IFF_TUN_EXCL)
|
|
|
|
|
| (!props->pi ? (short) IFF_NO_PI : (short) 0)
|
|
|
|
|
| ( props->vnet_hdr ? (short) IFF_VNET_HDR : (short) 0)
|
|
|
|
|
| ( props->multi_queue ? (short) NM_IFF_MULTI_QUEUE : (short) 0);
|
|
|
|
|
if (ioctl (fd, TUNSETIFF, &ifr))
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
|
|
if (props->owner_valid) {
|
|
|
|
|
if (ioctl (fd, TUNSETOWNER, (uid_t) props->owner))
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (props->group_valid) {
|
|
|
|
|
if (ioctl (fd, TUNSETGROUP, (gid_t) props->group))
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-05 18:23:43 +02:00
|
|
|
if (props->persist) {
|
|
|
|
|
if (ioctl (fd, TUNSETPERSIST, 1))
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel vesions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
|
|
|
|
|
do_request_link (platform, 0, name);
|
|
|
|
|
obj = nmp_cache_lookup_link_full (nm_platform_get_cache (platform),
|
|
|
|
|
0, name, FALSE,
|
|
|
|
|
NM_LINK_TYPE_TUN,
|
|
|
|
|
NULL, NULL);
|
|
|
|
|
|
2018-04-05 18:23:43 +02:00
|
|
|
if (!obj)
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
|
|
NM_SET_OUT (out_link, &obj->link);
|
|
|
|
|
NM_SET_OUT (out_fd, nm_steal_fd (&fd));
|
|
|
|
|
return TRUE;
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel vesions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
}
|
|
|
|
|
|
2015-10-14 10:01:48 +02:00
|
|
|
static gboolean
|
|
|
|
|
link_vxlan_add (NMPlatform *platform,
|
|
|
|
|
const char *name,
|
2015-12-11 13:34:50 +01:00
|
|
|
const NMPlatformLnkVxlan *props,
|
2015-12-09 15:13:57 +01:00
|
|
|
const NMPlatformLink **out_link)
|
2015-10-14 10:01:48 +02:00
|
|
|
{
|
|
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
|
|
|
|
struct nlattr *info;
|
|
|
|
|
struct nlattr *data;
|
|
|
|
|
struct nm_ifla_vxlan_port_range port_range;
|
|
|
|
|
|
|
|
|
|
g_return_val_if_fail (props, FALSE);
|
|
|
|
|
|
|
|
|
|
nlmsg = _nl_msg_new_link (RTM_NEWLINK,
|
2016-04-08 15:05:35 +02:00
|
|
|
NLM_F_CREATE | NLM_F_EXCL,
|
2015-10-14 10:01:48 +02:00
|
|
|
0,
|
|
|
|
|
name,
|
|
|
|
|
0,
|
|
|
|
|
0);
|
|
|
|
|
if (!nlmsg)
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
|
|
if (!(info = nla_nest_start (nlmsg, IFLA_LINKINFO)))
|
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
|
|
NLA_PUT_STRING (nlmsg, IFLA_INFO_KIND, "vxlan");
|
|
|
|
|
|
|
|
|
|
if (!(data = nla_nest_start (nlmsg, IFLA_INFO_DATA)))
|
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
|
|
NLA_PUT_U32 (nlmsg, IFLA_VXLAN_ID, props->id);
|
|
|
|
|
|
|
|
|
|
if (props->group)
|
|
|
|
|
NLA_PUT (nlmsg, IFLA_VXLAN_GROUP, sizeof (props->group), &props->group);
|
|
|
|
|
else if (memcmp (&props->group6, &in6addr_any, sizeof (in6addr_any)))
|
|
|
|
|
NLA_PUT (nlmsg, IFLA_VXLAN_GROUP6, sizeof (props->group6), &props->group6);
|
|
|
|
|
|
|
|
|
|
if (props->local)
|
|
|
|
|
NLA_PUT (nlmsg, IFLA_VXLAN_LOCAL, sizeof (props->local), &props->local);
|
|
|
|
|
else if (memcmp (&props->local6, &in6addr_any, sizeof (in6addr_any)))
|
|
|
|
|
NLA_PUT (nlmsg, IFLA_VXLAN_LOCAL6, sizeof (props->local6), &props->local6);
|
|
|
|
|
|
|
|
|
|
if (props->parent_ifindex >= 0)
|
|
|
|
|
NLA_PUT_U32 (nlmsg, IFLA_VXLAN_LINK, props->parent_ifindex);
|
|
|
|
|
|
|
|
|
|
if (props->src_port_min || props->src_port_max) {
|
|
|
|
|
port_range.low = htons (props->src_port_min);
|
|
|
|
|
port_range.high = htons (props->src_port_max);
|
|
|
|
|
NLA_PUT (nlmsg, IFLA_VXLAN_PORT_RANGE, sizeof (port_range), &port_range);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
NLA_PUT_U16 (nlmsg, IFLA_VXLAN_PORT, htons (props->dst_port));
|
|
|
|
|
NLA_PUT_U8 (nlmsg, IFLA_VXLAN_TOS, props->tos);
|
|
|
|
|
NLA_PUT_U8 (nlmsg, IFLA_VXLAN_TTL, props->ttl);
|
|
|
|
|
NLA_PUT_U32 (nlmsg, IFLA_VXLAN_AGEING, props->ageing);
|
|
|
|
|
NLA_PUT_U32 (nlmsg, IFLA_VXLAN_LIMIT, props->limit);
|
|
|
|
|
NLA_PUT_U8 (nlmsg, IFLA_VXLAN_LEARNING, !!props->learning);
|
|
|
|
|
NLA_PUT_U8 (nlmsg, IFLA_VXLAN_PROXY, !!props->proxy);
|
|
|
|
|
NLA_PUT_U8 (nlmsg, IFLA_VXLAN_RSC, !!props->rsc);
|
|
|
|
|
NLA_PUT_U8 (nlmsg, IFLA_VXLAN_L2MISS, !!props->l2miss);
|
|
|
|
|
NLA_PUT_U8 (nlmsg, IFLA_VXLAN_L3MISS, !!props->l3miss);
|
|
|
|
|
|
|
|
|
|
nla_nest_end (nlmsg, data);
|
|
|
|
|
nla_nest_end (nlmsg, info);
|
|
|
|
|
|
2018-11-05 12:35:40 +01:00
|
|
|
return (do_add_link_with_lookup (platform, NM_LINK_TYPE_VXLAN, name, nlmsg, out_link) >= 0);
|
2015-10-14 10:01:48 +02:00
|
|
|
nla_put_failure:
|
|
|
|
|
g_return_val_if_reached (FALSE);
|
|
|
|
|
}
|
|
|
|
|
|
2018-05-22 11:50:46 +02:00
|
|
|
static gboolean
|
|
|
|
|
link_6lowpan_add (NMPlatform *platform,
|
|
|
|
|
const char *name,
|
|
|
|
|
int parent,
|
|
|
|
|
const NMPlatformLink **out_link)
|
|
|
|
|
{
|
|
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
|
|
|
|
struct nlattr *info;
|
|
|
|
|
|
|
|
|
|
nlmsg = _nl_msg_new_link (RTM_NEWLINK,
|
|
|
|
|
NLM_F_CREATE | NLM_F_EXCL,
|
|
|
|
|
0,
|
|
|
|
|
name,
|
|
|
|
|
0,
|
|
|
|
|
0);
|
|
|
|
|
if (!nlmsg)
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
|
|
NLA_PUT_U32 (nlmsg, IFLA_LINK, parent);
|
|
|
|
|
|
|
|
|
|
if (!(info = nla_nest_start (nlmsg, IFLA_LINKINFO)))
|
|
|
|
|
goto nla_put_failure;
|
|
|
|
|
|
|
|
|
|
NLA_PUT_STRING (nlmsg, IFLA_INFO_KIND, "lowpan");
|
|
|
|
|
|
|
|
|
|
nla_nest_end (nlmsg, info);
|
|
|
|
|
|
2018-11-05 12:35:40 +01:00
|
|
|
return (do_add_link_with_lookup (platform,
|
|
|
|
|
NM_LINK_TYPE_6LOWPAN,
|
|
|
|
|
name, nlmsg, out_link) >= 0);
|
2018-05-22 11:50:46 +02:00
|
|
|
nla_put_failure:
|
|
|
|
|
g_return_val_if_reached (FALSE);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2015-10-27 16:14:54 +01:00
|
|
|
static void
|
|
|
|
|
_vlan_change_vlan_qos_mapping_create (gboolean is_ingress_map,
|
|
|
|
|
gboolean reset_all,
|
|
|
|
|
const NMVlanQosMapping *current_map,
|
|
|
|
|
guint current_n_map,
|
|
|
|
|
const NMVlanQosMapping *set_map,
|
|
|
|
|
guint set_n_map,
|
|
|
|
|
NMVlanQosMapping **out_map,
|
|
|
|
|
guint *out_n_map)
|
|
|
|
|
{
|
|
|
|
|
NMVlanQosMapping *map;
|
|
|
|
|
guint i, j, len;
|
|
|
|
|
const guint INGRESS_RANGE_LEN = 8;
|
|
|
|
|
|
|
|
|
|
nm_assert (out_map && !*out_map);
|
|
|
|
|
nm_assert (out_n_map && !*out_n_map);
|
|
|
|
|
|
|
|
|
|
if (!reset_all)
|
|
|
|
|
current_n_map = 0;
|
|
|
|
|
else if (is_ingress_map)
|
|
|
|
|
current_n_map = INGRESS_RANGE_LEN;
|
|
|
|
|
|
|
|
|
|
len = current_n_map + set_n_map;
|
|
|
|
|
|
|
|
|
|
if (len == 0)
|
|
|
|
|
return;
|
2013-03-27 22:53:55 +01:00
|
|
|
|
2015-10-27 16:14:54 +01:00
|
|
|
map = g_new (NMVlanQosMapping, len);
|
2015-10-20 09:27:16 +02:00
|
|
|
|
2015-10-27 16:14:54 +01:00
|
|
|
if (current_n_map) {
|
|
|
|
|
if (is_ingress_map) {
|
|
|
|
|
/* For the ingress-map, there are only 8 entries (0 to 7).
|
2017-05-05 14:11:05 +02:00
|
|
|
* When the user requests to reset all entries, we don't actually
|
2015-10-27 16:14:54 +01:00
|
|
|
* need the cached entries, we can just explicitly clear all possible
|
|
|
|
|
* ones.
|
|
|
|
|
*
|
|
|
|
|
* That makes only a real difference in case our cache is out-of-date.
|
|
|
|
|
*
|
|
|
|
|
* For the egress map we cannot do that, because there are far too
|
|
|
|
|
* many. There we can only clear the entries that we know about. */
|
|
|
|
|
for (i = 0; i < INGRESS_RANGE_LEN; i++) {
|
|
|
|
|
map[i].from = i;
|
|
|
|
|
map[i].to = 0;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
for (i = 0; i < current_n_map; i++) {
|
|
|
|
|
map[i].from = current_map[i].from;
|
|
|
|
|
map[i].to = 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (set_n_map)
|
|
|
|
|
memcpy (&map[current_n_map], set_map, sizeof (*set_map) * set_n_map);
|
|
|
|
|
|
|
|
|
|
g_qsort_with_data (map,
|
|
|
|
|
len,
|
|
|
|
|
sizeof (*map),
|
|
|
|
|
_vlan_qos_mapping_cmp_from,
|
|
|
|
|
NULL);
|
|
|
|
|
|
|
|
|
|
for (i = 0, j = 0; i < len; i++) {
|
|
|
|
|
if ( ( is_ingress_map && !VLAN_XGRESS_PRIO_VALID (map[i].from))
|
|
|
|
|
|| (!is_ingress_map && !VLAN_XGRESS_PRIO_VALID (map[i].to)))
|
|
|
|
|
continue;
|
|
|
|
|
if ( j > 0
|
|
|
|
|
&& map[j - 1].from == map[i].from)
|
|
|
|
|
map[j - 1] = map[i];
|
|
|
|
|
else
|
|
|
|
|
map[j++] = map[i];
|
|
|
|
|
}
|
2014-03-05 10:56:16 +01:00
|
|
|
|
2015-10-27 16:14:54 +01:00
|
|
|
*out_map = map;
|
|
|
|
|
*out_n_map = j;
|
2013-03-27 22:53:55 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
2015-10-27 16:14:54 +01:00
|
|
|
link_vlan_change (NMPlatform *platform,
|
|
|
|
|
int ifindex,
|
|
|
|
|
NMVlanFlags flags_mask,
|
|
|
|
|
NMVlanFlags flags_set,
|
|
|
|
|
gboolean ingress_reset_all,
|
|
|
|
|
const NMVlanQosMapping *ingress_map,
|
|
|
|
|
gsize n_ingress_map,
|
|
|
|
|
gboolean egress_reset_all,
|
|
|
|
|
const NMVlanQosMapping *egress_map,
|
|
|
|
|
gsize n_egress_map)
|
2013-03-27 22:53:55 +01:00
|
|
|
{
|
2015-10-27 16:14:54 +01:00
|
|
|
const NMPObject *obj_cache;
|
2015-10-20 09:27:16 +02:00
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
2015-10-27 16:14:54 +01:00
|
|
|
const NMPObjectLnkVlan *lnk;
|
|
|
|
|
guint new_n_ingress_map = 0;
|
|
|
|
|
guint new_n_egress_map = 0;
|
|
|
|
|
gs_free NMVlanQosMapping *new_ingress_map = NULL;
|
|
|
|
|
gs_free NMVlanQosMapping *new_egress_map = NULL;
|
2013-03-27 22:53:55 +01:00
|
|
|
|
2017-06-29 11:18:10 +02:00
|
|
|
obj_cache = nmp_cache_lookup_link (nm_platform_get_cache (platform), ifindex);
|
2015-10-27 16:14:54 +01:00
|
|
|
if ( !obj_cache
|
|
|
|
|
|| !obj_cache->_link.netlink.is_in_netlink) {
|
|
|
|
|
_LOGD ("link: change %d: %s: link does not exist", ifindex, "vlan");
|
2015-10-20 09:27:16 +02:00
|
|
|
return FALSE;
|
2015-10-27 16:14:54 +01:00
|
|
|
}
|
2013-03-27 22:53:55 +01:00
|
|
|
|
2015-10-27 16:14:54 +01:00
|
|
|
lnk = obj_cache->_link.netlink.lnk ? &obj_cache->_link.netlink.lnk->_lnk_vlan : NULL;
|
|
|
|
|
|
|
|
|
|
flags_set &= flags_mask;
|
|
|
|
|
|
|
|
|
|
_vlan_change_vlan_qos_mapping_create (TRUE,
|
|
|
|
|
ingress_reset_all,
|
|
|
|
|
lnk ? lnk->ingress_qos_map : NULL,
|
|
|
|
|
lnk ? lnk->n_ingress_qos_map : 0,
|
|
|
|
|
ingress_map,
|
|
|
|
|
n_ingress_map,
|
|
|
|
|
&new_ingress_map,
|
|
|
|
|
&new_n_ingress_map);
|
|
|
|
|
|
|
|
|
|
_vlan_change_vlan_qos_mapping_create (FALSE,
|
|
|
|
|
egress_reset_all,
|
|
|
|
|
lnk ? lnk->egress_qos_map : NULL,
|
|
|
|
|
lnk ? lnk->n_egress_qos_map : 0,
|
|
|
|
|
egress_map,
|
|
|
|
|
n_egress_map,
|
|
|
|
|
&new_egress_map,
|
|
|
|
|
&new_n_egress_map);
|
|
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
nlmsg = _nl_msg_new_link (RTM_NEWLINK,
|
|
|
|
|
0,
|
|
|
|
|
ifindex,
|
|
|
|
|
NULL,
|
2015-11-02 14:27:22 +01:00
|
|
|
0,
|
|
|
|
|
0);
|
2015-10-20 09:27:16 +02:00
|
|
|
if ( !nlmsg
|
|
|
|
|
|| !_nl_msg_new_link_set_linkinfo_vlan (nlmsg,
|
|
|
|
|
-1,
|
2015-10-27 16:14:54 +01:00
|
|
|
flags_mask,
|
|
|
|
|
flags_set,
|
|
|
|
|
new_ingress_map,
|
|
|
|
|
new_n_ingress_map,
|
|
|
|
|
new_egress_map,
|
|
|
|
|
new_n_egress_map))
|
2015-12-14 19:18:35 +01:00
|
|
|
g_return_val_if_reached (FALSE);
|
2014-03-05 10:56:16 +01:00
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return (do_change_link (platform, CHANGE_LINK_TYPE_UNSPEC, ifindex, nlmsg, NULL) >= 0);
|
2013-03-27 22:53:55 +01:00
|
|
|
}
|
|
|
|
|
|
2013-03-27 22:53:55 +01:00
|
|
|
static gboolean
|
2013-08-02 00:43:12 +02:00
|
|
|
link_enslave (NMPlatform *platform, int master, int slave)
|
2013-03-27 22:53:55 +01:00
|
|
|
{
|
2015-10-20 09:27:16 +02:00
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
|
|
|
|
int ifindex = slave;
|
|
|
|
|
|
|
|
|
|
nlmsg = _nl_msg_new_link (RTM_NEWLINK,
|
|
|
|
|
0,
|
|
|
|
|
ifindex,
|
|
|
|
|
NULL,
|
2015-11-02 14:27:22 +01:00
|
|
|
0,
|
|
|
|
|
0);
|
2015-10-20 09:27:16 +02:00
|
|
|
if (!nlmsg)
|
|
|
|
|
return FALSE;
|
2014-03-05 10:56:16 +01:00
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
NLA_PUT_U32 (nlmsg, IFLA_MASTER, master);
|
|
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return (do_change_link (platform, CHANGE_LINK_TYPE_UNSPEC, ifindex, nlmsg, NULL) >= 0);
|
2015-10-20 09:27:16 +02:00
|
|
|
nla_put_failure:
|
|
|
|
|
g_return_val_if_reached (FALSE);
|
2013-03-27 22:53:55 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
link_release (NMPlatform *platform, int master, int slave)
|
|
|
|
|
{
|
2013-08-02 00:43:12 +02:00
|
|
|
return link_enslave (platform, 0, slave);
|
2013-03-27 22:53:55 +01:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2015-09-04 11:50:41 +02:00
|
|
|
|
2013-06-10 16:21:08 -03:00
|
|
|
static gboolean
|
2016-04-20 12:06:43 +02:00
|
|
|
_infiniband_partition_action (NMPlatform *platform,
|
|
|
|
|
InfinibandAction action,
|
|
|
|
|
int parent,
|
|
|
|
|
int p_key,
|
|
|
|
|
const NMPlatformLink **out_link)
|
2013-06-10 16:21:08 -03:00
|
|
|
{
|
2016-12-09 12:47:17 +01:00
|
|
|
nm_auto_close int dirfd = -1;
|
|
|
|
|
char ifname_parent[IFNAMSIZ];
|
2016-04-20 12:06:43 +02:00
|
|
|
const NMPObject *obj;
|
2016-04-20 11:50:45 +02:00
|
|
|
char id[20];
|
2016-04-20 12:06:43 +02:00
|
|
|
char name[IFNAMSIZ];
|
|
|
|
|
gboolean success;
|
2013-06-10 16:21:08 -03:00
|
|
|
|
2016-04-20 12:06:43 +02:00
|
|
|
nm_assert (NM_IN_SET (action, INFINIBAND_ACTION_CREATE_CHILD, INFINIBAND_ACTION_DELETE_CHILD));
|
2016-04-20 11:44:23 +02:00
|
|
|
nm_assert (p_key > 0 && p_key <= 0xffff && p_key != 0x8000);
|
|
|
|
|
|
2016-12-09 12:47:17 +01:00
|
|
|
dirfd = nm_platform_sysctl_open_netdir (platform, parent, ifname_parent);
|
|
|
|
|
if (dirfd < 0) {
|
2016-04-20 11:28:38 +02:00
|
|
|
errno = ENOENT;
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
2013-06-10 16:21:08 -03:00
|
|
|
|
2016-04-20 11:50:45 +02:00
|
|
|
nm_sprintf_buf (id, "0x%04x", p_key);
|
2016-12-09 12:47:17 +01:00
|
|
|
if (action == INFINIBAND_ACTION_CREATE_CHILD)
|
|
|
|
|
success = nm_platform_sysctl_set (platform, NMP_SYSCTL_PATHID_NETDIR (dirfd, ifname_parent, "create_child"), id);
|
|
|
|
|
else
|
|
|
|
|
success = nm_platform_sysctl_set (platform, NMP_SYSCTL_PATHID_NETDIR (dirfd, ifname_parent, "delete_child"), id);
|
|
|
|
|
|
2016-04-20 12:06:43 +02:00
|
|
|
if (!success) {
|
|
|
|
|
if ( action == INFINIBAND_ACTION_DELETE_CHILD
|
|
|
|
|
&& errno == ENODEV)
|
|
|
|
|
return TRUE;
|
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
|
|
|
return FALSE;
|
2016-04-20 12:06:43 +02:00
|
|
|
}
|
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better than O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many shortcomings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it loses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
|
|
|
|
2016-12-09 12:47:17 +01:00
|
|
|
nm_utils_new_infiniband_name (name, ifname_parent, p_key);
|
2016-04-20 11:28:38 +02:00
|
|
|
do_request_link (platform, 0, name);
|
2013-06-10 16:21:08 -03:00
|
|
|
|
2016-04-20 12:06:43 +02:00
|
|
|
if (action == INFINIBAND_ACTION_DELETE_CHILD)
|
|
|
|
|
return TRUE;
|
|
|
|
|
|
2017-06-29 11:18:10 +02:00
|
|
|
obj = nmp_cache_lookup_link_full (nm_platform_get_cache (platform), 0, name, FALSE,
|
2016-04-20 12:06:43 +02:00
|
|
|
NM_LINK_TYPE_INFINIBAND, NULL, NULL);
|
2015-12-09 15:13:57 +01:00
|
|
|
if (out_link)
|
|
|
|
|
*out_link = obj ? &obj->link : NULL;
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
return !!obj;
|
2013-06-10 16:21:08 -03:00
|
|
|
}
|
|
|
|
|
|
2016-04-20 09:16:21 +02:00
|
|
|
static gboolean
|
2016-04-20 12:06:43 +02:00
|
|
|
infiniband_partition_add (NMPlatform *platform, int parent, int p_key, const NMPlatformLink **out_link)
|
2016-04-20 09:16:21 +02:00
|
|
|
{
|
2016-04-20 12:06:43 +02:00
|
|
|
return _infiniband_partition_action (platform, INFINIBAND_ACTION_CREATE_CHILD, parent, p_key, out_link);
|
|
|
|
|
}
|
2016-04-20 09:16:21 +02:00
|
|
|
|
2016-04-20 12:06:43 +02:00
|
|
|
static gboolean
|
|
|
|
|
infiniband_partition_delete (NMPlatform *platform, int parent, int p_key)
|
|
|
|
|
{
|
|
|
|
|
return _infiniband_partition_action (platform, INFINIBAND_ACTION_DELETE_CHILD, parent, p_key, NULL);
|
2016-04-20 09:16:21 +02:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2015-09-04 11:50:41 +02:00
|
|
|
|
2018-06-05 15:20:54 +02:00
|
|
|
static GObject *
|
|
|
|
|
get_ext_data (NMPlatform *platform, int ifindex)
|
2014-02-04 14:27:03 +01:00
|
|
|
{
|
2018-05-15 16:47:13 +02:00
|
|
|
const NMPObject *obj;
|
2014-02-04 14:27:03 +01:00
|
|
|
|
2018-05-15 16:47:13 +02:00
|
|
|
obj = nmp_cache_lookup_link (nm_platform_get_cache (platform), ifindex);
|
|
|
|
|
if (!obj)
|
|
|
|
|
return NULL;
|
2014-02-04 14:27:03 +01:00
|
|
|
|
2018-06-05 15:20:54 +02:00
|
|
|
return obj->_link.ext_data;
|
2014-02-04 14:27:03 +01:00
|
|
|
}
|
2018-05-15 16:47:13 +02:00
|
|
|
|
2018-06-05 15:20:54 +02:00
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2016-02-19 01:06:28 +01:00
|
|
|
#define WIFI_GET_WIFI_DATA_NETNS(wifi_data, platform, ifindex, retval) \
|
|
|
|
|
nm_auto_pop_netns NMPNetns *netns = NULL; \
|
2018-06-09 13:56:21 +02:00
|
|
|
NMWifiUtils *wifi_data; \
|
2016-02-19 01:06:28 +01:00
|
|
|
if (!nm_platform_netns_push (platform, &netns)) \
|
|
|
|
|
return retval; \
|
2018-06-05 15:20:54 +02:00
|
|
|
wifi_data = NM_WIFI_UTILS (get_ext_data (platform, ifindex)); \
|
2016-02-19 01:06:28 +01:00
|
|
|
if (!wifi_data) \
|
|
|
|
|
return retval;
|
2014-02-04 14:27:03 +01:00
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
wifi_get_capabilities (NMPlatform *platform, int ifindex, NMDeviceWifiCapabilities *caps)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, FALSE);
|
2014-02-04 14:27:03 +01:00
|
|
|
if (caps)
|
2018-06-09 13:56:21 +02:00
|
|
|
*caps = nm_wifi_utils_get_caps (wifi_data);
|
2014-02-04 14:27:03 +01:00
|
|
|
return TRUE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
2014-07-07 12:04:14 -04:00
|
|
|
wifi_get_bssid (NMPlatform *platform, int ifindex, guint8 *bssid)
|
2014-02-04 14:27:03 +01:00
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, FALSE);
|
2018-06-09 13:56:21 +02:00
|
|
|
return nm_wifi_utils_get_bssid (wifi_data, bssid);
|
2014-02-04 14:27:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static guint32
|
|
|
|
|
wifi_get_frequency (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, 0);
|
2018-06-09 13:56:21 +02:00
|
|
|
return nm_wifi_utils_get_freq (wifi_data);
|
2014-02-04 14:27:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
wifi_get_quality (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, FALSE);
|
2018-06-09 13:56:21 +02:00
|
|
|
return nm_wifi_utils_get_qual (wifi_data);
|
2014-02-04 14:27:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static guint32
|
|
|
|
|
wifi_get_rate (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, FALSE);
|
2018-06-09 13:56:21 +02:00
|
|
|
return nm_wifi_utils_get_rate (wifi_data);
|
2014-02-04 14:27:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static NM80211Mode
|
|
|
|
|
wifi_get_mode (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, NM_802_11_MODE_UNKNOWN);
|
2018-06-09 13:56:21 +02:00
|
|
|
return nm_wifi_utils_get_mode (wifi_data);
|
2014-02-04 14:27:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
wifi_set_mode (NMPlatform *platform, int ifindex, NM80211Mode mode)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, );
|
2018-06-09 13:56:21 +02:00
|
|
|
nm_wifi_utils_set_mode (wifi_data, mode);
|
2014-02-04 14:27:03 +01:00
|
|
|
}
|
|
|
|
|
|
2014-10-23 14:19:59 -04:00
|
|
|
static void
|
|
|
|
|
wifi_set_powersave (NMPlatform *platform, int ifindex, guint32 powersave)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, );
|
2018-06-09 13:56:21 +02:00
|
|
|
nm_wifi_utils_set_powersave (wifi_data, powersave);
|
2014-10-23 14:19:59 -04:00
|
|
|
}
|
|
|
|
|
|
2014-02-04 14:27:03 +01:00
|
|
|
static guint32
|
|
|
|
|
wifi_find_frequency (NMPlatform *platform, int ifindex, const guint32 *freqs)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, 0);
|
2018-06-09 13:56:21 +02:00
|
|
|
return nm_wifi_utils_find_freq (wifi_data, freqs);
|
2014-02-04 14:27:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
wifi_indicate_addressing_running (NMPlatform *platform, int ifindex, gboolean running)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, );
|
2018-06-09 13:56:21 +02:00
|
|
|
nm_wifi_utils_indicate_addressing_running (wifi_data, running);
|
2014-02-04 14:27:03 +01:00
|
|
|
}
|
|
|
|
|
|
2018-06-19 14:44:36 +02:00
|
|
|
static NMSettingWirelessWakeOnWLan
|
|
|
|
|
wifi_get_wake_on_wlan (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
|
|
|
|
WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, FALSE);
|
2018-06-09 15:50:56 +02:00
|
|
|
return nm_wifi_utils_get_wake_on_wlan (wifi_data);
|
2018-06-19 14:44:36 +02:00
|
|
|
}
|
|
|
|
|
|
2018-05-25 17:43:54 +02:00
|
|
|
static gboolean
|
|
|
|
|
wifi_set_wake_on_wlan (NMPlatform *platform, int ifindex,
|
|
|
|
|
NMSettingWirelessWakeOnWLan wowl)
|
|
|
|
|
{
|
|
|
|
|
WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, FALSE);
|
2018-06-09 15:50:56 +02:00
|
|
|
return nm_wifi_utils_set_wake_on_wlan (wifi_data, wowl);
|
2018-05-25 17:43:54 +02:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2014-02-04 14:27:03 +01:00
|
|
|
|
2016-03-21 15:22:10 +01:00
|
|
|
static gboolean
|
|
|
|
|
link_can_assume (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
NMPLookup lookup;
|
|
|
|
|
const NMPObject *link, *o;
|
|
|
|
|
NMDedupMultiIter iter;
|
2017-06-29 11:18:10 +02:00
|
|
|
NMPCache *cache = nm_platform_get_cache (platform);
|
2016-03-21 15:22:10 +01:00
|
|
|
|
|
|
|
|
if (ifindex <= 0)
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
2017-07-04 12:49:47 +02:00
|
|
|
link = nm_platform_link_get_obj (platform, ifindex, TRUE);
|
2016-03-21 15:22:10 +01:00
|
|
|
if (!link)
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
|
|
if (!NM_FLAGS_HAS (link->link.n_ifi_flags, IFF_UP))
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
|
|
if (link->link.master > 0)
|
|
|
|
|
return TRUE;
|
|
|
|
|
|
2017-11-23 15:41:57 +01:00
|
|
|
nmp_lookup_init_object (&lookup,
|
|
|
|
|
NMP_OBJECT_TYPE_IP4_ADDRESS,
|
|
|
|
|
ifindex);
|
2017-06-29 11:18:10 +02:00
|
|
|
if (nmp_cache_lookup (cache, &lookup))
|
2016-03-21 15:22:10 +01:00
|
|
|
return TRUE;
|
|
|
|
|
|
2017-11-23 15:41:57 +01:00
|
|
|
nmp_lookup_init_object (&lookup,
|
|
|
|
|
NMP_OBJECT_TYPE_IP6_ADDRESS,
|
|
|
|
|
ifindex);
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
nmp_cache_iter_for_each (&iter,
|
2017-06-29 11:18:10 +02:00
|
|
|
nmp_cache_lookup (cache, &lookup),
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
&o) {
|
|
|
|
|
nm_assert (NMP_OBJECT_GET_TYPE (o) == NMP_OBJECT_TYPE_IP6_ADDRESS);
|
|
|
|
|
if (!IN6_IS_ADDR_LINKLOCAL (&o->ip6_address.address))
|
2017-07-04 12:49:47 +02:00
|
|
|
return TRUE;
|
2016-03-21 15:22:10 +01:00
|
|
|
}
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2016-03-21 15:22:10 +01:00
|
|
|
|
2014-02-04 14:27:03 +01:00
|
|
|
static guint32
|
|
|
|
|
mesh_get_channel (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, 0);
|
2018-06-09 13:56:21 +02:00
|
|
|
return nm_wifi_utils_get_mesh_channel (wifi_data);
|
2014-02-04 14:27:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
mesh_set_channel (NMPlatform *platform, int ifindex, guint32 channel)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, FALSE);
|
2018-06-09 13:56:21 +02:00
|
|
|
return nm_wifi_utils_set_mesh_channel (wifi_data, channel);
|
2014-02-04 14:27:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
2014-06-26 10:42:11 -04:00
|
|
|
mesh_set_ssid (NMPlatform *platform, int ifindex, const guint8 *ssid, gsize len)
|
2014-02-04 14:27:03 +01:00
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
WIFI_GET_WIFI_DATA_NETNS (wifi_data, platform, ifindex, FALSE);
|
2018-06-09 13:56:21 +02:00
|
|
|
return nm_wifi_utils_set_mesh_ssid (wifi_data, ssid, len);
|
2014-02-04 14:27:03 +01:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2015-09-04 11:50:41 +02:00
|
|
|
|
2018-06-05 15:20:54 +02:00
|
|
|
#define WPAN_GET_WPAN_DATA(wpan_data, platform, ifindex, retval) \
|
|
|
|
|
NMWpanUtils *wpan_data = NM_WPAN_UTILS (get_ext_data (platform, ifindex)); \
|
|
|
|
|
if (!wpan_data) \
|
|
|
|
|
return retval;
|
|
|
|
|
|
|
|
|
|
static guint16
|
|
|
|
|
wpan_get_pan_id (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
|
|
|
|
WPAN_GET_WPAN_DATA (wpan_data, platform, ifindex, G_MAXINT16);
|
|
|
|
|
return nm_wpan_utils_get_pan_id (wpan_data);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
wpan_set_pan_id (NMPlatform *platform, int ifindex, guint16 pan_id)
|
|
|
|
|
{
|
|
|
|
|
WPAN_GET_WPAN_DATA (wpan_data, platform, ifindex, FALSE);
|
|
|
|
|
return nm_wpan_utils_set_pan_id (wpan_data, pan_id);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static guint16
|
|
|
|
|
wpan_get_short_addr (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
|
|
|
|
WPAN_GET_WPAN_DATA (wpan_data, platform, ifindex, G_MAXINT16);
|
|
|
|
|
return nm_wpan_utils_get_short_addr (wpan_data);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
wpan_set_short_addr (NMPlatform *platform, int ifindex, guint16 short_addr)
|
|
|
|
|
{
|
|
|
|
|
WPAN_GET_WPAN_DATA (wpan_data, platform, ifindex, FALSE);
|
|
|
|
|
return nm_wpan_utils_set_short_addr (wpan_data, short_addr);
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-19 19:43:09 +02:00
|
|
|
static gboolean
|
|
|
|
|
wpan_set_channel (NMPlatform *platform, int ifindex, guint8 page, guint8 channel)
|
|
|
|
|
{
|
|
|
|
|
WPAN_GET_WPAN_DATA (wpan_data, platform, ifindex, FALSE);
|
|
|
|
|
return nm_wpan_utils_set_channel (wpan_data, page, channel);
|
|
|
|
|
}
|
|
|
|
|
|
2018-06-05 15:20:54 +02:00
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2014-02-05 11:56:44 +01:00
|
|
|
static gboolean
|
|
|
|
|
link_get_wake_on_lan (NMPlatform *platform, int ifindex)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
nm_auto_pop_netns NMPNetns *netns = NULL;
|
2015-06-20 12:05:01 +02:00
|
|
|
NMLinkType type = nm_platform_link_get_type (platform, ifindex);
|
2014-02-05 11:56:44 +01:00
|
|
|
|
2016-02-19 01:06:28 +01:00
|
|
|
if (!nm_platform_netns_push (platform, &netns))
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
2015-05-03 10:15:57 +02:00
|
|
|
if (type == NM_LINK_TYPE_ETHERNET)
|
2016-12-11 22:46:14 +01:00
|
|
|
return nmp_utils_ethtool_get_wake_on_lan (ifindex);
|
2015-05-03 10:15:57 +02:00
|
|
|
else if (type == NM_LINK_TYPE_WIFI) {
|
2018-06-05 15:20:54 +02:00
|
|
|
NMWifiUtils *wifi_data = NM_WIFI_UTILS (get_ext_data (platform, ifindex));
|
2014-02-05 11:56:44 +01:00
|
|
|
|
|
|
|
|
if (!wifi_data)
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
2018-06-09 13:56:21 +02:00
|
|
|
return nm_wifi_utils_get_wake_on_wlan (wifi_data) != NM_SETTING_WIRELESS_WAKE_ON_WLAN_NONE;
|
2014-02-05 11:56:44 +01:00
|
|
|
} else
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
2014-10-03 13:41:49 -05:00
|
|
|
static gboolean
|
|
|
|
|
link_get_driver_info (NMPlatform *platform,
|
|
|
|
|
int ifindex,
|
|
|
|
|
char **out_driver_name,
|
|
|
|
|
char **out_driver_version,
|
|
|
|
|
char **out_fw_version)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
nm_auto_pop_netns NMPNetns *netns = NULL;
|
2016-12-12 13:47:52 +01:00
|
|
|
NMPUtilsEthtoolDriverInfo driver_info;
|
2016-02-19 01:06:28 +01:00
|
|
|
|
|
|
|
|
if (!nm_platform_netns_push (platform, &netns))
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
2016-12-12 13:47:52 +01:00
|
|
|
if (!nmp_utils_ethtool_get_driver_info (ifindex, &driver_info))
|
|
|
|
|
return FALSE;
|
|
|
|
|
NM_SET_OUT (out_driver_name, g_strdup (driver_info.driver));
|
|
|
|
|
NM_SET_OUT (out_driver_version, g_strdup (driver_info.version));
|
|
|
|
|
NM_SET_OUT (out_fw_version, g_strdup (driver_info.fw_version));
|
|
|
|
|
return TRUE;
|
2014-10-03 13:41:49 -05:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2013-03-27 22:23:24 +01:00
|
|
|
|
2013-03-27 22:23:24 +01:00
|
|
|
static gboolean
|
2013-12-02 10:20:26 -05:00
|
|
|
ip4_address_add (NMPlatform *platform,
|
|
|
|
|
int ifindex,
|
|
|
|
|
in_addr_t addr,
|
2016-04-06 18:04:26 +02:00
|
|
|
guint8 plen,
|
2015-10-10 19:58:59 +02:00
|
|
|
in_addr_t peer_addr,
|
2013-12-02 10:20:26 -05:00
|
|
|
guint32 lifetime,
|
2014-02-19 16:10:59 -05:00
|
|
|
guint32 preferred,
|
2016-02-29 17:06:21 +01:00
|
|
|
guint32 flags,
|
2014-02-19 16:10:59 -05:00
|
|
|
const char *label)
|
2013-03-27 22:23:24 +01:00
|
|
|
{
|
2015-10-26 09:19:15 +01:00
|
|
|
NMPObject obj_id;
|
2015-10-20 09:27:16 +02:00
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
|
|
|
|
|
|
|
|
|
nlmsg = _nl_msg_new_address (RTM_NEWADDR,
|
|
|
|
|
NLM_F_CREATE | NLM_F_REPLACE,
|
|
|
|
|
AF_INET,
|
|
|
|
|
ifindex,
|
|
|
|
|
&addr,
|
|
|
|
|
plen,
|
|
|
|
|
&peer_addr,
|
2016-02-29 17:06:21 +01:00
|
|
|
flags,
|
2016-03-07 11:45:44 +01:00
|
|
|
nm_utils_ip4_address_is_link_local (addr) ? RT_SCOPE_LINK : RT_SCOPE_UNIVERSE,
|
2015-10-20 09:27:16 +02:00
|
|
|
lifetime,
|
|
|
|
|
preferred,
|
|
|
|
|
label);
|
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
nmp_object_stackinit_id_ip4_address (&obj_id, ifindex, addr, plen, peer_addr);
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return (do_add_addrroute (platform, &obj_id, nlmsg, FALSE) >= 0);
|
2013-03-27 22:23:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
2013-12-02 10:20:26 -05:00
|
|
|
ip6_address_add (NMPlatform *platform,
|
|
|
|
|
int ifindex,
|
|
|
|
|
struct in6_addr addr,
|
2016-04-06 18:04:26 +02:00
|
|
|
guint8 plen,
|
2015-10-10 19:58:59 +02:00
|
|
|
struct in6_addr peer_addr,
|
2013-12-02 10:20:26 -05:00
|
|
|
guint32 lifetime,
|
|
|
|
|
guint32 preferred,
|
2016-02-29 15:36:12 +01:00
|
|
|
guint32 flags)
|
2013-03-27 22:23:24 +01:00
|
|
|
{
|
2015-10-26 09:19:15 +01:00
|
|
|
NMPObject obj_id;
|
2015-10-20 09:27:16 +02:00
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
|
|
|
|
|
|
|
|
|
nlmsg = _nl_msg_new_address (RTM_NEWADDR,
|
|
|
|
|
NLM_F_CREATE | NLM_F_REPLACE,
|
|
|
|
|
AF_INET6,
|
|
|
|
|
ifindex,
|
|
|
|
|
&addr,
|
|
|
|
|
plen,
|
2019-02-03 21:40:53 +01:00
|
|
|
IN6_IS_ADDR_UNSPECIFIED (&peer_addr) ? NULL : &peer_addr,
|
2015-10-20 09:27:16 +02:00
|
|
|
flags,
|
|
|
|
|
RT_SCOPE_UNIVERSE,
|
|
|
|
|
lifetime,
|
|
|
|
|
preferred,
|
|
|
|
|
NULL);
|
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
|
|
|
|
2017-05-26 21:49:00 +02:00
|
|
|
nmp_object_stackinit_id_ip6_address (&obj_id, ifindex, &addr);
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return (do_add_addrroute (platform, &obj_id, nlmsg, FALSE) >= 0);
|
2013-03-27 22:23:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
2016-04-06 18:04:26 +02:00
|
|
|
ip4_address_delete (NMPlatform *platform, int ifindex, in_addr_t addr, guint8 plen, in_addr_t peer_address)
|
2013-03-27 22:23:24 +01:00
|
|
|
{
|
2015-10-20 09:27:16 +02:00
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
2015-10-26 09:19:15 +01:00
|
|
|
NMPObject obj_id;
|
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
nlmsg = _nl_msg_new_address (RTM_DELADDR,
|
|
|
|
|
0,
|
|
|
|
|
AF_INET,
|
|
|
|
|
ifindex,
|
|
|
|
|
&addr,
|
|
|
|
|
plen,
|
|
|
|
|
&peer_address,
|
|
|
|
|
0,
|
|
|
|
|
RT_SCOPE_NOWHERE,
|
|
|
|
|
NM_PLATFORM_LIFETIME_PERMANENT,
|
|
|
|
|
NM_PLATFORM_LIFETIME_PERMANENT,
|
|
|
|
|
NULL);
|
2015-12-15 13:25:53 +01:00
|
|
|
if (!nlmsg)
|
|
|
|
|
g_return_val_if_reached (FALSE);
|
2015-10-20 09:27:16 +02:00
|
|
|
|
2015-10-26 09:19:15 +01:00
|
|
|
nmp_object_stackinit_id_ip4_address (&obj_id, ifindex, addr, plen, peer_address);
|
2015-10-20 09:27:16 +02:00
|
|
|
return do_delete_object (platform, &obj_id, nlmsg);
|
2013-03-27 22:23:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
2016-04-06 18:04:26 +02:00
|
|
|
ip6_address_delete (NMPlatform *platform, int ifindex, struct in6_addr addr, guint8 plen)
|
2013-03-27 22:23:24 +01:00
|
|
|
{
|
2015-10-20 09:27:16 +02:00
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
2015-10-26 09:19:15 +01:00
|
|
|
NMPObject obj_id;
|
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
nlmsg = _nl_msg_new_address (RTM_DELADDR,
|
|
|
|
|
0,
|
|
|
|
|
AF_INET6,
|
|
|
|
|
ifindex,
|
|
|
|
|
&addr,
|
|
|
|
|
plen,
|
|
|
|
|
NULL,
|
|
|
|
|
0,
|
|
|
|
|
RT_SCOPE_NOWHERE,
|
|
|
|
|
NM_PLATFORM_LIFETIME_PERMANENT,
|
|
|
|
|
NM_PLATFORM_LIFETIME_PERMANENT,
|
|
|
|
|
NULL);
|
2015-12-15 13:25:53 +01:00
|
|
|
if (!nlmsg)
|
|
|
|
|
g_return_val_if_reached (FALSE);
|
2015-10-20 09:27:16 +02:00
|
|
|
|
2017-05-26 21:49:00 +02:00
|
|
|
nmp_object_stackinit_id_ip6_address (&obj_id, ifindex, &addr);
|
2015-10-20 09:27:16 +02:00
|
|
|
return do_delete_object (platform, &obj_id, nlmsg);
|
2013-03-27 22:23:24 +01:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2013-03-27 22:23:24 +01:00
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had its own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
static int
|
2017-08-02 10:27:32 +02:00
|
|
|
ip_route_add (NMPlatform *platform,
|
|
|
|
|
NMPNlmFlags flags,
|
|
|
|
|
int addr_family,
|
|
|
|
|
const NMPlatformIPRoute *route)
|
2013-03-27 22:23:24 +01:00
|
|
|
{
|
2015-10-20 09:27:16 +02:00
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
platform: pass full route object to platform delete function
Contrary to addresses, routes have no ID. When deleting a route,
you cannot just specify certain properties like network/plen,metric.
Well, actually you can specify only certain properties, but then kernel
will treat unspecified properties as wildcard and delete the first matching
route. That is not something we want, because we need to be in control which
exact route shall be deleted.
Also, rtm_tos *must* match. Even if we like the wildcard behavior,
we would need to pass TOS to nm_platform_ip4_route_delete() to be
able to delete routes with non-zero TOS. So, while certain properties
may be omitted, some must not. See how test_ip4_route_options() was
broken.
For NetworkManager it only makes ever sense to call delete on a route,
if the route is already fully known. Which means, we only delete routes
that we have already in the platform cache (otherwise, how would we know
that there is something to delete). Because of that, no longer have separate
IPv4 and IPv6 functions. Instead, have nm_platform_ip_route_delete() which
accepts a full NMPObject from the platform cache.
The code in core doesn't jet make use of this new functionality. It will
in the future.
At least, it fixes deleting routes with differing TOS.
2017-07-11 16:38:49 +02:00
|
|
|
NMPObject obj;
|
2017-08-02 10:27:32 +02:00
|
|
|
|
|
|
|
|
switch (addr_family) {
|
|
|
|
|
case AF_INET:
|
|
|
|
|
nmp_object_stackinit (&obj, NMP_OBJECT_TYPE_IP4_ROUTE, (const NMPlatformObject *) route);
|
|
|
|
|
break;
|
|
|
|
|
case AF_INET6:
|
|
|
|
|
nmp_object_stackinit (&obj, NMP_OBJECT_TYPE_IP6_ROUTE, (const NMPlatformObject *) route);
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
nm_assert_not_reached ();
|
|
|
|
|
}
|
2015-10-20 09:27:16 +02:00
|
|
|
|
2017-08-17 13:37:21 +02:00
|
|
|
nm_platform_ip_route_normalize (addr_family, NMP_OBJECT_CAST_IP_ROUTE (&obj));
|
|
|
|
|
|
2017-08-21 18:02:08 +02:00
|
|
|
nlmsg = _nl_msg_new_route (RTM_NEWROUTE, flags & NMP_NLM_FLAG_FMASK, &obj);
|
2017-08-02 10:27:32 +02:00
|
|
|
if (!nlmsg)
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
g_return_val_if_reached (-NME_BUG);
|
2017-08-21 18:02:08 +02:00
|
|
|
return do_add_addrroute (platform,
|
|
|
|
|
&obj,
|
|
|
|
|
nlmsg,
|
|
|
|
|
NM_FLAGS_HAS (flags, NMP_NLM_FLAG_SUPPRESS_NETLINK_FAILURE));
|
2013-03-27 22:23:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
2017-11-29 13:10:39 +01:00
|
|
|
object_delete (NMPlatform *platform,
|
|
|
|
|
const NMPObject *obj)
|
2013-03-27 22:23:24 +01:00
|
|
|
{
|
platform: pass full route object to platform delete function
Contrary to addresses, routes have no ID. When deleting a route,
you cannot just specify certain properties like network/plen,metric.
Well, actually you can specify only certain properties, but then kernel
will treat unspecified properties as wildcard and delete the first matching
route. That is not something we want, because we need to be in control which
exact route shall be deleted.
Also, rtm_tos *must* match. Even if we like the wildcard behavior,
we would need to pass TOS to nm_platform_ip4_route_delete() to be
able to delete routes with non-zero TOS. So, while certain properties
may be omitted, some must not. See how test_ip4_route_options() was
broken.
For NetworkManager it only makes ever sense to call delete on a route,
if the route is already fully known. Which means, we only delete routes
that we have already in the platform cache (otherwise, how would we know
that there is something to delete). Because of that, no longer have separate
IPv4 and IPv6 functions. Instead, have nm_platform_ip_route_delete() which
accepts a full NMPObject from the platform cache.
The code in core doesn't jet make use of this new functionality. It will
in the future.
At least, it fixes deleting routes with differing TOS.
2017-07-11 16:38:49 +02:00
|
|
|
nm_auto_nmpobj const NMPObject *obj_keep_alive = NULL;
|
2015-10-20 09:27:16 +02:00
|
|
|
nm_auto_nlmsg struct nl_msg *nlmsg = NULL;
|
2014-02-13 15:11:05 +01:00
|
|
|
|
platform: pass full route object to platform delete function
Contrary to addresses, routes have no ID. When deleting a route,
you cannot just specify certain properties like network/plen,metric.
Well, actually you can specify only certain properties, but then kernel
will treat unspecified properties as wildcard and delete the first matching
route. That is not something we want, because we need to be in control which
exact route shall be deleted.
Also, rtm_tos *must* match. Even if we like the wildcard behavior,
we would need to pass TOS to nm_platform_ip4_route_delete() to be
able to delete routes with non-zero TOS. So, while certain properties
may be omitted, some must not. See how test_ip4_route_options() was
broken.
For NetworkManager it only makes ever sense to call delete on a route,
if the route is already fully known. Which means, we only delete routes
that we have already in the platform cache (otherwise, how would we know
that there is something to delete). Because of that, no longer have separate
IPv4 and IPv6 functions. Instead, have nm_platform_ip_route_delete() which
accepts a full NMPObject from the platform cache.
The code in core doesn't jet make use of this new functionality. It will
in the future.
At least, it fixes deleting routes with differing TOS.
2017-07-11 16:38:49 +02:00
|
|
|
if (!NMP_OBJECT_IS_STACKINIT (obj))
|
|
|
|
|
obj_keep_alive = nmp_object_ref (obj);
|
platform: fix lookup of routes and deletion of IPv4 routes
When doing a lookup for an libnl route, the cache comparison function
for routes takes into account 'family', 'tos', 'table', 'dst', and 'prio'.
In NetworkManager we don't use all of these properties for a route, so
at several places when doing a cache lookup we don't have all identifying
properties. Usually we only have 'family' and 'dst' ('table' is
implicit 0, because NM does currently not care about any other tables).
The problem is that NM sees routes with different 'tos', 'prio', but it
cannot look them up in the cache. Add a hack to search the cache
fuzzy.
This is similar to the hack for link, where the identifying properties
are 'family' and 'ifindex', but we only have 'ifindex' at hand. However,
contrary to this hack, we coerce the 'family' to AF_UNSPEC for every link cache
operation. This is not viable in this case, because we internally need
the 'tos' field.
We need the 'tos' field because when deleting an IPv4 route, the 'tos' field must
match. See fib_table_delete(). This was already partially fixed by commit
f0daf90298d1bd9cafac7b9c02dc905327e0b85a, but before the lookup to the
cached object would fail for any non-zero 'tos'.
Signed-off-by: Thomas Haller <thaller@redhat.com>
2014-05-28 18:46:12 +02:00
|
|
|
|
2017-11-29 13:10:39 +01:00
|
|
|
switch (NMP_OBJECT_GET_TYPE (obj)) {
|
|
|
|
|
case NMP_OBJECT_TYPE_IP4_ROUTE:
|
|
|
|
|
case NMP_OBJECT_TYPE_IP6_ROUTE:
|
|
|
|
|
nlmsg = _nl_msg_new_route (RTM_DELROUTE, 0, obj);
|
|
|
|
|
break;
|
2017-11-15 20:36:35 +01:00
|
|
|
case NMP_OBJECT_TYPE_QDISC:
|
|
|
|
|
nlmsg = _nl_msg_new_qdisc (RTM_DELQDISC, 0, NMP_OBJECT_CAST_QDISC (obj));
|
|
|
|
|
break;
|
2017-11-15 20:36:35 +01:00
|
|
|
case NMP_OBJECT_TYPE_TFILTER:
|
|
|
|
|
nlmsg = _nl_msg_new_tfilter (RTM_DELTFILTER, 0, NMP_OBJECT_CAST_TFILTER (obj));
|
|
|
|
|
break;
|
2017-11-29 13:10:39 +01:00
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2015-10-20 09:27:16 +02:00
|
|
|
if (!nlmsg)
|
2017-08-02 10:27:32 +02:00
|
|
|
g_return_val_if_reached (FALSE);
|
platform: pass full route object to platform delete function
Contrary to addresses, routes have no ID. When deleting a route,
you cannot just specify certain properties like network/plen,metric.
Well, actually you can specify only certain properties, but then kernel
will treat unspecified properties as wildcard and delete the first matching
route. That is not something we want, because we need to be in control which
exact route shall be deleted.
Also, rtm_tos *must* match. Even if we like the wildcard behavior,
we would need to pass TOS to nm_platform_ip4_route_delete() to be
able to delete routes with non-zero TOS. So, while certain properties
may be omitted, some must not. See how test_ip4_route_options() was
broken.
For NetworkManager it only makes ever sense to call delete on a route,
if the route is already fully known. Which means, we only delete routes
that we have already in the platform cache (otherwise, how would we know
that there is something to delete). Because of that, no longer have separate
IPv4 and IPv6 functions. Instead, have nm_platform_ip_route_delete() which
accepts a full NMPObject from the platform cache.
The code in core doesn't jet make use of this new functionality. It will
in the future.
At least, it fixes deleting routes with differing TOS.
2017-07-11 16:38:49 +02:00
|
|
|
return do_delete_object (platform, obj, nlmsg);
|
2013-03-27 22:23:24 +01:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2013-03-27 22:23:24 +01:00
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had its own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
/* ip_route_get:
 * @platform: the #NMPlatform instance
 * @addr_family: AF_INET or AF_INET6
 * @address: pointer to the destination address (in_addr_t or struct in6_addr,
 *   according to @addr_family)
 * @oif_ifindex: if positive, restrict the lookup to this output interface
 *   (RTA_OIF)
 * @out_route: (allow-none): on success, receives the route that the kernel
 *   would use to reach @address; caller owns the reference
 *
 * Performs an RTM_GETROUTE route lookup for a single destination. The
 * request is retried up to 10 times when the response was lost due to a
 * netlink cache resync.
 *
 * Returns: 0 on success, a negative errno/nm-errno value on failure.
 */
static int
ip_route_get (NMPlatform *platform,
              int addr_family,
              gconstpointer address,
              int oif_ifindex,
              NMPObject **out_route)
{
	const gboolean is_v4 = (addr_family == AF_INET);
	const int addr_len = is_v4 ? 4 : 16;
	int try_count = 0;
	WaitForNlResponseResult seq_result;
	int nle;
	nm_auto_nmpobj NMPObject *route = NULL;

	nm_assert (NM_IS_LINUX_PLATFORM (platform));
	nm_assert (NM_IN_SET (addr_family, AF_INET, AF_INET6));
	nm_assert (address);

	do {
		/* Request buffer: header, route message, and room for attributes. */
		struct {
			struct nlmsghdr n;
			struct rtmsg r;
			char buf[64];
		} req = {
			.n.nlmsg_len = NLMSG_LENGTH (sizeof (struct rtmsg)),
			.n.nlmsg_flags = NLM_F_REQUEST,
			.n.nlmsg_type = RTM_GETROUTE,
			.r.rtm_family = addr_family,
			.r.rtm_tos = 0,
			.r.rtm_dst_len = is_v4 ? 32 : 128,
			/* numeric literal because the RTM_F_LOOKUP_TABLE define may be
			 * missing from older kernel headers. */
			.r.rtm_flags = 0x1000 /* RTM_F_LOOKUP_TABLE */,
		};

		/* Drop any result from a previous (failed) iteration. */
		g_clear_pointer (&route, nmp_object_unref);

		if (!_nl_addattr_l (&req.n, sizeof (req), RTA_DST, address, addr_len))
			nm_assert_not_reached ();

		if (oif_ifindex > 0) {
			gint32 ii = oif_ifindex;

			if (!_nl_addattr_l (&req.n, sizeof (req), RTA_OIF, &ii, sizeof (ii)))
				nm_assert_not_reached ();
		}

		seq_result = WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN;
		nle = _nl_send_nlmsghdr (platform, &req.n, &seq_result, NULL, DELAYED_ACTION_RESPONSE_TYPE_ROUTE_GET, &route);
		if (nle < 0) {
			_LOGE ("get-route: failure sending netlink request \"%s\" (%d)",
			       nm_strerror_native (-nle), -nle);
			return -NME_UNSPEC;
		}

		/* Process pending netlink events; this also resolves @seq_result. */
		delayed_action_handle_all (platform, FALSE);

		/* Retry, if we failed due to a cache resync. That can happen when the netlink
		 * socket fills up and we lost the response. */
	} while (   seq_result == WAIT_FOR_NL_RESPONSE_RESULT_FAILED_RESYNC
	         && ++try_count < 10);

	if (seq_result < 0) {
		/* negative seq_result is an errno from kernel. Map it to negative
		 * int (which are also errno). */
		return (int) seq_result;
	}

	if (seq_result == WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK) {
		if (route) {
			NM_SET_OUT (out_route, g_steal_pointer (&route));
			return 0;
		}
		/* OK response but no route object received: treat as unknown. */
		seq_result = WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_UNKNOWN;
	}

	return -NME_UNSPEC;
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
static int
|
2017-11-15 20:36:35 +01:00
|
|
|
qdisc_add (NMPlatform *platform,
|
|
|
|
|
NMPNlmFlags flags,
|
|
|
|
|
const NMPlatformQdisc *qdisc)
|
|
|
|
|
{
|
|
|
|
|
WaitForNlResponseResult seq_result = WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN;
|
2018-03-09 15:50:16 +01:00
|
|
|
gs_free char *errmsg = NULL;
|
2017-11-15 20:36:35 +01:00
|
|
|
int nle;
|
|
|
|
|
char s_buf[256];
|
|
|
|
|
nm_auto_nlmsg struct nl_msg *msg = NULL;
|
|
|
|
|
|
|
|
|
|
msg = _nl_msg_new_qdisc (RTM_NEWQDISC, flags, qdisc);
|
|
|
|
|
|
|
|
|
|
event_handler_read_netlink (platform, FALSE);
|
|
|
|
|
|
2018-03-09 15:50:16 +01:00
|
|
|
nle = _nl_send_nlmsg (platform, msg, &seq_result, &errmsg, DELAYED_ACTION_RESPONSE_TYPE_VOID, NULL);
|
2017-11-15 20:36:35 +01:00
|
|
|
if (nle < 0) {
|
|
|
|
|
_LOGE ("do-add-qdisc: failed sending netlink request \"%s\" (%d)",
|
2018-12-22 13:35:57 +01:00
|
|
|
nm_strerror (nle), -nle);
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return -NME_PL_NETLINK;
|
2017-11-15 20:36:35 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
delayed_action_handle_all (platform, FALSE);
|
|
|
|
|
|
|
|
|
|
nm_assert (seq_result);
|
|
|
|
|
|
|
|
|
|
_NMLOG (seq_result == WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK
|
|
|
|
|
? LOGL_DEBUG
|
|
|
|
|
: LOGL_WARN,
|
|
|
|
|
"do-add-qdisc: %s",
|
2018-03-09 15:50:16 +01:00
|
|
|
wait_for_nl_response_to_string (seq_result, errmsg, s_buf, sizeof (s_buf)));
|
2017-11-15 20:36:35 +01:00
|
|
|
|
|
|
|
|
if (seq_result == WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK)
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return 0;
|
2017-11-15 20:36:35 +01:00
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return -NME_UNSPEC;
|
2017-11-15 20:36:35 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
static int
|
2017-11-15 20:36:35 +01:00
|
|
|
tfilter_add (NMPlatform *platform,
|
|
|
|
|
NMPNlmFlags flags,
|
|
|
|
|
const NMPlatformTfilter *tfilter)
|
|
|
|
|
{
|
|
|
|
|
WaitForNlResponseResult seq_result = WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN;
|
2018-03-09 15:50:16 +01:00
|
|
|
gs_free char *errmsg = NULL;
|
2017-11-15 20:36:35 +01:00
|
|
|
int nle;
|
|
|
|
|
char s_buf[256];
|
|
|
|
|
nm_auto_nlmsg struct nl_msg *msg = NULL;
|
|
|
|
|
|
|
|
|
|
msg = _nl_msg_new_tfilter (RTM_NEWTFILTER, flags, tfilter);
|
|
|
|
|
|
|
|
|
|
event_handler_read_netlink (platform, FALSE);
|
|
|
|
|
|
2018-03-09 15:50:16 +01:00
|
|
|
nle = _nl_send_nlmsg (platform, msg, &seq_result, &errmsg, DELAYED_ACTION_RESPONSE_TYPE_VOID, NULL);
|
2017-11-15 20:36:35 +01:00
|
|
|
if (nle < 0) {
|
|
|
|
|
_LOGE ("do-add-tfilter: failed sending netlink request \"%s\" (%d)",
|
2018-12-22 13:35:57 +01:00
|
|
|
nm_strerror (nle), -nle);
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return -NME_PL_NETLINK;
|
2017-11-15 20:36:35 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
delayed_action_handle_all (platform, FALSE);
|
|
|
|
|
|
|
|
|
|
nm_assert (seq_result);
|
|
|
|
|
|
|
|
|
|
_NMLOG (seq_result == WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK
|
|
|
|
|
? LOGL_DEBUG
|
|
|
|
|
: LOGL_WARN,
|
|
|
|
|
"do-add-tfilter: %s",
|
2018-03-09 15:50:16 +01:00
|
|
|
wait_for_nl_response_to_string (seq_result, errmsg, s_buf, sizeof (s_buf)));
|
2017-11-15 20:36:35 +01:00
|
|
|
|
|
|
|
|
if (seq_result == WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK)
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return 0;
|
2017-11-15 20:36:35 +01:00
|
|
|
|
platform: merge NMPlatformError with nm-error
Platform had it's own scheme for reporting errors: NMPlatformError.
Before, NMPlatformError indicated success via zero, negative integer
values are numbers from <errno.h>, and positive integer values are
platform specific codes. This changes now according to nm-error:
success is still zero. Negative values indicate a failure, where the
numeric value is either from <errno.h> or one of our error codes.
The meaning of positive values depends on the functions. Most functions
can only report an error reason (negative) and success (zero). For such
functions, positive values should never be returned (but the caller
should anticipate them).
For some functions, positive values could mean additional information
(but still success). That depends.
This is also what systemd does, except that systemd only returns
(negative) integers from <errno.h>, while we merge our own error codes
into the range of <errno.h>.
The advantage is to get rid of one way how to signal errors. The other
advantage is, that these error codes are compatible with all other
nm-errno values. For example, previously negative values indicated error
codes from <errno.h>, but it did not entail error codes from netlink.
2018-12-22 14:13:05 +01:00
|
|
|
return -NME_UNSPEC;
|
2017-11-15 20:36:35 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2013-03-27 22:23:24 +01:00
|
|
|
#define EVENT_CONDITIONS ((GIOCondition) (G_IO_IN | G_IO_PRI))
|
|
|
|
|
#define ERROR_CONDITIONS ((GIOCondition) (G_IO_ERR | G_IO_NVAL))
|
|
|
|
|
#define DISCONNECT_CONDITIONS ((GIOCondition) (G_IO_HUP))
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
event_handler (GIOChannel *channel,
|
2014-03-04 18:07:05 -05:00
|
|
|
GIOCondition io_condition,
|
|
|
|
|
gpointer user_data)
|
2013-03-27 22:23:24 +01:00
|
|
|
{
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
delayed_action_handle_all (NM_PLATFORM (user_data), TRUE);
|
|
|
|
|
return TRUE;
|
|
|
|
|
}
|
|
|
|
|
|
2015-12-11 19:25:00 +01:00
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
|
|
|
|
/* copied from libnl3's recvmsgs() */
|
|
|
|
|
static int
|
2015-12-13 10:30:26 +01:00
|
|
|
event_handler_recvmsgs (NMPlatform *platform, gboolean handle_events)
|
2015-12-11 19:25:00 +01:00
|
|
|
{
|
|
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
|
2015-12-15 10:51:26 +01:00
|
|
|
struct nl_sock *sk = priv->nlh;
|
2018-02-17 16:33:00 +01:00
|
|
|
int n;
|
|
|
|
|
int err = 0;
|
|
|
|
|
gboolean multipart = 0;
|
|
|
|
|
gboolean interrupted = FALSE;
|
2015-12-11 19:25:00 +01:00
|
|
|
struct nlmsghdr *hdr;
|
2015-12-14 14:47:41 +01:00
|
|
|
WaitForNlResponseResult seq_result;
|
2015-12-11 19:25:00 +01:00
|
|
|
struct sockaddr_nl nla = {0};
|
2018-12-23 14:39:22 +01:00
|
|
|
struct ucred creds;
|
|
|
|
|
gboolean creds_has;
|
2016-02-12 15:46:20 +01:00
|
|
|
nm_auto_free unsigned char *buf = NULL;
|
2015-12-11 19:25:00 +01:00
|
|
|
|
|
|
|
|
continue_reading:
|
2016-02-12 15:46:20 +01:00
|
|
|
g_clear_pointer (&buf, free);
|
2018-12-23 14:39:22 +01:00
|
|
|
n = nl_recv (sk, &nla, &buf, &creds, &creds_has);
|
2015-12-11 19:25:00 +01:00
|
|
|
|
2018-02-17 16:33:00 +01:00
|
|
|
if (n <= 0) {
|
|
|
|
|
|
2018-12-22 13:35:57 +01:00
|
|
|
if (n == -NME_NL_MSG_TRUNC) {
|
2018-02-17 16:33:00 +01:00
|
|
|
int buf_size;
|
|
|
|
|
|
|
|
|
|
/* the message receive buffer was too small. We lost one message, which
|
|
|
|
|
* is unfortunate. Try to double the buffer size for the next time. */
|
|
|
|
|
buf_size = nl_socket_get_msg_buf_size (sk);
|
|
|
|
|
if (buf_size < 512*1024) {
|
|
|
|
|
buf_size *= 2;
|
|
|
|
|
_LOGT ("netlink: recvmsg: increase message buffer size for recvmsg() to %d bytes", buf_size);
|
|
|
|
|
if (nl_socket_set_msg_buf_size (sk, buf_size) < 0)
|
|
|
|
|
nm_assert_not_reached ();
|
|
|
|
|
if (!handle_events)
|
|
|
|
|
goto continue_reading;
|
|
|
|
|
}
|
2016-01-25 15:08:32 +01:00
|
|
|
}
|
2015-12-13 10:09:02 +01:00
|
|
|
|
2015-12-11 19:25:00 +01:00
|
|
|
return n;
|
2018-02-17 16:33:00 +01:00
|
|
|
}
|
2015-12-11 19:25:00 +01:00
|
|
|
|
|
|
|
|
hdr = (struct nlmsghdr *) buf;
|
2015-12-13 09:59:10 +01:00
|
|
|
while (nlmsg_ok (hdr, n)) {
|
2016-02-12 15:46:20 +01:00
|
|
|
nm_auto_nlmsg struct nl_msg *msg = NULL;
|
2015-12-14 14:47:41 +01:00
|
|
|
gboolean abort_parsing = FALSE;
|
2016-04-07 21:19:45 +02:00
|
|
|
gboolean process_valid_msg = FALSE;
|
|
|
|
|
guint32 seq_number;
|
2017-08-04 09:53:24 +02:00
|
|
|
char buf_nlmsghdr[400];
|
2018-03-09 15:50:16 +01:00
|
|
|
const char *extack_msg = NULL;
|
2015-12-14 14:47:41 +01:00
|
|
|
|
2018-02-16 17:43:26 +01:00
|
|
|
msg = nlmsg_alloc_convert (hdr);
|
2015-12-11 19:25:00 +01:00
|
|
|
|
2015-12-13 09:59:10 +01:00
|
|
|
nlmsg_set_proto (msg, NETLINK_ROUTE);
|
|
|
|
|
nlmsg_set_src (msg, &nla);
|
2015-12-11 19:25:00 +01:00
|
|
|
|
2018-12-23 14:39:22 +01:00
|
|
|
if (!creds_has || creds.pid) {
|
|
|
|
|
if (!creds_has)
|
2016-02-12 16:36:25 +01:00
|
|
|
_LOGT ("netlink: recvmsg: received message without credentials");
|
2018-12-23 14:39:22 +01:00
|
|
|
else
|
|
|
|
|
_LOGT ("netlink: recvmsg: received non-kernel message (pid %d)", creds.pid);
|
2016-02-12 16:39:05 +01:00
|
|
|
err = 0;
|
2015-12-13 10:03:22 +01:00
|
|
|
goto stop;
|
|
|
|
|
}
|
|
|
|
|
|
2017-08-04 09:53:24 +02:00
|
|
|
_LOGt ("netlink: recvmsg: new message %s",
|
2018-02-16 10:40:33 +01:00
|
|
|
nl_nlmsghdr_to_str (hdr, buf_nlmsghdr, sizeof (buf_nlmsghdr)));
|
2015-12-14 14:47:41 +01:00
|
|
|
|
2018-12-23 14:39:22 +01:00
|
|
|
nlmsg_set_creds (msg, &creds);
|
2015-12-11 19:25:00 +01:00
|
|
|
|
|
|
|
|
if (hdr->nlmsg_flags & NLM_F_MULTI)
|
2018-02-17 16:33:00 +01:00
|
|
|
multipart = TRUE;
|
2015-12-11 19:25:00 +01:00
|
|
|
|
|
|
|
|
if (hdr->nlmsg_flags & NLM_F_DUMP_INTR) {
|
|
|
|
|
/*
|
|
|
|
|
* We have to continue reading to clear
|
|
|
|
|
* all messages until a NLMSG_DONE is
|
|
|
|
|
* received and report the inconsistency.
|
|
|
|
|
*/
|
2018-02-17 16:33:00 +01:00
|
|
|
interrupted = TRUE;
|
2015-12-11 19:25:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Other side wishes to see an ack for this message */
|
|
|
|
|
if (hdr->nlmsg_flags & NLM_F_ACK) {
|
|
|
|
|
/* FIXME: implement */
|
|
|
|
|
}
|
|
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
seq_result = WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_UNKNOWN;
|
|
|
|
|
|
2015-12-11 19:25:00 +01:00
|
|
|
if (hdr->nlmsg_type == NLMSG_DONE) {
|
|
|
|
|
/* messages terminates a multipart message, this is
|
|
|
|
|
* usually the end of a message and therefore we slip
|
|
|
|
|
* out of the loop by default. the user may overrule
|
|
|
|
|
* this action by skipping this packet. */
|
2018-02-17 16:33:00 +01:00
|
|
|
multipart = FALSE;
|
2015-12-14 14:47:41 +01:00
|
|
|
seq_result = WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK;
|
2015-12-11 19:25:00 +01:00
|
|
|
} else if (hdr->nlmsg_type == NLMSG_NOOP) {
|
|
|
|
|
/* Message to be ignored, the default action is to
|
|
|
|
|
* skip this message if no callback is specified. The
|
|
|
|
|
* user may overrule this action by returning
|
|
|
|
|
* NL_PROCEED. */
|
|
|
|
|
} else if (hdr->nlmsg_type == NLMSG_OVERRUN) {
|
|
|
|
|
/* Data got lost, report back to user. The default action is to
|
|
|
|
|
* quit parsing. The user may overrule this action by retuning
|
|
|
|
|
* NL_SKIP or NL_PROCEED (dangerous) */
|
2018-12-22 13:35:57 +01:00
|
|
|
err = -NME_NL_MSG_OVERFLOW;
|
2015-12-14 14:47:41 +01:00
|
|
|
abort_parsing = TRUE;
|
2015-12-11 19:25:00 +01:00
|
|
|
} else if (hdr->nlmsg_type == NLMSG_ERROR) {
|
|
|
|
|
/* Message carries a nlmsgerr */
|
2015-12-13 09:59:10 +01:00
|
|
|
struct nlmsgerr *e = nlmsg_data (hdr);
|
2015-12-11 19:25:00 +01:00
|
|
|
|
2015-12-13 09:59:10 +01:00
|
|
|
if (hdr->nlmsg_len < nlmsg_size (sizeof (*e))) {
|
2015-12-11 19:25:00 +01:00
|
|
|
/* Truncated error message, the default action
|
|
|
|
|
* is to stop parsing. The user may overrule
|
|
|
|
|
* this action by returning NL_SKIP or
|
|
|
|
|
* NL_PROCEED (dangerous) */
|
2018-12-22 13:35:57 +01:00
|
|
|
err = -NME_NL_MSG_TRUNC;
|
2015-12-14 14:47:41 +01:00
|
|
|
abort_parsing = TRUE;
|
2015-12-11 19:25:00 +01:00
|
|
|
} else if (e->error) {
|
2019-01-31 16:53:45 +01:00
|
|
|
int errsv = nm_errno_native (e->error);
|
2018-03-08 17:01:35 +01:00
|
|
|
|
|
|
|
|
if ( NM_FLAGS_HAS (hdr->nlmsg_flags, NLM_F_ACK_TLVS)
|
|
|
|
|
&& hdr->nlmsg_len >= sizeof (*e) + e->msg.nlmsg_len) {
|
2019-02-15 11:33:57 +01:00
|
|
|
static const struct nla_policy policy[] = {
|
2018-03-08 17:01:35 +01:00
|
|
|
[NLMSGERR_ATTR_MSG] = { .type = NLA_STRING },
|
|
|
|
|
[NLMSGERR_ATTR_OFFS] = { .type = NLA_U32 },
|
|
|
|
|
};
|
2019-02-15 11:33:57 +01:00
|
|
|
struct nlattr *tb[G_N_ELEMENTS (policy)];
|
2018-03-08 17:01:35 +01:00
|
|
|
struct nlattr *tlvs;
|
|
|
|
|
|
|
|
|
|
tlvs = (struct nlattr *) ((char *) e + sizeof (*e) + e->msg.nlmsg_len - NLMSG_HDRLEN);
|
2019-02-15 11:33:57 +01:00
|
|
|
if (nla_parse_arr (tb,
|
|
|
|
|
tlvs,
|
|
|
|
|
hdr->nlmsg_len - sizeof (*e) - e->msg.nlmsg_len,
|
|
|
|
|
policy) >= 0) {
|
2018-03-08 17:01:35 +01:00
|
|
|
if (tb[NLMSGERR_ATTR_MSG])
|
|
|
|
|
extack_msg = nla_get_string (tb[NLMSGERR_ATTR_MSG]);
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-12-14 14:47:41 +01:00
|
|
|
|
2015-12-11 19:25:00 +01:00
|
|
|
/* Error message reported back from kernel. */
|
2018-03-08 17:01:35 +01:00
|
|
|
_LOGD ("netlink: recvmsg: error message from kernel: %s (%d)%s%s%s for request %d",
|
2019-01-31 17:22:18 +01:00
|
|
|
nm_strerror_native (errsv),
|
2015-12-14 14:47:41 +01:00
|
|
|
errsv,
|
2018-03-08 17:01:35 +01:00
|
|
|
NM_PRINT_FMT_QUOTED (extack_msg, " \"", extack_msg, "\"", ""),
|
2015-12-14 14:47:41 +01:00
|
|
|
nlmsg_hdr (msg)->nlmsg_seq);
|
2019-01-31 16:53:45 +01:00
|
|
|
seq_result = -NM_ERRNO_NATIVE (errsv);
|
2015-12-14 14:47:41 +01:00
|
|
|
} else
|
|
|
|
|
seq_result = WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK;
|
2016-04-07 21:19:45 +02:00
|
|
|
} else
|
|
|
|
|
process_valid_msg = TRUE;
|
|
|
|
|
|
|
|
|
|
seq_number = nlmsg_hdr (msg)->nlmsg_seq;
|
|
|
|
|
|
platform: track refresh-all actions that are in progress
We aim to keep the platform cache up-to-date only via the netlink
events. However, due to kernel shortcomings we often have to resync
by re-requesting the data, which especially for routes and addresses
means a full dump (as you cannot request only specific route/address
information).
Thus it makes sense to avoid expensive dumps whenever we can.
We schedule dumps via "delayed-actions" and that is already smart
so that muliple schedulings are combined. However, before requesting
a new dump, we clear the flag that indicates that a dump is scheduled.
Thus, while processing the result of of a dump, we would re-schedule
anew which can be necessary in some cases.
In certain cases, we don't require a full resync, when we are in the
middle of processing a dump, because that one dump will provide us
with the full picture. Thus, we can avoid scheduling a new dump if
- we already scheduled a delayed action
- we are in the middle or processing a dump.
This can now be checked via delayed_action_refresh_all_in_progress().
2016-04-07 19:02:19 +02:00
|
|
|
/* check whether the seq number is different from before, and
|
|
|
|
|
* whether the previous number (@nlh_seq_last_seen) is a pending
|
|
|
|
|
* refresh-all request. In that case, the pending request is thereby
|
|
|
|
|
* completed.
|
|
|
|
|
*
|
|
|
|
|
* We must do that before processing the message with event_valid_msg(),
|
|
|
|
|
* because we must track the completion of the pending request before that. */
|
|
|
|
|
event_seq_check_refresh_all (platform, seq_number);
|
|
|
|
|
|
2016-04-07 21:19:45 +02:00
|
|
|
if (process_valid_msg) {
|
2015-12-11 19:25:00 +01:00
|
|
|
/* Valid message (not checking for MULTIPART bit to
|
|
|
|
|
* get along with broken kernels. NL_SKIP has no
|
|
|
|
|
* effect on this. */
|
2016-01-24 18:46:14 +01:00
|
|
|
|
|
|
|
|
event_valid_msg (platform, msg, handle_events);
|
|
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
seq_result = WAIT_FOR_NL_RESPONSE_RESULT_RESPONSE_OK;
|
2015-12-11 19:25:00 +01:00
|
|
|
}
|
2015-12-14 14:47:41 +01:00
|
|
|
|
2018-03-09 15:50:16 +01:00
|
|
|
event_seq_check (platform, seq_number, seq_result, extack_msg);
|
2015-12-14 14:47:41 +01:00
|
|
|
|
2016-02-12 16:41:51 +01:00
|
|
|
if (abort_parsing)
|
|
|
|
|
goto stop;
|
2016-02-12 16:20:52 +01:00
|
|
|
|
|
|
|
|
err = 0;
|
|
|
|
|
hdr = nlmsg_next (hdr, &n);
|
2015-12-11 19:25:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (multipart) {
|
|
|
|
|
/* Multipart message not yet complete, continue reading */
|
|
|
|
|
goto continue_reading;
|
|
|
|
|
}
|
|
|
|
|
stop:
|
2016-01-25 14:38:35 +01:00
|
|
|
if (!handle_events) {
|
|
|
|
|
/* when we don't handle events, we want to drain all messages from the socket
|
|
|
|
|
* without handling the messages (but still check for sequence numbers).
|
|
|
|
|
* Repeat reading. */
|
|
|
|
|
goto continue_reading;
|
|
|
|
|
}
|
2018-02-16 17:43:26 +01:00
|
|
|
|
2015-12-11 19:25:00 +01:00
|
|
|
if (interrupted)
|
2018-12-22 13:35:57 +01:00
|
|
|
return -NME_NL_DUMP_INTR;
|
2015-12-11 19:25:00 +01:00
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
static gboolean
|
2015-12-15 10:40:41 +01:00
|
|
|
event_handler_read_netlink (NMPlatform *platform, gboolean wait_for_acks)
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
nm_auto_pop_netns NMPNetns *netns = NULL;
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
|
2018-02-19 13:42:03 +01:00
|
|
|
int r;
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
struct pollfd pfd;
|
|
|
|
|
gboolean any = FALSE;
|
2015-12-14 14:47:41 +01:00
|
|
|
int timeout_ms;
|
|
|
|
|
struct {
|
|
|
|
|
guint32 seq_number;
|
|
|
|
|
gint64 timeout_abs_ns;
|
2018-02-19 13:42:03 +01:00
|
|
|
gint64 now_ns;
|
|
|
|
|
} next;
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2018-02-19 13:42:03 +01:00
|
|
|
if (!nm_platform_netns_push (platform, &netns)) {
|
|
|
|
|
delayed_action_wait_for_nl_response_complete_all (platform,
|
|
|
|
|
WAIT_FOR_NL_RESPONSE_RESULT_FAILED_SETNS);
|
2016-02-19 01:06:28 +01:00
|
|
|
return FALSE;
|
2018-02-19 13:42:03 +01:00
|
|
|
}
|
2016-02-19 01:06:28 +01:00
|
|
|
|
2018-02-19 13:42:03 +01:00
|
|
|
for (;;) {
|
|
|
|
|
for (;;) {
|
|
|
|
|
int nle;
|
2015-12-15 10:40:41 +01:00
|
|
|
|
|
|
|
|
nle = event_handler_recvmsgs (platform, TRUE);
|
|
|
|
|
|
2017-09-07 09:22:35 +02:00
|
|
|
if (nle < 0) {
|
2015-12-15 10:40:41 +01:00
|
|
|
switch (nle) {
|
2018-02-16 15:03:42 +01:00
|
|
|
case -EAGAIN:
|
2015-12-15 10:40:41 +01:00
|
|
|
goto after_read;
|
2018-12-22 13:35:57 +01:00
|
|
|
case -NME_NL_DUMP_INTR:
|
|
|
|
|
_LOGD ("netlink: read: uncritical failure to retrieve incoming events: %s (%d)", nm_strerror (nle), nle);
|
2015-12-15 10:40:41 +01:00
|
|
|
break;
|
2018-12-22 13:35:57 +01:00
|
|
|
case -NME_NL_MSG_TRUNC:
|
2018-02-17 16:33:00 +01:00
|
|
|
case -ENOBUFS:
|
2016-11-30 16:40:54 +01:00
|
|
|
_LOGI ("netlink: read: %s. Need to resynchronize platform cache",
|
|
|
|
|
({
|
|
|
|
|
const char *_reason = "unknown";
|
|
|
|
|
switch (nle) {
|
2018-12-22 13:35:57 +01:00
|
|
|
case -NME_NL_MSG_TRUNC: _reason = "message truncated"; break;
|
2018-02-17 16:33:00 +01:00
|
|
|
case -ENOBUFS: _reason = "too many netlink events"; break;
|
2016-11-30 16:40:54 +01:00
|
|
|
}
|
|
|
|
|
_reason;
|
|
|
|
|
}));
|
2015-12-15 10:40:41 +01:00
|
|
|
event_handler_recvmsgs (platform, FALSE);
|
2018-02-19 13:42:03 +01:00
|
|
|
delayed_action_wait_for_nl_response_complete_all (platform,
|
|
|
|
|
WAIT_FOR_NL_RESPONSE_RESULT_FAILED_RESYNC);
|
|
|
|
|
|
2015-12-15 10:40:41 +01:00
|
|
|
delayed_action_schedule (platform,
|
|
|
|
|
DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS |
|
|
|
|
|
DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES |
|
|
|
|
|
DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES |
|
|
|
|
|
DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES |
|
2017-11-15 20:36:35 +01:00
|
|
|
DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES |
|
2017-11-15 20:36:35 +01:00
|
|
|
DELAYED_ACTION_TYPE_REFRESH_ALL_QDISCS |
|
|
|
|
|
DELAYED_ACTION_TYPE_REFRESH_ALL_TFILTERS,
|
2015-12-15 10:40:41 +01:00
|
|
|
NULL);
|
|
|
|
|
break;
|
|
|
|
|
default:
|
2018-12-22 13:35:57 +01:00
|
|
|
_LOGE ("netlink: read: failed to retrieve incoming events: %s (%d)", nm_strerror (nle), nle);
|
2015-12-15 10:40:41 +01:00
|
|
|
break;
|
2017-09-07 09:22:35 +02:00
|
|
|
}
|
2015-12-15 10:40:41 +01:00
|
|
|
}
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
any = TRUE;
|
2015-12-15 10:40:41 +01:00
|
|
|
}
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
after_read:
|
|
|
|
|
|
|
|
|
|
if (!NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE))
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
return any;
|
|
|
|
|
|
2018-02-19 13:42:03 +01:00
|
|
|
delayed_action_wait_for_nl_response_complete_check (platform,
|
|
|
|
|
WAIT_FOR_NL_RESPONSE_RESULT_UNKNOWN,
|
|
|
|
|
&next.seq_number,
|
|
|
|
|
&next.timeout_abs_ns,
|
|
|
|
|
&next.now_ns);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
2015-12-14 14:47:41 +01:00
|
|
|
if ( !wait_for_acks
|
|
|
|
|
|| !NM_FLAGS_HAS (priv->delayed_action.flags, DELAYED_ACTION_TYPE_WAIT_FOR_NL_RESPONSE))
|
|
|
|
|
return any;
|
|
|
|
|
|
2018-02-19 13:42:03 +01:00
|
|
|
nm_assert (next.seq_number);
|
|
|
|
|
nm_assert (next.now_ns > 0);
|
|
|
|
|
nm_assert (next.timeout_abs_ns > next.now_ns);
|
2015-12-14 14:47:41 +01:00
|
|
|
|
2018-02-19 13:42:03 +01:00
|
|
|
_LOGT ("netlink: read: wait for ACK for sequence number %u...", next.seq_number);
|
2015-12-14 14:47:41 +01:00
|
|
|
|
2018-02-19 13:42:03 +01:00
|
|
|
timeout_ms = (next.timeout_abs_ns - next.now_ns) / (NM_UTILS_NS_PER_SECOND / 1000);
|
2015-12-14 14:47:41 +01:00
|
|
|
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
memset (&pfd, 0, sizeof (pfd));
|
2015-12-15 10:51:26 +01:00
|
|
|
pfd.fd = nl_socket_get_fd (priv->nlh);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
pfd.events = POLLIN;
|
2015-12-14 14:47:41 +01:00
|
|
|
r = poll (&pfd, 1, MAX (1, timeout_ms));
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
|
|
|
|
|
if (r == 0) {
|
2015-12-14 14:47:41 +01:00
|
|
|
/* timeout and there is nothing to read. */
|
|
|
|
|
goto after_read;
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
}
|
2018-02-19 13:42:03 +01:00
|
|
|
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
if (r < 0) {
|
|
|
|
|
int errsv = errno;
|
|
|
|
|
|
|
|
|
|
if (errsv != EINTR) {
|
2019-01-31 17:22:18 +01:00
|
|
|
_LOGE ("netlink: read: poll failed with %s", nm_strerror_native (errsv));
|
2015-12-14 14:47:41 +01:00
|
|
|
delayed_action_wait_for_nl_response_complete_all (platform, WAIT_FOR_NL_RESPONSE_RESULT_FAILED_POLL);
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
return any;
|
|
|
|
|
}
|
|
|
|
|
/* Continue to read again, even if there might be nothing to read after EINTR. */
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2013-03-27 22:23:24 +01:00
|
|
|
|
2015-04-06 18:29:36 +02:00
|
|
|
static void
|
2017-03-12 15:54:02 +01:00
|
|
|
cache_update_link_udev (NMPlatform *platform,
|
|
|
|
|
int ifindex,
|
|
|
|
|
struct udev_device *udevice)
|
2015-04-06 18:29:36 +02:00
|
|
|
{
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
nm_auto_nmpobj const NMPObject *obj_old = NULL;
|
|
|
|
|
nm_auto_nmpobj const NMPObject *obj_new = NULL;
|
2015-04-06 18:29:36 +02:00
|
|
|
NMPCacheOpsType cache_op;
|
|
|
|
|
|
2017-06-29 11:18:10 +02:00
|
|
|
cache_op = nmp_cache_update_link_udev (nm_platform_get_cache (platform), ifindex, udevice, &obj_old, &obj_new);
|
2016-02-19 01:06:28 +01:00
|
|
|
|
|
|
|
|
if (cache_op != NMP_CACHE_OPS_UNCHANGED) {
|
|
|
|
|
nm_auto_pop_netns NMPNetns *netns = NULL;
|
|
|
|
|
|
platform: use NMDedupMultiIndex for routes in NMPCache
Rework platform object cache to use NMDedupMultiIndex.
Already previously, NMPCache used NMMultiIndex and had thus
O(1) for most operations. What is new is:
- Contrary to NMMultiIndex, NMDedupMultiIndex preserves the order of
the cached items. That is crucial to handle routes properly as kernel
will replace the first matching route based on network/plen/metric
properties. See related bug rh#1337855.
Without tracking the order of routes as they are exposed
by kernel, we cannot properly maintain the route cache.
- All NMPObject instances are now treated immutable, refcounted
and get de-duplicated via NMDedupMultiIndex. This allows
to have a global NMDedupMultiIndex that can be shared with
NMIP4Config and NMRouteManager. It also allows to share the
objects themselves.
Immutable objects are so much nicer. We can get rid of the
update pre-hook callback, which was required previously because
we would mutate the object inplace. Now, we can just update
the cache, and compare obj_old and obj_new after the fact.
- NMMultiIndex was treated as an internal of NMPCache. On the other
hand, NMDedupMultiIndex exposes NMDedupMultiHeadEntry, which is
basically an object that allows to iterate over all related
objects. That means, we can now lookup objects in the cache
and give the NMDedupMultiHeadEntry instance to the caller,
which then can iterate the list on it's own -- without need
for copying anything.
Currently, at various places we still create copies of lookup
results. That can be improved later.
The ability to share NMPObject instances should enable us to
significantly improve performance and scale with large number
of routes.
Of course there is a memory overhead of having an index for each list
entry. Each NMPObject may also require an NMDedupMultiEntry,
NMDedupMultiHeadEntry, and NMDedupMultiBox item, which are tracked
in a GHashTable. Optimally, one NMDedupMultiHeadEntry is the head
for multiple objects, and NMDedupMultiBox is able to deduplicate several
NMPObjects, so that there is a net saving.
Also, each object type has several indexes of type NMPCacheIdType.
So, worst case an NMPlatformIP4Route in the platform cache is tracked
by 8 NMPCacheIdType indexes, for each we require a NMDedupMultiEntry,
plus the shared NMDedupMultiHeadEntry. The NMDedupMultiBox instance
is shared between the 8 indexes (and possibly other).
2017-06-21 10:53:34 +02:00
|
|
|
cache_on_change (platform, cache_op, obj_old, obj_new);
|
2016-02-19 01:06:28 +01:00
|
|
|
if (!nm_platform_netns_push (platform, &netns))
|
|
|
|
|
return;
|
2017-06-29 13:13:54 +02:00
|
|
|
nm_platform_cache_update_emit_signal (platform, cache_op, obj_old, obj_new);
|
2016-02-19 01:06:28 +01:00
|
|
|
}
|
2015-04-06 18:29:36 +02:00
|
|
|
}
|
|
|
|
|
|
2013-05-29 12:00:50 -03:00
|
|
|
static void
|
|
|
|
|
udev_device_added (NMPlatform *platform,
|
2017-03-12 15:54:02 +01:00
|
|
|
struct udev_device *udevice)
|
2013-05-29 12:00:50 -03:00
|
|
|
{
|
2013-08-07 12:35:05 -05:00
|
|
|
const char *ifname;
|
2017-03-12 15:54:02 +01:00
|
|
|
const char *ifindex_s;
|
2013-05-29 12:00:50 -03:00
|
|
|
int ifindex;
|
|
|
|
|
|
2017-03-12 15:54:02 +01:00
|
|
|
ifname = udev_device_get_sysname (udevice);
|
2013-05-29 12:00:50 -03:00
|
|
|
if (!ifname) {
|
2015-08-30 16:01:55 +02:00
|
|
|
_LOGD ("udev-add: failed to get device's interface");
|
2013-05-29 12:00:50 -03:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2017-03-12 15:54:02 +01:00
|
|
|
ifindex_s = udev_device_get_property_value (udevice, "IFINDEX");
|
|
|
|
|
if (!ifindex_s) {
|
2016-03-04 13:19:34 +01:00
|
|
|
_LOGW ("udev-add[%s]failed to get device's ifindex", ifname);
|
2014-04-17 14:57:55 +02:00
|
|
|
return;
|
|
|
|
|
}
|
2017-03-12 15:54:02 +01:00
|
|
|
ifindex = _nm_utils_ascii_str_to_int64 (ifindex_s, 10, 1, G_MAXINT, 0);
|
2014-04-17 14:57:55 +02:00
|
|
|
if (ifindex <= 0) {
|
2016-03-04 13:19:34 +01:00
|
|
|
_LOGW ("udev-add[%s]: retrieved invalid IFINDEX=%d", ifname, ifindex);
|
2013-05-29 12:00:50 -03:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2017-03-12 15:54:02 +01:00
|
|
|
if (!udev_device_get_syspath (udevice)) {
|
2016-03-04 13:19:34 +01:00
|
|
|
_LOGD ("udev-add[%s,%d]: couldn't determine device path; ignoring...", ifname, ifindex);
|
2013-05-29 12:00:50 -03:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2016-03-04 13:19:34 +01:00
|
|
|
_LOGT ("udev-add[%s,%d]: device added", ifname, ifindex);
|
2017-03-12 15:54:02 +01:00
|
|
|
cache_update_link_udev (platform, ifindex, udevice);
|
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
|
|
|
}
|
2014-04-17 14:57:55 +02:00
|
|
|
|
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
|
|
|
static gboolean
|
2017-03-12 15:54:02 +01:00
|
|
|
_udev_device_removed_match_link (const NMPObject *obj, gpointer udevice)
|
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
|
|
|
{
|
2017-03-12 15:54:02 +01:00
|
|
|
return obj->_link.udev.device == udevice;
|
2013-05-29 12:00:50 -03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
udev_device_removed (NMPlatform *platform,
|
2017-03-12 15:54:02 +01:00
|
|
|
struct udev_device *udevice)
|
2013-05-29 12:00:50 -03:00
|
|
|
{
|
2017-03-12 15:54:02 +01:00
|
|
|
const char *ifindex_s;
|
2013-07-26 17:03:39 +02:00
|
|
|
int ifindex = 0;
|
2013-05-29 12:00:50 -03:00
|
|
|
|
2017-03-12 15:54:02 +01:00
|
|
|
ifindex_s = udev_device_get_property_value (udevice, "IFINDEX");
|
|
|
|
|
ifindex = _nm_utils_ascii_str_to_int64 (ifindex_s, 10, 1, G_MAXINT, 0);
|
|
|
|
|
if (ifindex <= 0) {
|
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
|
|
|
const NMPObject *obj;
|
2013-05-29 12:00:50 -03:00
|
|
|
|
2017-06-29 11:18:10 +02:00
|
|
|
obj = nmp_cache_lookup_link_full (nm_platform_get_cache (platform),
|
2017-03-12 15:54:02 +01:00
|
|
|
0, NULL, FALSE, NM_LINK_TYPE_NONE, _udev_device_removed_match_link, udevice);
|
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
|
|
|
if (obj)
|
|
|
|
|
ifindex = obj->link.ifindex;
|
2013-05-29 12:00:50 -03:00
|
|
|
}
|
2013-07-26 17:03:39 +02:00
|
|
|
|
2015-08-30 16:01:55 +02:00
|
|
|
_LOGD ("udev-remove: IFINDEX=%d", ifindex);
|
2014-04-17 14:57:55 +02:00
|
|
|
if (ifindex <= 0)
|
|
|
|
|
return;
|
2013-07-26 17:03:39 +02:00
|
|
|
|
platform: use new platform caching
Switch platform caching implementation. Instead of caching libnl
objects, cache our own types.
Don't remove yet the now obsolete functions.
Advantage:
* Performance
- as we now cache our native NMPlatformObject instances, we no longer
have to convert libnl objects every time we access the platform
cache.
- for most cases, access is now O(1) because we can lookup the object
in a hash table. Note that ip4_address_get_all() still has to
create a copy of the result (O(n)), but as the caller is about to
use those elements, he cannot do better then O(n) anyway.
* We cache our own native types and have full control over them. We
cannot extend the libnl objects, which has many short-commings:
- _rtnl_addr_hack_lifetimes_rel_to_abs() to convert the timestamps
to absolute values (and back).
- hack_empty_master_iff_lower_up() would modify the internal flag,
but it looses the original value. That means, we can only hack
the state before putting a link into the cache, but we cannot revert
that change, when a slave in the cache changes state.
That was previously solved by always refetching the master when
a slave changed. Now we can re-evaluate the connected state
(DELAYED_ACTION_TYPE_MASTER_CONNECTED).
- we implement functions like equality, to-string as most suitable
for us. Before we needed hacks like nm_nl_object_diff(),
nm_nl_cache_search(), route_search_cache().
- we can extend our objects with exactly those properties we care,
and possibly additional properties that are not representable in
the libnl objects.
- we no longer cache RTM_F_CLONED routes and they get rejected early
on as we receive them.
- In the future, maybe it'd be interesting the make platform objects
immutable (and ref-counted) and expose them directly.
* Previous implementation did not order the refresh of objects but
called check_cache_items(). Now, those actions are delayed and
combined in an attempt to reduce the overall number of reloads.
Realize how expensive a check_cache_items() for addresses and routes
was: it would iterate all addresses/routes and call refresh_object().
The latter obtains a full dump of *all* objects again, and ignores
all but the needle.
Note that we probably still schedule some delayed actions that
are not needed.
Later we can optimize that further (related bug bgo #747985).
While some of these points could also have been implemented with
caching of libnl objects, that would have become hard to maintain.
https://bugzilla.gnome.org/show_bug.cgi?id=747981
2015-05-05 02:30:25 +02:00
|
|
|
cache_update_link_udev (platform, ifindex, NULL);
|
2013-05-29 12:00:50 -03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2017-03-12 15:54:02 +01:00
|
|
|
handle_udev_event (NMUdevClient *udev_client,
|
|
|
|
|
struct udev_device *udevice,
|
2013-05-29 12:00:50 -03:00
|
|
|
gpointer user_data)
|
|
|
|
|
{
|
2016-02-19 01:06:28 +01:00
|
|
|
nm_auto_pop_netns NMPNetns *netns = NULL;
|
2013-05-29 12:00:50 -03:00
|
|
|
NMPlatform *platform = NM_PLATFORM (user_data);
|
|
|
|
|
const char *subsys;
|
2013-10-15 19:45:42 +02:00
|
|
|
const char *ifindex;
|
2013-10-16 18:24:59 +02:00
|
|
|
guint64 seqnum;
|
2017-03-12 15:54:02 +01:00
|
|
|
const char *action;
|
|
|
|
|
|
|
|
|
|
action = udev_device_get_action (udevice);
|
|
|
|
|
g_return_if_fail (action);
|
2013-05-29 12:00:50 -03:00
|
|
|
|
2017-03-12 15:54:02 +01:00
|
|
|
subsys = udev_device_get_subsystem (udevice);
|
|
|
|
|
g_return_if_fail (nm_streq0 (subsys, "net"));
|
2013-05-29 12:00:50 -03:00
|
|
|
|
2016-02-19 01:06:28 +01:00
|
|
|
if (!nm_platform_netns_push (platform, &netns))
|
|
|
|
|
return;
|
|
|
|
|
|
2017-03-12 15:54:02 +01:00
|
|
|
ifindex = udev_device_get_property_value (udevice, "IFINDEX");
|
|
|
|
|
seqnum = udev_device_get_seqnum (udevice);
|
2015-08-30 16:01:55 +02:00
|
|
|
_LOGD ("UDEV event: action '%s' subsys '%s' device '%s' (%s); seqnum=%" G_GUINT64_FORMAT,
|
2017-03-12 15:54:02 +01:00
|
|
|
action, subsys, udev_device_get_sysname (udevice),
|
2018-04-24 11:20:03 +02:00
|
|
|
ifindex ?: "unknown", seqnum);
|
2013-05-29 12:00:50 -03:00
|
|
|
|
2017-03-12 15:54:02 +01:00
|
|
|
if (NM_IN_STRSET (action, "add", "move"))
|
|
|
|
|
udev_device_added (platform, udevice);
|
|
|
|
|
else if (NM_IN_STRSET (action, "remove"))
|
|
|
|
|
udev_device_removed (platform, udevice);
|
2013-05-29 12:00:50 -03:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2013-05-29 12:00:50 -03:00
|
|
|
|
2018-09-04 16:43:44 +02:00
|
|
|
void
|
|
|
|
|
nm_linux_platform_setup (void)
|
|
|
|
|
{
|
|
|
|
|
nm_platform_setup (nm_linux_platform_new (FALSE, FALSE));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2013-03-27 22:23:24 +01:00
|
|
|
static void
|
2015-05-10 09:16:31 +02:00
|
|
|
nm_linux_platform_init (NMLinuxPlatform *self)
|
2013-03-27 22:23:24 +01:00
|
|
|
{
|
2016-09-29 13:49:01 +02:00
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (self);
|
2015-05-10 09:16:31 +02:00
|
|
|
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
priv->delayed_action.list_master_connected = g_ptr_array_new ();
|
|
|
|
|
priv->delayed_action.list_refresh_link = g_ptr_array_new ();
|
2015-12-14 14:47:41 +01:00
|
|
|
priv->delayed_action.list_wait_for_nl_response = g_array_new (FALSE, TRUE, sizeof (DelayedActionWaitForNlResponseData));
|
2013-03-27 22:23:24 +01:00
|
|
|
}
|
|
|
|
|
|
2015-04-18 12:53:45 +02:00
|
|
|
/* GObject constructed(): set up the generic and route netlink sockets,
 * the event GIOChannel/watch, schedule the initial full cache dump, and
 * (if udev is in use) replay the already-initialized udev devices.
 * NOTE(review): reconstructed from a blame-annotated dump; statement
 * order is preserved exactly — it matters (sockets must exist before the
 * watch, and the cache populate happens after chaining up). */
static void
constructed (GObject *_object)
{
	NMPlatform *platform = NM_PLATFORM (_object);
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);
	int channel_flags;
	gboolean status;
	int nle;

	nm_assert (!platform->_netns || platform->_netns == nmp_netns_get_current ());

	/* Only subscribe to udev "net" events when udev is usable. */
	if (nm_platform_get_use_udev (platform)) {
		priv->udev_client = nm_udev_client_new ((const char *[]) { "net", NULL },
		                                        handle_udev_event, platform);
	}

	_LOGD ("create (%s netns, %s, %s udev)",
	       !platform->_netns ? "ignore" : "use",
	       !platform->_netns && nmp_netns_is_initial ()
	         ? "initial netns"
	         : (!nmp_netns_get_current ()
	              ? "no netns support"
	              : nm_sprintf_bufa (100, "in netns[%p]%s",
	                                 nmp_netns_get_current (),
	                                 nmp_netns_get_current () == nmp_netns_get_initial () ? "/main" : "")),
	       nm_platform_get_use_udev (platform) ? "use" : "no");

	/* Generic netlink socket. Failure is non-fatal: priv->genl stays NULL. */
	priv->genl = nl_socket_alloc ();
	g_assert (priv->genl);

	nle = nl_connect (priv->genl, NETLINK_GENERIC);
	if (nle) {
		_LOGE ("unable to connect the generic netlink socket \"%s\" (%d)",
		       nm_strerror (nle), -nle);
		nl_socket_free (priv->genl);
		priv->genl = NULL;
	}

	/* Route netlink socket — this one is mandatory (hard asserts). */
	priv->nlh = nl_socket_alloc ();
	g_assert (priv->nlh);

	nle = nl_connect (priv->nlh, NETLINK_ROUTE);
	g_assert (!nle);
	nle = nl_socket_set_passcred (priv->nlh, 1);
	g_assert (!nle);

	/* No blocking for event socket, so that we can drain it safely. */
	nle = nl_socket_set_nonblocking (priv->nlh);
	g_assert (!nle);

	/* use 8 MB for receive socket kernel queue. */
	nle = nl_socket_set_buffer_size (priv->nlh, 8*1024*1024, 0);
	g_assert (!nle);

	/* Extended acks are best-effort (older kernels lack them). */
	nle = nl_socket_set_ext_ack (priv->nlh, TRUE);
	if (nle)
		_LOGD ("could not enable extended acks on netlink socket");

	/* explicitly set the msg buffer size and disable MSG_PEEK.
	 * If we later encounter NME_NL_MSG_TRUNC, we will adjust the buffer size. */
	nl_socket_disable_msg_peek (priv->nlh);
	nle = nl_socket_set_msg_buf_size (priv->nlh, 32 * 1024);
	g_assert (!nle);

	/* Subscribe to link, address, route and traffic-control notifications. */
	nle = nl_socket_add_memberships (priv->nlh,
	                                 RTNLGRP_LINK,
	                                 RTNLGRP_IPV4_IFADDR, RTNLGRP_IPV6_IFADDR,
	                                 RTNLGRP_IPV4_ROUTE, RTNLGRP_IPV6_ROUTE,
	                                 RTNLGRP_TC,
	                                 0);
	g_assert (!nle);
	_LOGD ("Netlink socket for events established: port=%u, fd=%d", nl_socket_get_local_port (priv->nlh), nl_socket_get_fd (priv->nlh));

	/* Wrap the event socket's fd in a non-blocking GIOChannel and watch it. */
	priv->event_channel = g_io_channel_unix_new (nl_socket_get_fd (priv->nlh));
	g_io_channel_set_encoding (priv->event_channel, NULL, NULL);

	channel_flags = g_io_channel_get_flags (priv->event_channel);
	status = g_io_channel_set_flags (priv->event_channel,
	                                 channel_flags | G_IO_FLAG_NONBLOCK, NULL);
	g_assert (status);
	priv->event_id = g_io_add_watch (priv->event_channel,
	                                 (EVENT_CONDITIONS | ERROR_CONDITIONS | DISCONNECT_CONDITIONS),
	                                 event_handler, platform);

	/* complete construction of the GObject instance before populating the cache. */
	G_OBJECT_CLASS (nm_linux_platform_parent_class)->constructed (_object);

	_LOGD ("populate platform cache");
	delayed_action_schedule (platform,
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_LINKS |
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ADDRESSES |
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ADDRESSES |
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP4_ROUTES |
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_IP6_ROUTES |
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_QDISCS |
	                         DELAYED_ACTION_TYPE_REFRESH_ALL_TFILTERS,
	                         NULL);

	delayed_action_handle_all (platform, FALSE);

	/* Set up udev monitoring */
	if (priv->udev_client) {
		struct udev_enumerate *enumerator;
		struct udev_list_entry *devices, *l;

		/* And read initial device list */
		enumerator = nm_udev_client_enumerate_new (priv->udev_client);

		udev_enumerate_add_match_is_initialized (enumerator);

		udev_enumerate_scan_devices (enumerator);

		devices = udev_enumerate_get_list_entry (enumerator);
		for (l = devices; l; l = udev_list_entry_get_next (l)) {
			struct udev_device *udevice;

			udevice = udev_device_new_from_syspath (udev_enumerate_get_udev (enumerator),
			                                        udev_list_entry_get_name (l));
			if (!udevice)
				continue;

			udev_device_added (platform, udevice);
			udev_device_unref (udevice);
		}

		udev_enumerate_unref (enumerator);
	}
}
|
|
|
|
|
|
2018-09-04 16:43:44 +02:00
|
|
|
NMPlatform *
|
|
|
|
|
nm_linux_platform_new (gboolean log_with_ptr, gboolean netns_support)
|
|
|
|
|
{
|
|
|
|
|
gboolean use_udev = FALSE;
|
|
|
|
|
|
|
|
|
|
if ( nmp_netns_is_initial ()
|
|
|
|
|
&& access ("/sys", W_OK) == 0)
|
|
|
|
|
use_udev = TRUE;
|
|
|
|
|
|
|
|
|
|
return g_object_new (NM_TYPE_LINUX_PLATFORM,
|
|
|
|
|
NM_PLATFORM_LOG_WITH_PTR, log_with_ptr,
|
|
|
|
|
NM_PLATFORM_USE_UDEV, use_udev,
|
|
|
|
|
NM_PLATFORM_NETNS_SUPPORT, netns_support,
|
|
|
|
|
NULL);
|
|
|
|
|
}
|
|
|
|
|
|
2015-04-14 16:39:51 +02:00
|
|
|
/* GObject dispose(): fail all outstanding netlink response waiters and
 * drop any pending delayed actions before chaining up. The containers
 * themselves are only emptied here; they are freed in finalize(). */
static void
dispose (GObject *object)
{
	NMPlatform *platform = NM_PLATFORM (object);
	NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (platform);

	_LOGD ("dispose");

	/* Wake up everybody still waiting for a netlink ACK/response. */
	delayed_action_wait_for_nl_response_complete_all (platform,
	                                                  WAIT_FOR_NL_RESPONSE_RESULT_FAILED_DISPOSING);

	/* Discard all still-scheduled delayed actions. */
	priv->delayed_action.flags = DELAYED_ACTION_TYPE_NONE;
	g_ptr_array_set_size (priv->delayed_action.list_master_connected, 0);
	g_ptr_array_set_size (priv->delayed_action.list_refresh_link, 0);

	G_OBJECT_CLASS (nm_linux_platform_parent_class)->dispose (object);
}
|
|
|
|
|
|
2013-03-27 22:23:24 +01:00
|
|
|
static void
|
2016-09-29 13:49:01 +02:00
|
|
|
finalize (GObject *object)
|
2013-03-27 22:23:24 +01:00
|
|
|
{
|
|
|
|
|
NMLinuxPlatformPrivate *priv = NM_LINUX_PLATFORM_GET_PRIVATE (object);
|
|
|
|
|
|
platform: fetch objects via the event socket
Use the event socket to request object via NLM_F_DUMP.
No longer use 'priv->nlh' socket to fetch objects.
Instead fetch them via the priv->nlh_event socket that also
provides asynchronous events when objects change.
That way, the events are in sync with our explicit requests
and we can directly use the events. Previously, the events were
only used to indicate that a refetch must happen, so that every
event triggered a complete dump of all addresses/routes.
We still use 'priv->nlh' to make synchronous requests such as
adding/changing/deleting objects. That means, after we send a
request, we must make sure that the result manifested itself
at 'nlh_event' socket and the platform cache.
That's why we sometimes still must force a dump to sync changes.
That could be improved by using only one netlink socket so that
we would wait for the ACK of our request.
While not yet perfect, this already significantly reduces the number of
fetches. Additionally, before, whenever requesting a dump of addresses
or routes (which we did much more often, search for "get_kernel_object for type"
log lines), we always dumped IPv4 and IPv6 together. Now only request
the addr-family in question.
https://bugzilla.gnome.org/show_bug.cgi?id=747985
https://bugzilla.redhat.com/show_bug.cgi?id=1211133
2015-05-10 10:02:31 +02:00
|
|
|
g_ptr_array_unref (priv->delayed_action.list_master_connected);
|
|
|
|
|
g_ptr_array_unref (priv->delayed_action.list_refresh_link);
|
2015-12-14 14:47:41 +01:00
|
|
|
g_array_unref (priv->delayed_action.list_wait_for_nl_response);
|
2015-04-06 18:29:36 +02:00
|
|
|
|
2018-05-15 20:29:30 +02:00
|
|
|
nl_socket_free (priv->genl);
|
|
|
|
|
|
2013-03-27 22:23:24 +01:00
|
|
|
g_source_remove (priv->event_id);
|
|
|
|
|
g_io_channel_unref (priv->event_channel);
|
2015-12-15 10:51:26 +01:00
|
|
|
nl_socket_free (priv->nlh);
|
2013-03-27 22:23:24 +01:00
|
|
|
|
2015-10-06 19:48:35 +02:00
|
|
|
if (priv->sysctl_get_prev_values) {
|
|
|
|
|
sysctl_clear_cache_list = g_slist_remove (sysctl_clear_cache_list, object);
|
2015-08-30 15:51:20 +02:00
|
|
|
g_hash_table_destroy (priv->sysctl_get_prev_values);
|
2015-10-06 19:48:35 +02:00
|
|
|
}
|
2015-08-30 15:51:20 +02:00
|
|
|
|
2017-03-12 15:54:02 +01:00
|
|
|
priv->udev_client = nm_udev_client_unref (priv->udev_client);
|
|
|
|
|
|
2013-03-27 22:23:24 +01:00
|
|
|
G_OBJECT_CLASS (nm_linux_platform_parent_class)->finalize (object);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
nm_linux_platform_class_init (NMLinuxPlatformClass *klass)
|
|
|
|
|
{
|
|
|
|
|
GObjectClass *object_class = G_OBJECT_CLASS (klass);
|
|
|
|
|
NMPlatformClass *platform_class = NM_PLATFORM_CLASS (klass);
|
|
|
|
|
|
2015-04-18 12:53:45 +02:00
|
|
|
object_class->constructed = constructed;
|
2015-04-14 16:39:51 +02:00
|
|
|
object_class->dispose = dispose;
|
2016-09-29 13:49:01 +02:00
|
|
|
object_class->finalize = finalize;
|
2013-03-27 22:23:24 +01:00
|
|
|
|
2013-04-03 16:10:38 +02:00
|
|
|
platform_class->sysctl_set = sysctl_set;
|
|
|
|
|
platform_class->sysctl_get = sysctl_get;
|
|
|
|
|
|
2013-03-27 22:23:24 +01:00
|
|
|
platform_class->link_add = link_add;
|
|
|
|
|
platform_class->link_delete = link_delete;
|
2015-10-12 13:44:44 +02:00
|
|
|
|
2018-02-08 15:17:23 +01:00
|
|
|
platform_class->refresh_all = refresh_all;
|
2014-02-11 13:58:00 +01:00
|
|
|
platform_class->link_refresh = link_refresh;
|
|
|
|
|
|
2016-03-08 13:02:58 +01:00
|
|
|
platform_class->link_set_netns = link_set_netns;
|
|
|
|
|
|
2013-03-27 22:23:24 +01:00
|
|
|
platform_class->link_set_up = link_set_up;
|
|
|
|
|
platform_class->link_set_down = link_set_down;
|
|
|
|
|
platform_class->link_set_arp = link_set_arp;
|
|
|
|
|
platform_class->link_set_noarp = link_set_noarp;
|
2013-03-27 22:23:24 +01:00
|
|
|
|
2015-06-15 14:41:35 +02:00
|
|
|
platform_class->link_get_udi = link_get_udi;
|
2014-10-13 08:26:52 +02:00
|
|
|
|
2014-07-24 15:57:08 -05:00
|
|
|
platform_class->link_set_user_ipv6ll_enabled = link_set_user_ipv6ll_enabled;
|
2016-04-30 16:48:32 +02:00
|
|
|
platform_class->link_set_token = link_set_token;
|
2014-07-24 15:57:08 -05:00
|
|
|
|
2013-03-27 22:53:55 +01:00
|
|
|
platform_class->link_set_address = link_set_address;
|
2014-10-03 17:37:26 -05:00
|
|
|
platform_class->link_get_permanent_address = link_get_permanent_address;
|
2013-04-15 21:48:12 +02:00
|
|
|
platform_class->link_set_mtu = link_set_mtu;
|
2017-07-03 10:10:34 +02:00
|
|
|
platform_class->link_set_name = link_set_name;
|
2018-05-23 14:11:14 +02:00
|
|
|
platform_class->link_set_sriov_params = link_set_sriov_params;
|
2018-05-23 14:33:24 +02:00
|
|
|
platform_class->link_set_sriov_vfs = link_set_sriov_vfs;
|
2013-03-27 22:53:55 +01:00
|
|
|
|
2013-10-11 14:59:26 -04:00
|
|
|
platform_class->link_get_physical_port_id = link_get_physical_port_id;
|
2015-03-24 12:35:36 -05:00
|
|
|
platform_class->link_get_dev_id = link_get_dev_id;
|
2014-02-05 11:56:44 +01:00
|
|
|
platform_class->link_get_wake_on_lan = link_get_wake_on_lan;
|
2014-10-03 13:41:49 -05:00
|
|
|
platform_class->link_get_driver_info = link_get_driver_info;
|
2013-10-11 14:59:26 -04:00
|
|
|
|
2013-03-27 22:53:55 +01:00
|
|
|
platform_class->link_supports_carrier_detect = link_supports_carrier_detect;
|
|
|
|
|
platform_class->link_supports_vlans = link_supports_vlans;
|
2017-04-14 23:03:33 +02:00
|
|
|
platform_class->link_supports_sriov = link_supports_sriov;
|
2013-03-27 22:53:55 +01:00
|
|
|
|
2013-03-27 22:53:55 +01:00
|
|
|
platform_class->link_enslave = link_enslave;
|
|
|
|
|
platform_class->link_release = link_release;
|
|
|
|
|
|
2016-03-21 15:22:10 +01:00
|
|
|
platform_class->link_can_assume = link_can_assume;
|
|
|
|
|
|
2013-03-27 22:53:55 +01:00
|
|
|
platform_class->vlan_add = vlan_add;
|
2015-10-27 16:14:54 +01:00
|
|
|
platform_class->link_vlan_change = link_vlan_change;
|
2018-12-25 18:41:28 +01:00
|
|
|
platform_class->link_wireguard_change = link_wireguard_change;
|
2015-10-14 10:01:48 +02:00
|
|
|
platform_class->link_vxlan_add = link_vxlan_add;
|
2013-03-27 22:53:55 +01:00
|
|
|
|
2013-06-10 16:21:08 -03:00
|
|
|
platform_class->infiniband_partition_add = infiniband_partition_add;
|
2016-04-20 09:16:21 +02:00
|
|
|
platform_class->infiniband_partition_delete = infiniband_partition_delete;
|
2013-06-10 16:21:08 -03:00
|
|
|
|
2014-02-04 14:27:03 +01:00
|
|
|
platform_class->wifi_get_capabilities = wifi_get_capabilities;
|
|
|
|
|
platform_class->wifi_get_bssid = wifi_get_bssid;
|
|
|
|
|
platform_class->wifi_get_frequency = wifi_get_frequency;
|
|
|
|
|
platform_class->wifi_get_quality = wifi_get_quality;
|
|
|
|
|
platform_class->wifi_get_rate = wifi_get_rate;
|
|
|
|
|
platform_class->wifi_get_mode = wifi_get_mode;
|
|
|
|
|
platform_class->wifi_set_mode = wifi_set_mode;
|
2014-10-23 14:19:59 -04:00
|
|
|
platform_class->wifi_set_powersave = wifi_set_powersave;
|
2014-02-04 14:27:03 +01:00
|
|
|
platform_class->wifi_find_frequency = wifi_find_frequency;
|
|
|
|
|
platform_class->wifi_indicate_addressing_running = wifi_indicate_addressing_running;
|
2018-06-19 14:44:36 +02:00
|
|
|
platform_class->wifi_get_wake_on_wlan = wifi_get_wake_on_wlan;
|
2018-05-25 17:43:54 +02:00
|
|
|
platform_class->wifi_set_wake_on_wlan = wifi_set_wake_on_wlan;
|
2014-02-04 14:27:03 +01:00
|
|
|
|
|
|
|
|
platform_class->mesh_get_channel = mesh_get_channel;
|
|
|
|
|
platform_class->mesh_set_channel = mesh_set_channel;
|
|
|
|
|
platform_class->mesh_set_ssid = mesh_set_ssid;
|
|
|
|
|
|
2018-06-05 15:20:54 +02:00
|
|
|
platform_class->wpan_get_pan_id = wpan_get_pan_id;
|
|
|
|
|
platform_class->wpan_set_pan_id = wpan_set_pan_id;
|
|
|
|
|
platform_class->wpan_get_short_addr = wpan_get_short_addr;
|
|
|
|
|
platform_class->wpan_set_short_addr = wpan_set_short_addr;
|
2018-09-19 19:43:09 +02:00
|
|
|
platform_class->wpan_set_channel = wpan_set_channel;
|
2018-06-05 15:20:54 +02:00
|
|
|
|
2015-09-01 22:11:47 +02:00
|
|
|
platform_class->link_gre_add = link_gre_add;
|
2015-11-27 22:22:25 +01:00
|
|
|
platform_class->link_ip6tnl_add = link_ip6tnl_add;
|
2018-06-26 12:06:43 +02:00
|
|
|
platform_class->link_ip6gre_add = link_ip6gre_add;
|
2016-06-30 18:20:09 +02:00
|
|
|
platform_class->link_macsec_add = link_macsec_add;
|
2015-12-03 17:09:50 +01:00
|
|
|
platform_class->link_macvlan_add = link_macvlan_add;
|
2015-11-27 14:01:56 +01:00
|
|
|
platform_class->link_ipip_add = link_ipip_add;
|
2015-11-11 18:41:48 +01:00
|
|
|
platform_class->link_sit_add = link_sit_add;
|
core/platform: add support for TUN/TAP netlink support and various cleanup
Kernel recently got support for exposing TUN/TAP information on netlink
[1], [2], [3]. Add support for it to the platform cache.
The advantage of using netlink is that querying sysctl bypasses the
order of events of the netlink socket. It is out of sync and racy. For
example, platform cache might still think that a tun device exists, but
a subsequent lookup at sysfs might fail because the device was deleted
in the meantime. Another point is, that we don't get change
notifications via sysctl and that it requires various extra syscalls
to read the device information. If the tun information is present on
netlink, put it into the cache. This bypasses checking sysctl while
we keep looking at sysctl for backward compatibility until we require
support from kernel.
Notes:
- we had two link types NM_LINK_TYPE_TAP and NM_LINK_TYPE_TUN. This
deviates from the model of how kernel treats TUN/TAP devices, which
makes it more complicated. The link type of a NMPlatformLink instance
should match what kernel thinks about the device. Point in case,
when parsing RTM_NETLINK messages, we very early need to determine
the link type (_linktype_get_type()). However, to determine the
type of a TUN/TAP at that point, we need to look into nested
netlink attributes which in turn depend on the type (IFLA_INFO_KIND
and IFLA_INFO_DATA), or even worse, we would need to look into
sysctl for older kernel vesions. Now, the TUN/TAP type is a property
of the link type NM_LINK_TYPE_TUN, instead of determining two
different link types.
- various parts of the API (both kernel's sysctl vs. netlink) and
NMDeviceTun vs. NMSettingTun disagree whether the PI is positive
(NM_SETTING_TUN_PI, IFLA_TUN_PI, NMPlatformLnkTun.pi) or inverted
(NM_DEVICE_TUN_NO_PI, IFF_NO_PI). There is no consistent way,
but prefer the positive form for internal API at NMPlatformLnkTun.pi.
- previously NMDeviceTun.mode could not change after initializing
the object. Allow for that to happen, because forcing some properties
that are reported by kernel to not change is wrong, in case they
might change. Of course, in practice kernel doesn't allow the device
to ever change its type, but the type property of the NMDeviceTun
should not make that assumption, because, if it actually changes, what
would it mean?
- note that as of now, new netlink API is not yet merged to mainline Linus
tree. Shortcut _parse_lnk_tun() to not accidentally use unstable API
for now.
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1277457
[2] https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git/commit/?id=1ec010e705934c8acbe7dbf31afc81e60e3d828b
[3] https://git.kernel.org/pub/scm/network/iproute2/iproute2-next.git/commit/?id=118eda77d6602616bc523a17ee45171e879d1818
https://bugzilla.redhat.com/show_bug.cgi?id=1547213
https://github.com/NetworkManager/NetworkManager/pull/77
2018-03-13 15:29:03 +01:00
|
|
|
platform_class->link_tun_add = link_tun_add;
|
2018-05-22 11:50:46 +02:00
|
|
|
platform_class->link_6lowpan_add = link_6lowpan_add;
|
2015-09-01 22:11:47 +02:00
|
|
|
|
2017-11-29 13:10:39 +01:00
|
|
|
platform_class->object_delete = object_delete;
|
2013-03-27 22:23:24 +01:00
|
|
|
platform_class->ip4_address_add = ip4_address_add;
|
|
|
|
|
platform_class->ip6_address_add = ip6_address_add;
|
|
|
|
|
platform_class->ip4_address_delete = ip4_address_delete;
|
|
|
|
|
platform_class->ip6_address_delete = ip6_address_delete;
|
2013-03-27 22:23:24 +01:00
|
|
|
|
2017-08-02 10:27:32 +02:00
|
|
|
platform_class->ip_route_add = ip_route_add;
|
2017-08-16 16:13:24 +02:00
|
|
|
platform_class->ip_route_get = ip_route_get;
|
2014-01-07 17:21:12 +01:00
|
|
|
|
2017-11-15 20:36:35 +01:00
|
|
|
platform_class->qdisc_add = qdisc_add;
|
2017-11-15 20:36:35 +01:00
|
|
|
platform_class->tfilter_add = tfilter_add;
|
2017-11-15 20:36:35 +01:00
|
|
|
|
2017-10-10 18:20:05 +02:00
|
|
|
platform_class->check_kernel_support = check_kernel_support;
|
2015-06-19 15:38:41 +02:00
|
|
|
|
|
|
|
|
platform_class->process_events = process_events;
|
2013-03-27 22:23:24 +01:00
|
|
|
}
|
2015-04-06 18:29:36 +02:00
|
|
|
|