/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2017 Red Hat, Inc.
 */

#include "nm-default.h"

#include "nm-netns.h"

#include "nm-glib-aux/nm-dedup-multi.h"
#include "nm-glib-aux/nm-c-list.h"

#include "NetworkManagerUtils.h"
#include "nm-core-internal.h"
#include "nm-l3cfg.h"
#include "platform/nm-platform.h"
#include "platform/nmp-netns.h"
#include "platform/nmp-rules-manager.h"
/*****************************************************************************/

/* Declares the property enum (PROP_PLATFORM, _PROPERTY_ENUMS_LAST) and the
 * obj_properties[] array used by set_property()/class_init below. */
NM_GOBJECT_PROPERTIES_DEFINE_BASE(PROP_PLATFORM, );
|
|
|
|
|
|
|
|
|
|
typedef struct {
    /* Stable pointer back to the owning NMNetns. Its address is passed as
     * user-data to the platform signal connections, so dispose() can drop
     * them all with g_signal_handlers_disconnect_by_data(). */
    NMNetns *_self_signal_user_data;

    NMPlatform *platform;

    NMPNetns *platform_netns;

    NMPRulesManager *rules_manager;

    /* Set of L3CfgData entries. The leading "ifindex" field acts as key
     * (hashed/compared via nm_pint_hash()/nm_pint_equals(), see constructed()). */
    GHashTable *l3cfgs;

    /* Set of NMNetnsSharedIPHandle, keyed by the leading "addr" field.
     * Created lazily; NULL while no shared-IP range is reserved. */
    GHashTable *shared_ips;

    /* L3CfgData entries that have platform changes queued, drained by
     * _platform_signal_on_idle_cb(). */
    CList l3cfg_signal_pending_lst_head;

    /* GSource id of the pending idle handler; 0 when none is scheduled. */
    guint signal_pending_idle_id;
} NMNetnsPrivate;
|
|
|
|
|
|
|
|
|
|
/* Instance struct: plain GObject with the private data embedded. */
struct _NMNetns {
    GObject parent;
    NMNetnsPrivate _priv;
};

struct _NMNetnsClass {
    GObjectClass parent;
};

G_DEFINE_TYPE(NMNetns, nm_netns, G_TYPE_OBJECT);

/* Accessor for the embedded private struct (type-checked via NM_IS_NETNS). */
#define NM_NETNS_GET_PRIVATE(self) _NM_GET_PRIVATE(self, NMNetns, NM_IS_NETNS)
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2020-07-18 19:01:04 +02:00
|
|
|
/* Logging: all messages go to the LOGD_CORE domain with a "netns[<ptr>]"
 * prefix identifying the (obfuscated) instance. A variable named "self"
 * must be in scope at every _NMLOG() call site. */
#define _NMLOG_DOMAIN      LOGD_CORE
#define _NMLOG_PREFIX_NAME "netns"
#define _NMLOG(level, ...)                                                                    \
    G_STMT_START                                                                              \
    {                                                                                         \
        nm_log((level),                                                                       \
               (_NMLOG_DOMAIN),                                                               \
               NULL,                                                                          \
               NULL,                                                                          \
               "netns[" NM_HASH_OBFUSCATE_PTR_FMT "]: " _NM_UTILS_MACRO_FIRST(__VA_ARGS__),   \
               NM_HASH_OBFUSCATE_PTR(self) _NM_UTILS_MACRO_REST(__VA_ARGS__));                \
    }                                                                                         \
    G_STMT_END
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2017-04-17 18:40:52 +02:00
|
|
|
/* Defines nm_netns_get(), returning the process-wide NMNetns singleton. */
NM_DEFINE_SINGLETON_GETTER(NMNetns, nm_netns_get, NM_TYPE_NETNS);
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
|
|
|
|
NMPNetns *
|
|
|
|
|
nm_netns_get_platform_netns(NMNetns *self)
|
|
|
|
|
{
|
|
|
|
|
return NM_NETNS_GET_PRIVATE(self)->platform_netns;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
NMPlatform *
|
|
|
|
|
nm_netns_get_platform(NMNetns *self)
|
|
|
|
|
{
|
|
|
|
|
return NM_NETNS_GET_PRIVATE(self)->platform;
|
|
|
|
|
}
|
|
|
|
|
|
2019-03-11 11:37:40 +01:00
|
|
|
NMPRulesManager *
|
|
|
|
|
nm_netns_get_rules_manager(NMNetns *self)
|
|
|
|
|
{
|
|
|
|
|
return NM_NETNS_GET_PRIVATE(self)->rules_manager;
|
|
|
|
|
}
|
|
|
|
|
|
core: pass NMDedupMultiIndex instance to NMIP4Config and other
NMIP4Config, NMIP6Config, and NMPlatform shall share one
NMDedupMultiIndex instance.
For that, pass an NMDedupMultiIndex instance to NMPlatform and NMNetns.
NMNetns than passes it on to NMDevice, NMDhcpClient, NMIP4Config and NMIP6Config.
So currently NMNetns is the access point to the shared NMDedupMultiIndex
instance, and it gets it from it's NMPlatform instance.
The NMDedupMultiIndex instance is really a singleton, we don't want
multiple instances of it. However, for testing, instead of adding a
singleton instance, pass the instance explicitly around.
2017-06-12 08:16:47 +02:00
|
|
|
NMDedupMultiIndex *
|
|
|
|
|
nm_netns_get_multi_idx(NMNetns *self)
|
|
|
|
|
{
|
|
|
|
|
return nm_platform_get_multi_idx(NM_NETNS_GET_PRIVATE(self)->platform);
|
|
|
|
|
}
|
|
|
|
|
|
2017-04-17 18:40:52 +02:00
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2020-07-18 19:01:04 +02:00
|
|
|
/* Per-interface entry of priv->l3cfgs. The leading "ifindex" field doubles
 * as the hash key (nm_pint_hash()/nm_pint_equals() hash the first int). */
typedef struct {
    int ifindex;

    /* Accumulated nmp_object_type_to_flags() bits for changes seen since
     * the last idle emission; consumed by _platform_signal_on_idle_cb(). */
    guint32 signal_pending_obj_type_flags;

    /* Not owned: only weakly referenced (see _l3cfg_weak_notify()). */
    NML3Cfg *l3cfg;

    /* Linked into priv->l3cfg_signal_pending_lst_head while changes are
     * queued; unlinked otherwise. */
    CList signal_pending_lst;
} L3CfgData;
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
_l3cfg_data_free(gpointer ptr)
|
|
|
|
|
{
|
|
|
|
|
L3CfgData *l3cfg_data = ptr;
|
|
|
|
|
|
2020-07-21 11:21:44 +02:00
|
|
|
c_list_unlink_stale(&l3cfg_data->signal_pending_lst);
|
|
|
|
|
|
2020-07-18 19:01:04 +02:00
|
|
|
nm_g_slice_free(l3cfg_data);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Weak-ref callback, invoked while an NML3Cfg is being destroyed.
 * Drops its (now dangling) entry from priv->l3cfgs and, when that was the
 * last entry, releases the self-reference taken in nm_netns_access_l3cfg(). */
static void
_l3cfg_weak_notify(gpointer data, GObject *where_the_object_was)
{
    NMNetns        *self    = NM_NETNS(data);
    NMNetnsPrivate *priv    = NM_NETNS_GET_PRIVATE(data);
    NML3Cfg        *l3cfg   = NM_L3CFG(where_the_object_was);
    int             ifindex = nm_l3cfg_get_ifindex(l3cfg);

    /* the hash owns the L3CfgData; removal frees the entry via
     * _l3cfg_data_free() (which also unlinks it from the pending list). */
    if (!g_hash_table_remove(priv->l3cfgs, &ifindex))
        nm_assert_not_reached();

    if (NM_UNLIKELY(g_hash_table_size(priv->l3cfgs) == 0))
        g_object_unref(self);
}
|
|
|
|
|
|
2020-10-21 12:15:06 +02:00
|
|
|
NML3Cfg *
|
|
|
|
|
nm_netns_get_l3cfg(NMNetns *self, int ifindex)
|
|
|
|
|
{
|
|
|
|
|
NMNetnsPrivate *priv;
|
|
|
|
|
|
|
|
|
|
g_return_val_if_fail(NM_IS_NETNS(self), NULL);
|
|
|
|
|
g_return_val_if_fail(ifindex > 0, NULL);
|
|
|
|
|
|
|
|
|
|
priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
|
|
|
|
|
return g_hash_table_lookup(priv->l3cfgs, &ifindex);
|
|
|
|
|
}
|
|
|
|
|
|
2020-07-18 19:01:04 +02:00
|
|
|
/* Returns the NML3Cfg for @ifindex, creating it on first access.
 *
 * The caller receives a full reference in both branches: either a new ref
 * on the existing instance, or the initial ref from nm_l3cfg_new() (the
 * netns itself keeps only a weak ref, see _l3cfg_weak_notify()). */
NML3Cfg *
nm_netns_access_l3cfg(NMNetns *self, int ifindex)
{
    NMNetnsPrivate *priv;
    L3CfgData      *l3cfg_data;

    g_return_val_if_fail(NM_IS_NETNS(self), NULL);
    g_return_val_if_fail(ifindex > 0, NULL);

    priv = NM_NETNS_GET_PRIVATE(self);

    l3cfg_data = g_hash_table_lookup(priv->l3cfgs, &ifindex);

    if (l3cfg_data) {
        nm_log_trace(LOGD_CORE,
                     "l3cfg[" NM_HASH_OBFUSCATE_PTR_FMT ",ifindex=%d] %s",
                     NM_HASH_OBFUSCATE_PTR(l3cfg_data->l3cfg),
                     ifindex,
                     "referenced");
        return g_object_ref(l3cfg_data->l3cfg);
    }

    l3cfg_data  = g_slice_new(L3CfgData);
    *l3cfg_data = (L3CfgData){
        .ifindex            = ifindex,
        .l3cfg              = nm_l3cfg_new(self, ifindex),
        .signal_pending_lst = C_LIST_INIT(l3cfg_data->signal_pending_lst),
    };

    /* the entry doubles as its own key (leading ifindex field). */
    if (!g_hash_table_add(priv->l3cfgs, l3cfg_data))
        nm_assert_not_reached();

    /* the first tracked l3cfg keeps the netns alive; the matching unref
     * happens in _l3cfg_weak_notify() when the last one goes away. */
    if (NM_UNLIKELY(g_hash_table_size(priv->l3cfgs) == 1))
        g_object_ref(self);

    g_object_weak_ref(G_OBJECT(l3cfg_data->l3cfg), _l3cfg_weak_notify, self);

    /* Transfer ownership! We keep only a weak ref. */
    return l3cfg_data->l3cfg;
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2020-07-21 11:21:44 +02:00
|
|
|
/* Idle handler: flushes the batched platform-change notifications queued by
 * _platform_signal_cb() to the affected NML3Cfg instances. */
static gboolean
_platform_signal_on_idle_cb(gpointer user_data)
{
    gs_unref_object NMNetns *self = g_object_ref(NM_NETNS(user_data));
    NMNetnsPrivate          *priv = NM_NETNS_GET_PRIVATE(self);
    L3CfgData               *l3cfg_data;
    CList                    work_list;

    priv->signal_pending_idle_id = 0;

    /* we emit all queued signals together. However, we don't want to hook the
     * main loop for longer than the currently queued elements.
     *
     * If we catch more change events, they will be queued and processed by a future
     * idle handler.
     *
     * Hence, move the list to a temporary list. Isn't CList great? */

    c_list_init(&work_list);
    c_list_splice(&work_list, &priv->l3cfg_signal_pending_lst_head);

    while ((l3cfg_data = c_list_first_entry(&work_list, L3CfgData, signal_pending_lst))) {
        nm_assert(NM_IS_L3CFG(l3cfg_data->l3cfg));
        c_list_unlink(&l3cfg_data->signal_pending_lst);
        /* hand over the accumulated object-type flags and reset them to 0
         * (nm_steal_int()), so new changes start a fresh batch. */
        _nm_l3cfg_notify_platform_change_on_idle(
            l3cfg_data->l3cfg,
            nm_steal_int(&l3cfg_data->signal_pending_obj_type_flags));
    }

    return G_SOURCE_REMOVE;
}
|
|
|
|
|
|
|
|
|
|
/* Common handler for the platform's link/address/route change signals
 * (connected in constructed()). Accumulates the change for batched idle
 * delivery and additionally forwards it synchronously to the interface's
 * NML3Cfg, if one is tracked. */
static void
_platform_signal_cb(NMPlatform   *platform,
                    int           obj_type_i,
                    int           ifindex,
                    gconstpointer platform_object,
                    int           change_type_i,
                    NMNetns     **p_self)
{
    /* user-data is &priv->_self_signal_user_data, which points to self. */
    NMNetns                         *self = NM_NETNS(*p_self);
    NMNetnsPrivate                  *priv = NM_NETNS_GET_PRIVATE(self);
    const NMPObjectType              obj_type    = obj_type_i;
    const NMPlatformSignalChangeType change_type = change_type_i;
    L3CfgData                       *l3cfg_data;

    /* only interfaces with a tracked NML3Cfg are of interest. */
    l3cfg_data = g_hash_table_lookup(priv->l3cfgs, &ifindex);
    if (!l3cfg_data)
        return;

    l3cfg_data->signal_pending_obj_type_flags |= nmp_object_type_to_flags(obj_type);

    /* queue the entry (once) and make sure an idle handler is scheduled. */
    if (c_list_is_empty(&l3cfg_data->signal_pending_lst)) {
        c_list_link_tail(&priv->l3cfg_signal_pending_lst_head, &l3cfg_data->signal_pending_lst);
        if (priv->signal_pending_idle_id == 0)
            priv->signal_pending_idle_id = g_idle_add(_platform_signal_on_idle_cb, self);
    }

    /* also notify synchronously, with the concrete changed object. */
    _nm_l3cfg_notify_platform_change(l3cfg_data->l3cfg,
                                     change_type,
                                     NMP_OBJECT_UP_CAST(platform_object));
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2020-09-14 15:07:18 +02:00
|
|
|
/* Reserves a 10.42.x.0/24 subnet for IPv4 shared mode and returns a handle.
 *
 * Subnets are handed out starting at 10.42.0.1, advancing the third octet.
 * When all 256 subnets are taken, the last one (10.42.255.1) is re-used and
 * reference-counted instead of failing. Release with
 * nm_netns_shared_ip_release(). */
NMNetnsSharedIPHandle *
nm_netns_shared_ip_reserve(NMNetns *self)
{
    NMNetnsPrivate        *priv;
    NMNetnsSharedIPHandle *handle;
    const in_addr_t        addr_start = ntohl(0x0a2a0001u); /* 10.42.0.1 */
    in_addr_t              addr;
    char                   sbuf_addr[NM_UTILS_INET_ADDRSTRLEN];

    /* Find an unused address in the 10.42.x.x range */

    g_return_val_if_fail(NM_IS_NETNS(self), NULL);

    priv = NM_NETNS_GET_PRIVATE(self);

    if (!priv->shared_ips) {
        /* first reservation: take the base address and keep the netns
         * alive while any reservation exists (unref'd on last release). */
        addr             = addr_start;
        priv->shared_ips = g_hash_table_new(nm_puint32_hash, nm_puint32_equals);
        g_object_ref(self);
    } else {
        guint32 count;

        nm_assert(g_hash_table_size(priv->shared_ips) > 0);

        count = 0u;
        for (;;) {
            /* step the third octet: 10.42.<count>.1.
             * NOTE(review): this mixes a host-order addition with an
             * htonl()-converted offset — it matches the byte-order scheme of
             * addr_start above, but confirm on big-endian targets. */
            addr = addr_start + htonl(count << 8u);

            handle = g_hash_table_lookup(priv->shared_ips, &addr);
            if (!handle)
                break;

            count++;

            if (count > 0xFFu) {
                /* all 256 subnets taken: re-use the last probed one
                 * (10.42.255.1) and share it via the handle's ref-count. */
                if (handle->_ref_count == 1) {
                    _LOGE("shared-ip4: ran out of shared IP addresses. Reuse %s/24",
                          _nm_utils_inet4_ntop(handle->addr, sbuf_addr));
                } else {
                    _LOGD("shared-ip4: reserved IP address range %s/24 (duplicate)",
                          _nm_utils_inet4_ntop(handle->addr, sbuf_addr));
                }
                handle->_ref_count++;
                return handle;
            }
        }
    }

    handle  = g_slice_new(NMNetnsSharedIPHandle);
    *handle = (NMNetnsSharedIPHandle){
        .addr       = addr,
        ._ref_count = 1,
        ._self      = self,
    };

    /* the handle is its own key (leading "addr" field). */
    g_hash_table_add(priv->shared_ips, handle);

    _LOGD("shared-ip4: reserved IP address range %s/24",
          _nm_utils_inet4_ntop(handle->addr, sbuf_addr));
    return handle;
}
|
|
|
|
|
|
|
|
|
|
/* Releases a subnet reservation obtained from nm_netns_shared_ip_reserve().
 * Frees the handle on the last reference; otherwise only decrements the
 * ref-count (only the overflow range 10.42.255.1 is ever shared). */
void
nm_netns_shared_ip_release(NMNetnsSharedIPHandle *handle)
{
    NMNetns        *self;
    NMNetnsPrivate *priv;
    char            sbuf_addr[NM_UTILS_INET_ADDRSTRLEN];

    g_return_if_fail(handle);

    self = handle->_self;

    g_return_if_fail(NM_IS_NETNS(self));

    priv = NM_NETNS_GET_PRIVATE(self);

    nm_assert(handle->_ref_count > 0);
    nm_assert(handle == nm_g_hash_table_lookup(priv->shared_ips, handle));

    if (handle->_ref_count > 1) {
        /* shared handles exist only for the exhaustion fallback range. */
        nm_assert(handle->addr == ntohl(0x0A2AFF01u)); /* 10.42.255.1 */
        handle->_ref_count--;
        _LOGD("shared-ip4: release IP address range %s/24 (%d more references held)",
              _nm_utils_inet4_ntop(handle->addr, sbuf_addr),
              handle->_ref_count);
        return;
    }

    if (!g_hash_table_remove(priv->shared_ips, handle))
        nm_assert_not_reached();

    /* drop the table and the self-ref taken on the first reservation. */
    if (g_hash_table_size(priv->shared_ips) == 0) {
        nm_clear_pointer(&priv->shared_ips, g_hash_table_unref);
        g_object_unref(self);
    }

    /* NOTE(review): the unref above may have destroyed self; _LOGD() only
     * hashes the pointer value (NM_HASH_OBFUSCATE_PTR) and does not
     * dereference it — confirm that remains true if the macro changes. */
    _LOGD("shared-ip4: release IP address range %s/24",
          _nm_utils_inet4_ntop(handle->addr, sbuf_addr));

    handle->_self = NULL;
    nm_g_slice_free(handle);
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2017-04-17 18:40:52 +02:00
|
|
|
static void
|
|
|
|
|
set_property(GObject *object, guint prop_id, const GValue *value, GParamSpec *pspec)
|
|
|
|
|
{
|
|
|
|
|
NMNetns * self = NM_NETNS(object);
|
|
|
|
|
NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-04-17 18:40:52 +02:00
|
|
|
switch (prop_id) {
|
|
|
|
|
case PROP_PLATFORM:
|
|
|
|
|
/* construct-only */
|
|
|
|
|
priv->platform = g_value_get_object(value) ?: NM_PLATFORM_GET;
|
|
|
|
|
if (!priv->platform)
|
|
|
|
|
g_return_if_reached();
|
|
|
|
|
g_object_ref(priv->platform);
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
nm_netns_init(NMNetns *self)
|
|
|
|
|
{
|
2020-07-21 11:21:44 +02:00
|
|
|
NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
|
|
|
|
|
priv->_self_signal_user_data = self;
|
|
|
|
|
c_list_init(&priv->l3cfg_signal_pending_lst_head);
|
2017-04-17 18:40:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* GObject constructed(): finishes setup once the platform property is
 * resolved — creates the l3cfg table and rules manager, seeds the rule
 * tracking, and subscribes to the platform change signals. */
static void
constructed(GObject *object)
{
    NMNetns        *self = NM_NETNS(object);
    NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);

    if (!priv->platform)
        g_return_if_reached();

    /* keys are the entries' leading int (ifindex); values owned by the table. */
    priv->l3cfgs = g_hash_table_new_full(nm_pint_hash, nm_pint_equals, _l3cfg_data_free, NULL);

    priv->platform_netns = nm_platform_netns_get(priv->platform);

    priv->rules_manager = nmp_rules_manager_new(priv->platform);

    /* Weakly track the default rules with a dummy user-tag. These
     * rules are always weakly tracked... */
    nmp_rules_manager_track_default(priv->rules_manager,
                                    AF_UNSPEC,
                                    0,
                                    nm_netns_parent_class /* static dummy user-tag */);

    /* Also weakly track all existing rules. These were added before NetworkManager
     * starts, so they are probably none of NetworkManager's business.
     *
     * However note that during service restart, devices may stay up and rules kept.
     * That means, after restart such rules may have been added by a previous run
     * of NetworkManager, we just don't know.
     *
     * For that reason, whenever we will touch such rules later one, we make them
     * fully owned and no longer weakly tracked. See %NMP_RULES_MANAGER_EXTERN_WEAKLY_TRACKED_USER_TAG. */
    nmp_rules_manager_track_from_platform(priv->rules_manager,
                                          NULL,
                                          AF_UNSPEC,
                                          0,
                                          NMP_RULES_MANAGER_EXTERN_WEAKLY_TRACKED_USER_TAG);

    G_OBJECT_CLASS(nm_netns_parent_class)->constructed(object);

    /* All five change signals share _platform_signal_cb(); the user-data is
     * the address of the self-pointer, so dispose() can disconnect by data. */
    g_signal_connect(priv->platform,
                     NM_PLATFORM_SIGNAL_LINK_CHANGED,
                     G_CALLBACK(_platform_signal_cb),
                     &priv->_self_signal_user_data);
    g_signal_connect(priv->platform,
                     NM_PLATFORM_SIGNAL_IP4_ROUTE_CHANGED,
                     G_CALLBACK(_platform_signal_cb),
                     &priv->_self_signal_user_data);
    g_signal_connect(priv->platform,
                     NM_PLATFORM_SIGNAL_IP6_ROUTE_CHANGED,
                     G_CALLBACK(_platform_signal_cb),
                     &priv->_self_signal_user_data);
    g_signal_connect(priv->platform,
                     NM_PLATFORM_SIGNAL_IP4_ADDRESS_CHANGED,
                     G_CALLBACK(_platform_signal_cb),
                     &priv->_self_signal_user_data);
    g_signal_connect(priv->platform,
                     NM_PLATFORM_SIGNAL_IP6_ADDRESS_CHANGED,
                     G_CALLBACK(_platform_signal_cb),
                     &priv->_self_signal_user_data);
}
|
|
|
|
|
|
|
|
|
|
/* Creates a new NMNetns for @platform. A NULL @platform falls back to the
 * NM_PLATFORM_GET singleton (resolved in set_property()). Caller owns the
 * returned reference. */
NMNetns *
nm_netns_new(NMPlatform *platform)
{
    return g_object_new(NM_TYPE_NETNS, NM_NETNS_PLATFORM, platform, NULL);
}
|
|
|
|
|
|
|
|
|
|
/* GObject dispose(): all l3cfgs and shared-IP handles must already be gone
 * (they each hold a reference on self, so dispose cannot run otherwise). */
static void
dispose(GObject *object)
{
    NMNetns        *self = NM_NETNS(object);
    NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);

    nm_assert(nm_g_hash_table_size(priv->l3cfgs) == 0);
    nm_assert(c_list_is_empty(&priv->l3cfg_signal_pending_lst_head));
    nm_assert(!priv->shared_ips);

    nm_clear_g_source(&priv->signal_pending_idle_id);

    /* disconnect all five _platform_signal_cb() subscriptions at once,
     * matched by their shared user-data pointer. */
    if (priv->platform)
        g_signal_handlers_disconnect_by_data(priv->platform, &priv->_self_signal_user_data);

    g_clear_object(&priv->platform);
    g_clear_pointer(&priv->l3cfgs, g_hash_table_unref);
    nm_clear_pointer(&priv->rules_manager, nmp_rules_manager_unref);

    G_OBJECT_CLASS(nm_netns_parent_class)->dispose(object);
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
nm_netns_class_init(NMNetnsClass *klass)
|
|
|
|
|
{
|
|
|
|
|
GObjectClass *object_class = G_OBJECT_CLASS(klass);
|
|
|
|
|
|
|
|
|
|
object_class->constructed = constructed;
|
|
|
|
|
object_class->set_property = set_property;
|
|
|
|
|
object_class->dispose = dispose;
|
|
|
|
|
|
|
|
|
|
obj_properties[PROP_PLATFORM] =
|
|
|
|
|
g_param_spec_object(NM_NETNS_PLATFORM,
|
|
|
|
|
"",
|
|
|
|
|
"",
|
|
|
|
|
NM_TYPE_PLATFORM,
|
|
|
|
|
G_PARAM_WRITABLE | G_PARAM_CONSTRUCT_ONLY | G_PARAM_STATIC_STRINGS);
|
|
|
|
|
|
|
|
|
|
g_object_class_install_properties(object_class, _PROPERTY_ENUMS_LAST, obj_properties);
|
|
|
|
|
}
|