/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2017 Red Hat, Inc.
 */

#include "src/core/nm-default-daemon.h"

#include "nm-netns.h"

#include <linux/rtnetlink.h>

#include "libnm-glib-aux/nm-dedup-multi.h"
#include "libnm-glib-aux/nm-c-list.h"
#include "NetworkManagerUtils.h"
#include "libnm-core-intern/nm-core-internal.h"
#include "nm-l3cfg.h"
#include "libnm-platform/nm-platform.h"
#include "libnm-platform/nmp-netns.h"
#include "libnm-platform/nmp-global-tracker.h"
#include "libnm-std-aux/c-list-util.h"

/*****************************************************************************/

typedef struct {
    gconstpointer tag;
    CList watcher_by_tag_lst_head;
} WatcherByTag;

typedef struct {
    NMIPAddrTyped addr;
    CList watcher_ip_addr_lst_head;
} WatcherDataIPAddr;

struct _NMNetnsWatcherHandle {
    NMNetnsWatcherType watcher_type;
    NMNetnsWatcherData watcher_data;
    gconstpointer tag;
    NMNetnsWatcherCallback callback;
    gpointer callback_user_data;

    /* This is linked to "WatcherByTag.watcher_by_tag_lst_head" in
     * "priv->watcher_by_tag_idx". */
    CList watcher_tag_lst;

    /* The registration data, which depends on the "watcher_type". */
    union {
        struct {
            CList watcher_ip_addr_lst;
        } ip_addr;
    } reg_data;

    /* nm_netns_watcher_add() will mark the handle as non-dirty, while
     * nm_netns_watcher_remove_all() can delete only dirty handles (while
     * leaving non-dirty handles alive, but marking them as dirty).
     *
     * That allows a pattern where you just add the new handles that you want
     * now, and then call nm_netns_watcher_remove_all() to remove those that
     * should no longer be present. */
    bool watcher_dirty : 1;
};
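
/* A rough sketch of the pattern described above (the exact signatures of
 * nm_netns_watcher_add()/nm_netns_watcher_remove_all() are presumably
 * declared in nm-netns.h and not repeated here):
 *
 *   1. call nm_netns_watcher_add() for every watch that should exist now;
 *      handles that already exist are simply marked non-dirty again, new
 *      ones are created non-dirty.
 *   2. call nm_netns_watcher_remove_all() for the same tag; it deletes the
 *      handles that stayed dirty (i.e. were not re-added) and flips the
 *      surviving handles back to dirty for the next round. */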

NM_GOBJECT_PROPERTIES_DEFINE_BASE(PROP_PLATFORM, );

typedef struct {
    NMNetns *_self_signal_user_data;

    NMPlatform *platform;
    NMPNetns *platform_netns;

    NMPGlobalTracker *global_tracker;

    GHashTable *l3cfgs;

    GHashTable *ip_reservation[_NM_NETNS_IP_RESERVATION_TYPE_NUM];

    GHashTable *ecmp_track_by_obj;
    GHashTable *ecmp_track_by_ecmpid;

    /* Indexes the watcher handles. */
    GHashTable *watcher_idx;

    /* An index of WatcherByTag. It allows one to look up watcher handles by tag.
     * Handles without a tag are not indexed. */
    GHashTable *watcher_by_tag_idx;

    /* Index for WatcherDataIPAddr instances. Allows looking up all subscribers
     * by IP address. */
    GHashTable *watcher_ip_data_idx;

    CList l3cfg_signal_pending_lst_head;

    GSource *signal_pending_idle_source;
} NMNetnsPrivate;

struct _NMNetns {
    GObject parent;
    NMNetnsPrivate _priv;
};

struct _NMNetnsClass {
    GObjectClass parent;
};

G_DEFINE_TYPE(NMNetns, nm_netns, G_TYPE_OBJECT);

#define NM_NETNS_GET_PRIVATE(self) _NM_GET_PRIVATE(self, NMNetns, NM_IS_NETNS)

/*****************************************************************************/

#define _NMLOG_DOMAIN      LOGD_CORE
#define _NMLOG_PREFIX_NAME "netns"
#define _NMLOG(level, ...)                                                                  \
    G_STMT_START                                                                            \
    {                                                                                       \
        nm_log((level),                                                                     \
               (_NMLOG_DOMAIN),                                                             \
               NULL,                                                                        \
               NULL,                                                                        \
               "netns[" NM_HASH_OBFUSCATE_PTR_FMT "]: " _NM_UTILS_MACRO_FIRST(__VA_ARGS__), \
               NM_HASH_OBFUSCATE_PTR(self) _NM_UTILS_MACRO_REST(__VA_ARGS__));              \
    }                                                                                       \
    G_STMT_END
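
/* Usage sketch (assuming the generic _LOGD()/_LOGT() wrappers from the
 * nm-logging headers expand to _NMLOG(), as they do elsewhere in this file):
 *
 *     _LOGD("setup complete");
 *
 * would log in the LOGD_CORE domain with a "netns[<obfuscated-ptr>]: "
 * prefix, where "self" must name the NMNetns instance in scope. */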

/*****************************************************************************/

NM_DEFINE_SINGLETON_GETTER(NMNetns, nm_netns_get, NM_TYPE_NETNS);

/*****************************************************************************/

#define nm_assert_l3cfg(self, l3cfg)                                                      \
    G_STMT_START                                                                          \
    {                                                                                     \
        NMNetns *_self = (self);                                                          \
        NML3Cfg *_l3cfg = (l3cfg);                                                        \
                                                                                          \
        nm_assert(NM_IS_NETNS(self));                                                     \
        nm_assert(NM_IS_L3CFG(_l3cfg));                                                   \
        if (NM_MORE_ASSERTS > 5)                                                          \
            nm_assert(_l3cfg == nm_netns_l3cfg_get(_self, nm_l3cfg_get_ifindex(_l3cfg))); \
    }                                                                                     \
    G_STMT_END

/*****************************************************************************/

static WatcherDataIPAddr *
_watcher_ip_data_lookup(NMNetns *self, int addr_family, gconstpointer addr);

static void _watcher_handle_notify(NMNetns *self,
                                   NMNetnsWatcherHandle *handle,
                                   const NMNetnsWatcherEventData *event_data);

static const char *
_watcher_handle_to_string(const NMNetnsWatcherHandle *handle, char *buf, gsize buf_size);

/*****************************************************************************/

static gboolean
NM_NETNS_WATCHER_TYPE_VALID(NMNetnsWatcherType watcher_type)
{
    return NM_IN_SET(watcher_type, NM_NETNS_WATCHER_TYPE_IP_ADDR);
}

/*****************************************************************************/

typedef struct {
    const NMPObject *representative_obj;
    const NMPObject *merged_obj;
    CList ecmpid_lst_head;
    bool needs_update : 1;
    bool already_visited : 1;
} EcmpTrackEcmpid;

typedef struct {
    const NMPObject *obj;

    NML3Cfg *l3cfg;
    EcmpTrackEcmpid *parent_track_ecmpid;

    CList ifindex_lst;
    CList ecmpid_lst;

    /* Calling nm_netns_ip_route_ecmp_register() will ensure that the tracked
     * entry is non-dirty. This can be used to remove stale entries. */
    bool dirty : 1;

    /* This flag is set during nm_netns_ip_route_ecmp_register(), when first tracking the
     * route. It is cleared on the next nm_netns_ip_route_ecmp_commit(). It thus only
     * exists for a short time, to know during a commit that the route is new and
     * we need to do something special. */
    bool is_new : 1;

    /* The entry is ready to be configured. This exists, because the nexthop of
     * a route must be reachable directly (being onlink). That is, we may need
     * to add a direct, single-hop route to the gateway, which is done by
     * the NML3Cfg of that interface. Since the NML3Cfg calls nm_netns_ip_route_ecmp_commit()
     * and only adds the direct route afterwards, the ECMP route may not be ready
     * right away, but only upon seeing the entry a second time. */
    bool is_ready : 1;
} EcmpTrackObj;
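
/* How the two structs relate (a summary; they are maintained by
 * nm_netns_ip_route_ecmp_register()/_commit(), referenced in the comments
 * above): every tracked single-hop route is an EcmpTrackObj, and all
 * EcmpTrackObj entries that compare equal under
 * NM_PLATFORM_IP_ROUTE_CMP_TYPE_ECMP_ID hang off one EcmpTrackEcmpid via
 * "ecmpid_lst"/"ecmpid_lst_head". The EcmpTrackEcmpid merges its members
 * into "merged_obj", the multi-hop route that actually gets configured. */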

static int
_ecmp_track_sort_lst_cmp(const CList *a, const CList *b, const void *user_data)
{
    EcmpTrackObj *track_obj_a = c_list_entry(a, EcmpTrackObj, ecmpid_lst);
    EcmpTrackObj *track_obj_b = c_list_entry(b, EcmpTrackObj, ecmpid_lst);
    const NMPlatformIP4Route *route_a = NMP_OBJECT_CAST_IP4_ROUTE(track_obj_a->obj);
    const NMPlatformIP4Route *route_b = NMP_OBJECT_CAST_IP4_ROUTE(track_obj_b->obj);

    nm_assert(route_a->ifindex > 0);
    nm_assert(route_a->n_nexthops <= 1);
    nm_assert(route_b->ifindex > 0);
    nm_assert(route_b->n_nexthops <= 1);

    NM_CMP_FIELD(route_a, route_b, ifindex);
    NM_CMP_FIELD(route_b, route_a, weight);
    NM_CMP_DIRECT(htonl(route_a->gateway), htonl(route_b->gateway));

    return nm_assert_unreachable_val(
        nm_platform_ip4_route_cmp(route_a, route_b, NM_PLATFORM_IP_ROUTE_CMP_TYPE_ID));
}

static gboolean
_ecmp_track_init_merged_obj(EcmpTrackEcmpid *track_ecmpid, const NMPObject **out_obj_del)
{
    EcmpTrackObj *track_obj;
    nm_auto_nmpobj const NMPObject *obj_new = NULL;
    gsize n_nexthops;
    gsize i;

    nm_assert(track_ecmpid);
    nm_assert(!c_list_is_empty(&track_ecmpid->ecmpid_lst_head));
    nm_assert(track_ecmpid->representative_obj
              == c_list_first_entry(&track_ecmpid->ecmpid_lst_head, EcmpTrackObj, ecmpid_lst)->obj);
    nm_assert(out_obj_del && !*out_obj_del);

    if (!track_ecmpid->needs_update) {
        /* Already up to date. Nothing to do. */
        return FALSE;
    }

    track_ecmpid->needs_update = FALSE;

    n_nexthops = c_list_length(&track_ecmpid->ecmpid_lst_head);

    if (n_nexthops == 1) {
        /* There is only a single entry. There is nothing to merge, just set
         * the first entry. */
        obj_new = nmp_object_ref(track_ecmpid->representative_obj);
        goto out;
    }

    /* We want the nexthop list to be deterministic. We thus sort the list and update
     * the representative_obj. */
    c_list_sort(&track_ecmpid->ecmpid_lst_head, _ecmp_track_sort_lst_cmp, NULL);
    nmp_object_ref_set(
        &track_ecmpid->representative_obj,
        c_list_first_entry(&track_ecmpid->ecmpid_lst_head, EcmpTrackObj, ecmpid_lst)->obj);

    obj_new = nmp_object_clone(track_ecmpid->representative_obj, FALSE);

    nm_assert(obj_new->ip4_route.n_nexthops <= 1);
    nm_assert(!obj_new->_ip4_route.extra_nexthops);

    /* Note that there actually cannot be duplicate (ifindex,gateway,weight) tuples, because
     * NML3Cfg uses NM_PLATFORM_IP_ROUTE_CMP_TYPE_ID to track the routes, and track_ecmpid
     * groups them further by NM_PLATFORM_IP_ROUTE_CMP_TYPE_ECMP_ID. The comparison for
     * ECMP_ID is a strict superset of ID, hence there are no duplicates.
     *
     * Also, kernel wouldn't care if there were duplicate nexthops anyway.
     *
     * This means, it's gonna be simple. We sorted the single-hop routes by next-hop,
     * now just create a plain list of the nexthops (no check for duplicates, etc). */

    ((NMPObject *) obj_new)->ip4_route.n_nexthops = n_nexthops;
    ((NMPObject *) obj_new)->_ip4_route.extra_nexthops =
        g_new(NMPlatformIP4RtNextHop, n_nexthops - 1u);

    i = 0;
    c_list_for_each_entry (track_obj, &track_ecmpid->ecmpid_lst_head, ecmpid_lst) {
        if (i > 0) {
            const NMPlatformIP4Route *r = NMP_OBJECT_CAST_IP4_ROUTE(track_obj->obj);
            NMPlatformIP4RtNextHop *nh = (gpointer) &obj_new->_ip4_route.extra_nexthops[i - 1];

            *nh = (NMPlatformIP4RtNextHop) {
                .ifindex = r->ifindex,
                .gateway = r->gateway,
                .weight = r->weight,
            };
        }
        i++;
    }

out:
    nm_assert(obj_new);
    if (nmp_object_equal(track_ecmpid->merged_obj, obj_new))
        /* The objects are equal but the update was needed, for example if the
         * routes were removed from kernel but not from our tracking
         * dictionaries and therefore we tried to register them again. */
        return TRUE;

    if (track_ecmpid->merged_obj)
        *out_obj_del = g_steal_pointer(&track_ecmpid->merged_obj);
    track_ecmpid->merged_obj = g_steal_pointer(&obj_new);
    return TRUE;
}
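
/* A worked example with made-up values: suppose three tracked single-hop
 * routes share the same ECMP id,
 *
 *     198.51.100.0/24 via 192.0.2.1 dev A weight 5
 *     198.51.100.0/24 via 192.0.2.2 dev A weight 5
 *     198.51.100.0/24 via 192.0.2.3 dev B weight 5
 *
 * _ecmp_track_init_merged_obj() sorts them into a deterministic order,
 * keeps the first one as representative_obj, and clones it into merged_obj
 * with n_nexthops = 3, where the remaining two (ifindex, gateway, weight)
 * tuples become the extra_nexthops array. */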

/*****************************************************************************/

NMPNetns *
nm_netns_get_platform_netns(NMNetns *self)
{
    return NM_NETNS_GET_PRIVATE(self)->platform_netns;
}

NMPlatform *
nm_netns_get_platform(NMNetns *self)
{
    return NM_NETNS_GET_PRIVATE(self)->platform;
}

NMPGlobalTracker *
nm_netns_get_global_tracker(NMNetns *self)
{
    return NM_NETNS_GET_PRIVATE(self)->global_tracker;
}

NMDedupMultiIndex *
nm_netns_get_multi_idx(NMNetns *self)
{
    return nm_platform_get_multi_idx(NM_NETNS_GET_PRIVATE(self)->platform);
}

/*****************************************************************************/

static guint
_ecmp_routes_by_ecmpid_hash(gconstpointer ptr)
{
    const NMPObject *const *p_obj = ptr;

    return nm_platform_ip4_route_hash(NMP_OBJECT_CAST_IP4_ROUTE(*p_obj),
                                      NM_PLATFORM_IP_ROUTE_CMP_TYPE_ECMP_ID);
}

static int
_ecmp_routes_by_ecmpid_equal(gconstpointer ptr_a, gconstpointer ptr_b)
{
    const NMPObject *const *p_obj_a = ptr_a;
    const NMPObject *const *p_obj_b = ptr_b;

    return nm_platform_ip4_route_cmp(NMP_OBJECT_CAST_IP4_ROUTE(*p_obj_a),
                                     NMP_OBJECT_CAST_IP4_ROUTE(*p_obj_b),
                                     NM_PLATFORM_IP_ROUTE_CMP_TYPE_ECMP_ID)
           == 0;
}

static void
_ecmp_routes_by_ecmpid_free(gpointer ptr)
{
    EcmpTrackEcmpid *track_ecmpid = ptr;

    c_list_unlink_stale(&track_ecmpid->ecmpid_lst_head);
    nmp_object_unref(track_ecmpid->representative_obj);
    nmp_object_unref(track_ecmpid->merged_obj);
    nm_g_slice_free(track_ecmpid);
}

static void
_ecmp_routes_by_obj_free(gpointer ptr)
{
    EcmpTrackObj *track_obj = ptr;

    c_list_unlink_stale(&track_obj->ifindex_lst);
    c_list_unlink_stale(&track_obj->ecmpid_lst);
    nmp_object_unref(track_obj->obj);
    nm_g_slice_free(track_obj);
}
/*****************************************************************************/
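
/* Note on priv->l3cfgs: the hash table does not store NML3Cfg pointers
 * directly. Instead, each entry is a pointer to the "priv.ifindex" field
 * inside the NML3Cfg (see nm_netns_l3cfg_acquire() adding
 * &l3cfg->priv.ifindex), so plain "&ifindex" integers can be used as lookup
 * keys, and _l3cfg_hashed_to_l3cfg() recovers the owning object with a
 * container_of-style offset computation. */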

static NML3Cfg *
_l3cfg_hashed_to_l3cfg(gpointer ptr)
{
    gpointer l3cfg;

    l3cfg = &(((char *) ptr)[-G_STRUCT_OFFSET(NML3Cfg, priv.ifindex)]);
    nm_assert(NM_IS_L3CFG(l3cfg));
    return l3cfg;
}

static void
_l3cfg_hashed_free(gpointer ptr)
{
    NML3Cfg *l3cfg = _l3cfg_hashed_to_l3cfg(ptr);

    c_list_unlink(&l3cfg->internal_netns.signal_pending_lst);
}

static void
_l3cfg_weak_notify(gpointer data, GObject *where_the_object_was)
{
    NMNetns *self = NM_NETNS(data);
    NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(data);
    NML3Cfg *l3cfg = NM_L3CFG(where_the_object_was);
    int ifindex = nm_l3cfg_get_ifindex(l3cfg);

    if (!g_hash_table_remove(priv->l3cfgs, &ifindex))
        nm_assert_not_reached();

    if (NM_UNLIKELY(g_hash_table_size(priv->l3cfgs) == 0))
        g_object_unref(self);
}
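
/* Lifetime note (summarizing the code above and nm_netns_l3cfg_acquire()
 * below): the netns keeps only weak references to its NML3Cfg instances,
 * but it references itself while at least one NML3Cfg exists: acquiring the
 * first l3cfg does g_object_ref(self), and the weak notify for the last one
 * drops that reference again. */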

NML3Cfg *
nm_netns_l3cfg_get(NMNetns *self, int ifindex)
{
    NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
    gpointer ptr;

    nm_assert(ifindex > 0);

    ptr = g_hash_table_lookup(priv->l3cfgs, &ifindex);
    return ptr ? _l3cfg_hashed_to_l3cfg(ptr) : NULL;
}

NML3Cfg *
nm_netns_l3cfg_acquire(NMNetns *self, int ifindex)
{
    NMNetnsPrivate *priv;
    NML3Cfg *l3cfg;

    g_return_val_if_fail(NM_IS_NETNS(self), NULL);
    g_return_val_if_fail(ifindex > 0, NULL);

    priv = NM_NETNS_GET_PRIVATE(self);

    l3cfg = nm_netns_l3cfg_get(self, ifindex);
    if (l3cfg) {
        nm_log_trace(LOGD_CORE,
                     "l3cfg[" NM_HASH_OBFUSCATE_PTR_FMT ",ifindex=%d] %s",
                     NM_HASH_OBFUSCATE_PTR(l3cfg),
                     ifindex,
                     "referenced");
        return g_object_ref(l3cfg);
    }

    l3cfg = nm_l3cfg_new(self, ifindex);

    if (!g_hash_table_add(priv->l3cfgs, &l3cfg->priv.ifindex))
        nm_assert_not_reached();

    if (NM_UNLIKELY(g_hash_table_size(priv->l3cfgs) == 1))
        g_object_ref(self);

    g_object_weak_ref(G_OBJECT(l3cfg), _l3cfg_weak_notify, self);

    /* Transfer ownership! We keep only a weak ref. */
    return l3cfg;
}

/*****************************************************************************/

static gboolean
_platform_signal_on_idle_cb(gpointer user_data)
{
    gs_unref_object NMNetns *self = g_object_ref(NM_NETNS(user_data));
    NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
    NML3Cfg *l3cfg;
    CList work_list;

    nm_clear_g_source_inst(&priv->signal_pending_idle_source);

    /* We emit all queued signals together. However, we don't want to hog the
     * main loop for longer than the currently queued elements.
     *
     * If we catch more change events, they will be queued and processed by a future
     * idle handler.
     *
     * Hence, move the list to a temporary list. Isn't CList great? */

    c_list_init(&work_list);
    c_list_splice(&work_list, &priv->l3cfg_signal_pending_lst_head);

    while ((l3cfg = c_list_first_entry(&work_list, NML3Cfg, internal_netns.signal_pending_lst))) {
        nm_assert(NM_IS_L3CFG(l3cfg));
        c_list_unlink(&l3cfg->internal_netns.signal_pending_lst);
        _nm_l3cfg_notify_platform_change_on_idle(
            l3cfg,
            nm_steal_int(&l3cfg->internal_netns.signal_pending_obj_type_flags));
    }

    return G_SOURCE_CONTINUE;
}

static void
_platform_signal_cb(NMPlatform *platform,
                    int obj_type_i,
                    int ifindex,
                    gconstpointer platform_object,
                    int change_type_i,
                    NMNetns **p_self)
{
    NMNetns *self = NM_NETNS(*p_self);
    NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
    const NMPObjectType obj_type = obj_type_i;
    const NMPlatformSignalChangeType change_type = change_type_i;
    NML3Cfg *l3cfg;

    if (ifindex <= 0) {
        /* platform signal callback could be triggered by nodev routes, skip them */
        return;
    }

    l3cfg = nm_netns_l3cfg_get(self, ifindex);
    if (!l3cfg)
        goto notify_watcher;

    l3cfg->internal_netns.signal_pending_obj_type_flags |= nmp_object_type_to_flags(obj_type);

    if (c_list_is_empty(&l3cfg->internal_netns.signal_pending_lst)) {
        c_list_link_tail(&priv->l3cfg_signal_pending_lst_head,
                         &l3cfg->internal_netns.signal_pending_lst);
        if (!priv->signal_pending_idle_source)
            priv->signal_pending_idle_source =
                nm_g_idle_add_source(_platform_signal_on_idle_cb, self);
    }

    _nm_l3cfg_notify_platform_change(l3cfg, change_type, NMP_OBJECT_UP_CAST(platform_object));

notify_watcher:
    switch (obj_type) {
    case NMP_OBJECT_TYPE_IP4_ADDRESS:
    case NMP_OBJECT_TYPE_IP6_ADDRESS:
    {
        NMNetnsWatcherHandle *handle;
        NMNetnsWatcherHandle *handle_safe;
        WatcherDataIPAddr *data;

        data =
            _watcher_ip_data_lookup(self,
                                    obj_type == NMP_OBJECT_TYPE_IP4_ADDRESS ? AF_INET : AF_INET6,
                                    ((const NMPlatformIPAddress *) platform_object)->address_ptr);

        if (data) {
            const NMNetnsWatcherEventData event_data = {
                .ip_addr =
                    {
                        .change_type = change_type,
                        .obj = NMP_OBJECT_UP_CAST(platform_object),
                    },
            };
            char sbuf[500];

            c_list_for_each_entry_safe (handle,
                                        handle_safe,
                                        &data->watcher_ip_addr_lst_head,
                                        reg_data.ip_addr.watcher_ip_addr_lst) {
                _LOGT("netns-watcher: %s %s",
                      "notify",
                      _watcher_handle_to_string(handle, sbuf, sizeof(sbuf)));

                /* Note that we dispatch these events directly from the platform event
                 * and while iterating over "data".
                 *
                 * From the callback, it's probably a bad idea to do anything in platform
                 * that might change anything (emit new signals) or to nm_netns_watcher_remove*()
                 * any other watcher.
                 *
                 * The callee needs to be careful. */
                _watcher_handle_notify(self, handle, &event_data);
            }
        }
        break;
    }
    default:
        break;
    }
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
typedef struct {
|
|
|
|
|
const char *name;
|
|
|
|
|
guint32 start_addr; /* host byte order */
|
|
|
|
|
guint prefix_len;
|
|
|
|
|
guint num_addrs;
|
|
|
|
|
gboolean allow_reuse;
|
|
|
|
|
} IPReservationTypeDesc;
|
|
|
|
|
|
|
|
|
|
static const IPReservationTypeDesc ip_reservation_types[_NM_NETNS_IP_RESERVATION_TYPE_NUM] = {
|
|
|
|
|
[NM_NETNS_IP_RESERVATION_TYPE_SHARED4] =
|
|
|
|
|
{
|
|
|
|
|
.name = "shared-ip4",
|
|
|
|
|
.start_addr = 0x0a2a0001, /* 10.42.0.1 */
|
|
|
|
|
.prefix_len = 24,
|
|
|
|
|
.num_addrs = 256,
|
|
|
|
|
.allow_reuse = TRUE,
|
|
|
|
|
},
|
|
|
|
|
};
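/* Illustrative sketch only: a hypothetical additional reservation type would
 * be registered by extending NMNetnsIPReservationType and adding a row to the
 * table above, e.g.:
 *
 *   [NM_NETNS_IP_RESERVATION_TYPE_EXAMPLE] =
 *       {
 *           .name        = "example-ip4",
 *           .start_addr  = 0xc0a86401, // 192.168.100.1, hypothetical
 *           .prefix_len  = 24,
 *           .num_addrs   = 16,
 *           .allow_reuse = FALSE,
 *       },
 *
 * NM_NETNS_IP_RESERVATION_TYPE_EXAMPLE and its values are assumptions for
 * illustration; they do not exist in the codebase. */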
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
NMNetnsIPReservation *
|
|
|
|
|
nm_netns_ip_reservation_get(NMNetns *self, NMNetnsIPReservationType type)
|
|
|
|
|
{
|
|
|
|
|
NMNetnsPrivate *priv;
|
|
|
|
|
const IPReservationTypeDesc *desc;
|
|
|
|
|
NMNetnsIPReservation *res;
|
|
|
|
|
GHashTable **table;
|
|
|
|
|
in_addr_t addr;
|
|
|
|
|
char buf[NM_INET_ADDRSTRLEN];
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-09-14 15:07:18 +02:00
|
|
|
g_return_val_if_fail(NM_IS_NETNS(self), NULL);
|
2025-09-06 14:46:35 +02:00
|
|
|
g_return_val_if_fail(type < _NM_NETNS_IP_RESERVATION_TYPE_NUM, NULL);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
desc = &ip_reservation_types[type];
|
|
|
|
|
table = &priv->ip_reservation[type];
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
if (!*table) {
|
|
|
|
|
addr = htonl(desc->start_addr);
|
|
|
|
|
*table = g_hash_table_new(nm_puint32_hash, nm_puint32_equal);
|
2020-09-14 15:07:18 +02:00
|
|
|
g_object_ref(self);
|
|
|
|
|
} else {
|
|
|
|
|
guint32 count;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
nm_assert(g_hash_table_size(*table) > 0);
|
|
|
|
|
nm_assert(desc->prefix_len > 0 && desc->prefix_len <= 32);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-09-14 15:07:18 +02:00
|
|
|
count = 0u;
|
|
|
|
|
for (;;) {
|
2025-09-06 14:46:35 +02:00
|
|
|
addr = htonl(desc->start_addr + (count << (32 - desc->prefix_len)));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
res = g_hash_table_lookup(*table, &addr);
|
|
|
|
|
if (!res)
|
2020-09-14 15:07:18 +02:00
|
|
|
break;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-09-14 15:07:18 +02:00
|
|
|
count++;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
if (count >= desc->num_addrs) {
|
|
|
|
|
if (!desc->allow_reuse) {
|
|
|
|
|
_LOGE("%s: ran out of IP addresses", desc->name);
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (res->_ref_count == 1) {
|
|
|
|
|
_LOGE("%s: ran out of IP addresses. Reuse %s/%u",
|
|
|
|
|
desc->name,
|
|
|
|
|
nm_inet4_ntop(res->addr, buf),
|
|
|
|
|
desc->prefix_len);
|
2020-09-14 15:07:18 +02:00
|
|
|
} else {
|
2025-09-06 14:46:35 +02:00
|
|
|
_LOGD("%s: reserved IP address %s/%u (duplicate)",
|
|
|
|
|
desc->name,
|
|
|
|
|
nm_inet4_ntop(res->addr, buf),
|
|
|
|
|
desc->prefix_len);
|
2020-09-14 15:07:18 +02:00
|
|
|
}
|
2025-09-06 14:46:35 +02:00
|
|
|
res->_ref_count++;
|
|
|
|
|
return res;
|
2020-09-14 15:07:18 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
res = g_slice_new(NMNetnsIPReservation);
|
|
|
|
|
*res = (NMNetnsIPReservation) {
|
2020-09-14 15:07:18 +02:00
|
|
|
.addr = addr,
|
|
|
|
|
._ref_count = 1,
|
|
|
|
|
._self = self,
|
2025-09-06 14:46:35 +02:00
|
|
|
._type = type,
|
2020-09-14 15:07:18 +02:00
|
|
|
};
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
g_hash_table_add(*table, res);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
_LOGD("%s: reserved IP address %s/%u",
|
|
|
|
|
desc->name,
|
|
|
|
|
nm_inet4_ntop(res->addr, buf),
|
|
|
|
|
desc->prefix_len);
|
|
|
|
|
return res;
|
2020-09-14 15:07:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
2025-09-06 14:46:35 +02:00
|
|
|
nm_netns_ip_reservation_release(NMNetnsIPReservation *res)
|
2020-09-14 15:07:18 +02:00
|
|
|
{
|
2025-09-06 14:46:35 +02:00
|
|
|
NMNetns *self;
|
|
|
|
|
NMNetnsPrivate *priv;
|
|
|
|
|
const IPReservationTypeDesc *desc;
|
|
|
|
|
GHashTable **table;
|
|
|
|
|
char buf[NM_INET_ADDRSTRLEN];
|
2020-09-14 15:07:18 +02:00
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
g_return_if_fail(res);
|
|
|
|
|
g_return_if_fail(res->_type < _NM_NETNS_IP_RESERVATION_TYPE_NUM);
|
2020-09-14 15:07:18 +02:00
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
self = res->_self;
|
2020-09-14 15:07:18 +02:00
|
|
|
g_return_if_fail(NM_IS_NETNS(self));
|
|
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
desc = &ip_reservation_types[res->_type];
|
|
|
|
|
table = &priv->ip_reservation[res->_type];
|
|
|
|
|
|
|
|
|
|
nm_assert(res->_ref_count > 0);
|
|
|
|
|
nm_assert(res == nm_g_hash_table_lookup(*table, res));
|
|
|
|
|
|
|
|
|
|
if (res->_ref_count > 1) {
|
|
|
|
|
nm_assert(desc->allow_reuse);
|
|
|
|
|
res->_ref_count--;
|
|
|
|
|
_LOGD("%s: release IP address reservation %s/%u (%d more references held)",
|
|
|
|
|
desc->name,
|
|
|
|
|
nm_inet4_ntop(res->addr, buf),
|
|
|
|
|
desc->prefix_len,
|
|
|
|
|
res->_ref_count);
|
2020-09-14 15:07:18 +02:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
if (!g_hash_table_remove(*table, res))
|
2020-09-14 15:07:18 +02:00
|
|
|
nm_assert_not_reached();
|
|
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
if (g_hash_table_size(*table) == 0) {
|
|
|
|
|
nm_clear_pointer(table, g_hash_table_unref);
|
2020-09-14 15:07:18 +02:00
|
|
|
g_object_unref(self);
|
|
|
|
|
}
|
|
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
_LOGD("%s: release IP address reservation %s/%u",
|
|
|
|
|
desc->name,
|
|
|
|
|
nm_inet4_ntop(res->addr, buf),
|
|
|
|
|
desc->prefix_len);
|
2020-09-14 15:07:18 +02:00
|
|
|
|
2025-09-06 14:46:35 +02:00
|
|
|
res->_self = NULL;
|
|
|
|
|
nm_g_slice_free(res);
|
2020-09-14 15:07:18 +02:00
|
|
|
}
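/* Usage sketch (illustration only, not part of the build): a hypothetical
 * caller that needs a shared IPv4 subnet. The variable names, use_subnet()
 * and the surrounding flow are assumptions; only the two
 * nm_netns_ip_reservation_*() calls are real API.
 *
 *   NMNetnsIPReservation *res;
 *   char                  buf[NM_INET_ADDRSTRLEN];
 *
 *   res = nm_netns_ip_reservation_get(netns, NM_NETNS_IP_RESERVATION_TYPE_SHARED4);
 *   if (res) {
 *       // e.g. 10.42.0.1/24 on the first reservation; later reservations get
 *       // the next free /24 from the pool defined in ip_reservation_types[].
 *       use_subnet(nm_inet4_ntop(res->addr, buf));
 *       ...
 *       nm_netns_ip_reservation_release(res);
 *   }
 */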
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2022-11-23 08:29:48 +01:00
|
|
|
void
|
|
|
|
|
nm_netns_ip_route_ecmp_register(NMNetns *self, NML3Cfg *l3cfg, const NMPObject *obj)
|
|
|
|
|
{
|
|
|
|
|
NMNetnsPrivate *priv;
|
|
|
|
|
EcmpTrackObj *track_obj;
|
|
|
|
|
const NMPlatformIP4Route *route;
|
|
|
|
|
char sbuf[NM_UTILS_TO_STRING_BUFFER_SIZE];
|
|
|
|
|
|
|
|
|
|
nm_assert_l3cfg(self, l3cfg);
|
|
|
|
|
|
|
|
|
|
route = NMP_OBJECT_CAST_IP4_ROUTE(obj);
|
|
|
|
|
|
|
|
|
|
nm_assert(route->ifindex > 0);
|
|
|
|
|
nm_assert(route->ifindex == nm_l3cfg_get_ifindex(l3cfg));
|
|
|
|
|
nm_assert(route->n_nexthops <= 1);
|
|
|
|
|
|
|
|
|
|
priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
|
|
|
|
|
track_obj = g_hash_table_lookup(priv->ecmp_track_by_obj, &obj);
|
|
|
|
|
|
|
|
|
|
if (NM_MORE_ASSERTS > 10) {
|
|
|
|
|
EcmpTrackObj *track_obj2;
|
|
|
|
|
gboolean found = FALSE;
|
|
|
|
|
|
|
|
|
|
c_list_for_each_entry (track_obj2,
|
|
|
|
|
&l3cfg->internal_netns.ecmp_track_ifindex_lst_head,
|
|
|
|
|
ifindex_lst) {
|
|
|
|
|
if (track_obj2->obj == obj) {
|
|
|
|
|
found = TRUE;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nm_assert((!!track_obj) == found);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!track_obj) {
|
|
|
|
|
EcmpTrackEcmpid *track_ecmpid;
|
|
|
|
|
|
|
|
|
|
track_ecmpid = g_hash_table_lookup(priv->ecmp_track_by_ecmpid, &obj);
|
|
|
|
|
if (!track_ecmpid) {
|
|
|
|
|
track_ecmpid = g_slice_new(EcmpTrackEcmpid);
|
2024-10-04 10:45:56 +02:00
|
|
|
*track_ecmpid = (EcmpTrackEcmpid) {
|
2022-11-23 08:29:48 +01:00
|
|
|
.representative_obj = nmp_object_ref(obj),
|
|
|
|
|
.merged_obj = NULL,
|
|
|
|
|
.ecmpid_lst_head = C_LIST_INIT(track_ecmpid->ecmpid_lst_head),
|
|
|
|
|
.needs_update = TRUE,
|
|
|
|
|
};
|
|
|
|
|
g_hash_table_add(priv->ecmp_track_by_ecmpid, track_ecmpid);
|
|
|
|
|
} else
|
|
|
|
|
track_ecmpid->needs_update = TRUE;
|
|
|
|
|
|
|
|
|
|
track_obj = g_slice_new(EcmpTrackObj);
|
2024-10-04 10:45:56 +02:00
|
|
|
*track_obj = (EcmpTrackObj) {
|
2022-11-23 08:29:48 +01:00
|
|
|
.obj = nmp_object_ref(obj),
|
|
|
|
|
.l3cfg = l3cfg,
|
|
|
|
|
.parent_track_ecmpid = track_ecmpid,
|
|
|
|
|
.dirty = FALSE,
|
2023-01-31 10:30:04 +01:00
|
|
|
.is_new = TRUE,
|
|
|
|
|
.is_ready = FALSE,
|
2022-11-23 08:29:48 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
g_hash_table_add(priv->ecmp_track_by_obj, track_obj);
|
|
|
|
|
c_list_link_tail(&l3cfg->internal_netns.ecmp_track_ifindex_lst_head,
|
|
|
|
|
&track_obj->ifindex_lst);
|
|
|
|
|
c_list_link_tail(&track_ecmpid->ecmpid_lst_head, &track_obj->ecmpid_lst);
|
|
|
|
|
|
|
|
|
|
_LOGT(
|
|
|
|
|
"ecmp-route: track %s",
|
|
|
|
|
nmp_object_to_string(track_obj->obj, NMP_OBJECT_TO_STRING_PUBLIC, sbuf, sizeof(sbuf)));
|
2022-12-22 17:58:12 +01:00
|
|
|
} else {
|
|
|
|
|
track_obj->dirty = FALSE;
|
|
|
|
|
track_obj->parent_track_ecmpid->needs_update = TRUE;
|
|
|
|
|
}
|
2022-11-23 08:29:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
2023-01-31 10:30:04 +01:00
|
|
|
nm_netns_ip_route_ecmp_commit(NMNetns *self,
|
|
|
|
|
NML3Cfg *l3cfg,
|
|
|
|
|
GPtrArray **out_singlehop_routes,
|
|
|
|
|
gboolean is_reapply)
|
2022-11-23 08:29:48 +01:00
|
|
|
{
|
2023-02-01 08:37:51 +01:00
|
|
|
NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
EcmpTrackObj *track_obj;
|
|
|
|
|
EcmpTrackObj *track_obj_safe;
|
|
|
|
|
EcmpTrackEcmpid *track_ecmpid;
|
|
|
|
|
const NMPObject *route_obj;
|
|
|
|
|
char sbuf[NM_UTILS_TO_STRING_BUFFER_SIZE];
|
|
|
|
|
gboolean already_notified = FALSE;
|
2022-11-23 08:29:48 +01:00
|
|
|
|
|
|
|
|
nm_assert_l3cfg(self, l3cfg);
|
|
|
|
|
|
l3cfg: schedule an update after every commit-type/config-data register/unregister
When we register/unregister a commit-type or when we add/remove a
config-data to NML3Cfg, that act only does the registration/addition.
Only on the next commit are the changes actually applied. The purpose
of that is to add/register multiple configurations and commit them later
when ready.
However, it would be wrong to not do the commit a short time after. The
configuration state is dirty and needs to be committed, and that should
happen soon.
Worse, when an interface disappears, NMDevice will clear the ifindex and
the NML3Cfg instance, thereby unregistering all config data and commit
type. If we previously committed something, we need to do another follow-up
commit to clean up that state.
That is for example important with ECMP routes, which are registered in
NMNetns. When NML3Cfg goes down, it always must unregister to properly
clean up. Failure to do so causes an assertion failure and crash. This
change fixes that.
Fix that by automatically scheduling an idle commit on
register/unregister/add/remove of commit-type/config-data.
It should *always* be permissible to call an AUTO commit from
an idle handler, because various parties cannot use NML3Cfg
independently, and they cannot know when somebody else does a
commit.
Note that NML3Cfg remembers if it previously did a commit
("commit_type_update_sticky"), so even if the last commit-type gets
unregistered, the next commit will still do a sticky update (one more
time).
The only remaining question is what happens during quitting. When
quitting, NetworkManager may want to leave some interfaces up and
configured. If we were to properly clean up the NML3Cfg we might need a
mechanism to handle that. However, currently we just leak everything
during quit, so that is not a concern now. It is something that needs
to be addressed in the future.
https://bugzilla.redhat.com/show_bug.cgi?id=2158394
https://gitlab.freedesktop.org/NetworkManager/NetworkManager/-/merge_requests/1505
2023-01-17 17:41:38 +01:00
|
|
|
_LOGT("ecmp-route: committing IPv4 ECMP routes");
|
|
|
|
|
|
2022-11-23 08:29:48 +01:00
|
|
|
/* First, delete all dirty entries, and mark the survivors as dirty, so that on the
|
|
|
|
|
* next update they must be touched again. */
|
|
|
|
|
c_list_for_each_entry_safe (track_obj,
|
|
|
|
|
track_obj_safe,
|
|
|
|
|
&l3cfg->internal_netns.ecmp_track_ifindex_lst_head,
|
|
|
|
|
ifindex_lst) {
|
|
|
|
|
track_ecmpid = track_obj->parent_track_ecmpid;
|
|
|
|
|
track_ecmpid->already_visited = FALSE;
|
|
|
|
|
|
core: fix crash in nm_netns_ip_route_ecmp_commit()
#0 0x00000000004c53e0 in nm_netns_ip_route_ecmp_commit (self=0x27bde30, l3cfg=l3cfg@entry=0x2890810, out_singlehop_routes=out_singlehop_routes@entry=0x7ffd0cac3ce8)
at src/core/nm-netns.c:686
#1 0x00000000004b4335 in _commit_collect_routes
(self=self@entry=0x2890810, addr_family=addr_family@entry=2, commit_type=commit_type@entry=NM_L3_CFG_COMMIT_TYPE_UPDATE, routes=routes@entry=0x7ffd0cac3de8, routes_nodev=routes_nodev@entry=0x7ffd0cac3de0) at src/core/nm-l3cfg.c:1183
#2 0x00000000004b8982 in _l3_commit_one
(self=self@entry=0x2890810, addr_family=addr_family@entry=2, commit_type=commit_type@entry=NM_L3_CFG_COMMIT_TYPE_UPDATE, changed_combined_l3cd=<optimized out>, l3cd_old=<optimized out>) at src/core/nm-l3cfg.c:4605
#3 0x00000000004c0f52 in _l3_commit (self=self@entry=0x2890810, commit_type=NM_L3_CFG_COMMIT_TYPE_UPDATE, commit_type@entry=NM_L3_CFG_COMMIT_TYPE_AUTO, is_idle=is_idle@entry=1)
at src/core/nm-l3cfg.c:4786
#4 0x00000000004c11cb in _l3_commit_on_idle_cb (user_data=user_data@entry=0x2890810) at src/core/nm-l3cfg.c:3164
#5 0x00007f532d02dcb2 in g_idle_dispatch (source=0x28f70c0, callback=0x4c116e <_l3_commit_on_idle_cb>, user_data=0x2890810) at ../glib/gmain.c:6124
#6 0x00007f532d02ecbf in g_main_dispatch (context=0x27c2d60) at ../glib/gmain.c:3444
https://bugzilla.redhat.com/show_bug.cgi?id=2158365
Fixes: 5b5ce4268211 ('nm-netns: track ECMP routes')
2023-01-05 10:29:07 +01:00
|
|
|
nm_assert(g_hash_table_lookup(priv->ecmp_track_by_ecmpid, track_ecmpid) == track_ecmpid);
|
|
|
|
|
nm_assert(g_hash_table_lookup(priv->ecmp_track_by_obj, track_obj) == track_obj);
|
|
|
|
|
nm_assert(c_list_contains(&track_ecmpid->ecmpid_lst_head, &track_obj->ecmpid_lst));
|
|
|
|
|
nm_assert(track_obj->l3cfg == l3cfg);
|
|
|
|
|
|
2022-11-23 08:29:48 +01:00
|
|
|
if (!track_obj->dirty) {
|
|
|
|
|
/* This one is still in use. Keep it, but mark dirty, so that on the
|
|
|
|
|
* next update cycle, it needs to be touched again or will be deleted. */
|
|
|
|
|
track_obj->dirty = TRUE;
|
2023-01-31 10:30:04 +01:00
|
|
|
if (is_reapply) {
|
|
|
|
|
track_obj->is_new = TRUE;
|
|
|
|
|
track_obj->is_ready = FALSE;
|
|
|
|
|
}
|
|
|
|
|
if (track_obj->is_new) {
|
2023-02-01 08:37:51 +01:00
|
|
|
const NMPlatformIP4Route *route =
|
|
|
|
|
NMP_OBJECT_CAST_IP4_ROUTE(track_ecmpid->merged_obj);
|
|
|
|
|
|
2023-01-31 10:30:04 +01:00
|
|
|
/* This is a new route entry that was just added. Upon first
|
|
|
|
|
* addition, the route is not yet ready for configuration,
|
|
|
|
|
* because we need to make sure that the gateway is reachable
|
|
|
|
|
* via an onlink route. The calling l3cfg will configure that
|
|
|
|
|
* route, but only after returning from this function. So we
|
|
|
|
|
* need to go through one more commit.
|
|
|
|
|
*
|
|
|
|
|
* We also need to make sure that we are called back right
|
|
|
|
|
* after l3cfg configured that route. We achieve that by
|
|
|
|
|
* scheduling another idle commit on "l3cfg". */
|
|
|
|
|
track_obj->is_new = FALSE;
|
2023-02-01 08:58:15 +01:00
|
|
|
if (route
|
|
|
|
|
&& (route->gateway == 0
|
|
|
|
|
|| NM_FLAGS_HAS(route->r_rtm_flags, (unsigned) RTNH_F_ONLINK))) {
|
2023-02-01 08:37:51 +01:00
|
|
|
/* This route is onlink. We don't need to configure an onlink route
|
|
|
|
|
* to the gateway, and the route is immediately ready for configuration. */
|
|
|
|
|
track_obj->is_ready = TRUE;
|
2023-03-07 10:55:12 +01:00
|
|
|
} else if (c_list_is_empty_or_single(&track_ecmpid->ecmpid_lst_head)) {
|
2023-01-31 10:30:04 +01:00
|
|
|
/* This route has no merge partner and ends up being a
|
|
|
|
|
* single hop route. It will be returned and configured by
|
|
|
|
|
* the calling "l3cfg".
|
|
|
|
|
*
|
|
|
|
|
* Unlike for multi-hop routes, we don't need to be called
|
|
|
|
|
* again after the onlink route was added. We are done, and
|
|
|
|
|
* don't need to schedule an idle commit. */
|
|
|
|
|
track_obj->is_ready = TRUE;
|
|
|
|
|
} else {
|
2023-02-01 08:37:51 +01:00
|
|
|
/* This is a new route which has a gateway. We need the "l3cfg"
|
|
|
|
|
* to first configure the onlink route. It's not yet ready for configuration.
|
|
|
|
|
*
|
|
|
|
|
* Instead, schedule an idle commit to make sure we get called back
|
|
|
|
|
* again, and then (upon seeing the entry the second time) the onlink
|
|
|
|
|
* route is already configured and we will be ready. */
|
2023-01-31 10:30:04 +01:00
|
|
|
if (!already_notified) {
|
2023-02-01 08:37:51 +01:00
|
|
|
/* Some micro optimization with already_notified to avoid calling
|
|
|
|
|
* nm_l3cfg_commit_on_idle_schedule() unnecessarily. */
|
2023-01-31 10:30:04 +01:00
|
|
|
already_notified = TRUE;
|
|
|
|
|
nm_l3cfg_commit_on_idle_schedule(l3cfg, NM_L3_CFG_COMMIT_TYPE_AUTO);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
/* We see this entry the second time (or more) so it's ready. */
|
|
|
|
|
track_obj->is_ready = TRUE;
|
|
|
|
|
}
|
2022-11-23 08:29:48 +01:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2023-01-05 10:29:07 +01:00
|
|
|
/* This entry can be dropped. */
|
|
|
|
|
if (!g_hash_table_remove(priv->ecmp_track_by_obj, track_obj))
|
|
|
|
|
nm_assert_not_reached();
|
|
|
|
|
|
2022-11-23 08:29:48 +01:00
|
|
|
if (c_list_is_empty(&track_ecmpid->ecmpid_lst_head)) {
|
|
|
|
|
if (track_ecmpid->merged_obj) {
|
2023-01-31 10:30:04 +01:00
|
|
|
if (NMP_OBJECT_CAST_IP4_ROUTE(track_ecmpid->merged_obj)->n_nexthops > 1)
|
|
|
|
|
nm_platform_object_delete(priv->platform, track_ecmpid->merged_obj);
|
2022-11-23 08:29:48 +01:00
|
|
|
}
|
|
|
|
|
g_hash_table_remove(priv->ecmp_track_by_ecmpid, track_ecmpid);
|
2022-12-23 12:48:32 +01:00
|
|
|
|
2022-11-23 08:29:48 +01:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* We need to update the representative obj. */
|
|
|
|
|
nmp_object_ref_set(
|
|
|
|
|
&track_ecmpid->representative_obj,
|
|
|
|
|
c_list_first_entry(&track_ecmpid->ecmpid_lst_head, EcmpTrackObj, ecmpid_lst)->obj);
|
|
|
|
|
track_ecmpid->needs_update = TRUE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Now, we need to iterate again over all objects, and regenerate the merged_obj. */
|
|
|
|
|
c_list_for_each_entry (track_obj,
|
|
|
|
|
&l3cfg->internal_netns.ecmp_track_ifindex_lst_head,
|
|
|
|
|
ifindex_lst) {
|
2023-02-01 08:37:51 +01:00
|
|
|
const NMPlatformIP4Route *route;
|
2023-01-31 10:30:04 +01:00
|
|
|
EcmpTrackObj *track_obj2;
|
2022-11-23 08:29:48 +01:00
|
|
|
nm_auto_nmpobj const NMPObject *obj_del = NULL;
|
|
|
|
|
gboolean changed;
|
2023-01-31 10:30:04 +01:00
|
|
|
gboolean all_is_ready;
|
2022-11-23 08:29:48 +01:00
|
|
|
|
|
|
|
|
track_ecmpid = track_obj->parent_track_ecmpid;
|
|
|
|
|
if (track_ecmpid->already_visited) {
|
|
|
|
|
/* We already visited this ecmpid in the same loop. We can skip, otherwise
|
|
|
|
|
* we might add the same route twice. */
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
track_ecmpid->already_visited = TRUE;
|
|
|
|
|
|
2023-01-31 10:30:04 +01:00
|
|
|
all_is_ready = TRUE;
|
|
|
|
|
c_list_for_each_entry (track_obj2, &track_ecmpid->ecmpid_lst_head, ecmpid_lst) {
|
|
|
|
|
if (!track_obj2->is_ready) {
|
|
|
|
|
all_is_ready = FALSE;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (!all_is_ready) {
|
|
|
|
|
/* Here we might have a merged_obj already which can have the wrong
|
|
|
|
|
* settings, e.g. the wrong nexthops. We leave it for the moment and
|
|
|
|
|
* then we reconfigure it when this entry is ready. */
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2022-11-23 08:29:48 +01:00
|
|
|
changed = _ecmp_track_init_merged_obj(track_obj->parent_track_ecmpid, &obj_del);
|
|
|
|
|
|
|
|
|
|
nm_assert(!obj_del || changed);
|
|
|
|
|
|
2023-01-31 10:30:04 +01:00
|
|
|
route_obj = track_ecmpid->merged_obj;
|
2022-11-23 08:29:48 +01:00
|
|
|
route = NMP_OBJECT_CAST_IP4_ROUTE(route_obj);
|
|
|
|
|
|
|
|
|
|
if (obj_del) {
|
2023-01-31 10:30:04 +01:00
|
|
|
if (NMP_OBJECT_CAST_IP4_ROUTE(obj_del)->n_nexthops > 1)
|
|
|
|
|
nm_platform_object_delete(priv->platform, obj_del);
|
2023-10-18 23:29:55 +02:00
|
|
|
else if (NMP_OBJECT_CAST_IP4_ROUTE(obj_del)->ifindex != nm_l3cfg_get_ifindex(l3cfg)) {
|
|
|
|
|
/* A single-hop route from a different interface was merged
|
|
|
|
|
* into an ECMP route. Now, it is time to notify the l3cfg that
|
|
|
|
|
* is managing that single-hop route to remove it. */
|
|
|
|
|
nm_l3cfg_commit_on_idle_schedule(
|
|
|
|
|
nm_netns_l3cfg_get(self, NMP_OBJECT_CAST_IP4_ROUTE(obj_del)->ifindex),
|
|
|
|
|
NM_L3_CFG_COMMIT_TYPE_UPDATE);
|
|
|
|
|
}
|
2022-11-23 08:29:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (route->n_nexthops <= 1) {
|
2023-11-28 15:20:33 +01:00
|
|
|
NMPObject *route_clone;
|
|
|
|
|
|
2022-11-23 08:29:48 +01:00
|
|
|
/* This is a single hop route. Return it to the caller. */
|
|
|
|
|
if (!*out_singlehop_routes) {
|
|
|
|
|
/* Note that the returned array does not own a reference. This
|
|
|
|
|
* function has only one caller, and for that caller, it's just
|
|
|
|
|
* fine that the result is not additionally kept alive. */
|
|
|
|
|
*out_singlehop_routes =
|
|
|
|
|
g_ptr_array_new_with_free_func((GDestroyNotify) nmp_object_unref);
|
|
|
|
|
}
|
2023-11-28 15:20:33 +01:00
|
|
|
|
|
|
|
|
/* We have here an IPv4 single-hop route. For internal tracking purposes,
|
|
|
|
|
* this route has a positive "weight" (which was used to mark it as a candidate
|
|
|
|
|
* for ECMP merging). Now we want to return this route to NML3Cfg and add it
|
|
|
|
|
* as regular single-hop routes.
|
|
|
|
|
*
|
|
|
|
|
* A single-hop route in kernel always has a "weight" of zero. This route
|
|
|
|
|
* cannot be added as-is. Well, if we did, then the result would be
|
|
|
|
|
* a different(!) route (with a zero "weight").
|
|
|
|
|
*
|
|
|
|
|
* Anticipate that and normalize the route now to be a regular single-hop
|
|
|
|
|
* route (with weight zero). nm_platform_ip_route_normalize() does that.
|
|
|
|
|
* We really want to return a regular route here, not the route with a positive
|
|
|
|
|
* weight that exists for internal tracking purposes.
|
|
|
|
|
*/
|
|
|
|
|
nm_assert(NMP_OBJECT_GET_TYPE(route_obj) == NMP_OBJECT_TYPE_IP4_ROUTE);
|
|
|
|
|
nm_assert(route_obj->ip4_route.weight > 0u);
|
|
|
|
|
|
|
|
|
|
route_clone = nmp_object_clone(route_obj, FALSE);
|
|
|
|
|
nm_platform_ip_route_normalize(AF_INET, NMP_OBJECT_CAST_IP_ROUTE(route_clone));
|
|
|
|
|
g_ptr_array_add(*out_singlehop_routes, route_clone);
|
|
|
|
|
|
2022-11-23 08:29:48 +01:00
|
|
|
if (changed) {
|
|
|
|
|
_LOGT("ecmp-route: single-hop %s",
|
|
|
|
|
nmp_object_to_string(route_obj,
|
|
|
|
|
NMP_OBJECT_TO_STRING_PUBLIC,
|
|
|
|
|
sbuf,
|
|
|
|
|
sizeof(sbuf)));
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2023-01-31 10:30:04 +01:00
|
|
|
if (changed || is_reapply) {
|
2022-11-23 08:29:48 +01:00
|
|
|
_LOGT("ecmp-route: multi-hop %s",
|
|
|
|
|
nmp_object_to_string(route_obj, NMP_OBJECT_TO_STRING_PUBLIC, sbuf, sizeof(sbuf)));
|
2023-02-17 12:07:05 +01:00
|
|
|
nm_platform_ip_route_add(priv->platform, NMP_NLM_FLAG_APPEND, route_obj, NULL);
|
2022-11-23 08:29:48 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
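/* Flow sketch (illustration only): the two-phase contract between NML3Cfg and
 * NMNetns for IPv4 ECMP candidates, as implemented above. The caller shown
 * here is hypothetical; in practice it is NML3Cfg's commit path.
 *
 *   // 1. Register every IPv4 route that is a candidate for ECMP merging
 *   //    (a single-hop route with a positive "weight").
 *   nm_netns_ip_route_ecmp_register(netns, l3cfg, route_obj);
 *
 *   // 2. Once per commit cycle, let NMNetns merge/prune the tracked routes.
 *   //    Routes that end up single-hop are handed back so that the caller
 *   //    configures them as regular routes; multi-hop routes are added by
 *   //    NMNetns itself via nm_platform_ip_route_add().
 *   gs_unref_ptrarray GPtrArray *singlehop_routes = NULL;
 *
 *   nm_netns_ip_route_ecmp_commit(netns, l3cfg, &singlehop_routes, FALSE);
 */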
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2023-03-06 15:32:16 +01:00
|
|
|
static void
|
|
|
|
|
_watcher_data_set(NMNetnsWatcherData *dst,
|
|
|
|
|
NMNetnsWatcherType watcher_type,
|
|
|
|
|
const NMNetnsWatcherData *src)
|
|
|
|
|
{
|
|
|
|
|
nm_assert(dst);
|
|
|
|
|
nm_assert(src);
|
|
|
|
|
|
|
|
|
|
switch (watcher_type) {
|
|
|
|
|
case NM_NETNS_WATCHER_TYPE_IP_ADDR:
|
|
|
|
|
dst->ip_addr = src->ip_addr;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
nm_assert_not_reached();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
_watcher_data_hash(NMHashState *h, NMNetnsWatcherType watcher_type, const NMNetnsWatcherData *data)
|
|
|
|
|
{
|
|
|
|
|
nm_assert(h);
|
|
|
|
|
nm_assert(NM_NETNS_WATCHER_TYPE_VALID(watcher_type));
|
|
|
|
|
nm_assert(data);
|
|
|
|
|
|
|
|
|
|
switch (watcher_type) {
|
|
|
|
|
case NM_NETNS_WATCHER_TYPE_IP_ADDR:
|
|
|
|
|
nm_ip_addr_typed_hash_update(h, &data->ip_addr.addr);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
nm_assert_not_reached();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
_watcher_data_equal(NMNetnsWatcherType watcher_type,
|
|
|
|
|
const NMNetnsWatcherData *a,
|
|
|
|
|
const NMNetnsWatcherData *b)
|
|
|
|
|
{
|
|
|
|
|
nm_assert(NM_NETNS_WATCHER_TYPE_VALID(watcher_type));
|
|
|
|
|
nm_assert(a);
|
|
|
|
|
nm_assert(b);
|
|
|
|
|
|
|
|
|
|
switch (watcher_type) {
|
|
|
|
|
case NM_NETNS_WATCHER_TYPE_IP_ADDR:
|
|
|
|
|
return nm_ip_addr_typed_equal(&a->ip_addr.addr, &b->ip_addr.addr);
|
|
|
|
|
}
|
|
|
|
|
return nm_assert_unreachable_val(FALSE);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
_watcher_by_tag_destroy(WatcherByTag *watcher_by_tag)
|
|
|
|
|
{
|
|
|
|
|
c_list_unlink_stale(&watcher_by_tag->watcher_by_tag_lst_head);
|
|
|
|
|
nm_g_slice_free(watcher_by_tag);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
_watcher_handle_init(NMNetnsWatcherHandle *handle,
|
|
|
|
|
NMNetnsWatcherType watcher_type,
|
|
|
|
|
const NMNetnsWatcherData *watcher_data,
|
|
|
|
|
gconstpointer tag)
|
|
|
|
|
{
|
|
|
|
|
nm_assert(handle);
|
|
|
|
|
nm_assert(NM_NETNS_WATCHER_TYPE_VALID(watcher_type));
|
|
|
|
|
|
2024-10-04 10:45:56 +02:00
|
|
|
*handle = (NMNetnsWatcherHandle) {
|
2023-03-06 15:32:16 +01:00
|
|
|
.watcher_type = watcher_type,
|
|
|
|
|
.tag = tag,
|
|
|
|
|
.watcher_tag_lst = C_LIST_INIT(handle->watcher_tag_lst),
|
|
|
|
|
};
|
|
|
|
|
_watcher_data_set(&handle->watcher_data, watcher_type, watcher_data);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static guint
|
|
|
|
|
_watcher_handle_hash(gconstpointer data)
|
|
|
|
|
{
|
|
|
|
|
const NMNetnsWatcherHandle *watcher = data;
|
|
|
|
|
NMHashState h;
|
|
|
|
|
|
|
|
|
|
nm_assert(watcher);
|
|
|
|
|
nm_assert(watcher->tag);
|
|
|
|
|
|
|
|
|
|
nm_hash_init(&h, 2696278447u);
|
|
|
|
|
nm_hash_update_vals(&h, watcher->tag, watcher->watcher_type);
|
|
|
|
|
_watcher_data_hash(&h, watcher->watcher_type, &watcher->watcher_data);
|
|
|
|
|
return nm_hash_complete(&h);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
_watcher_handle_equal(gconstpointer a, gconstpointer b)
|
|
|
|
|
{
|
|
|
|
|
const NMNetnsWatcherHandle *ha = a;
|
|
|
|
|
const NMNetnsWatcherHandle *hb = b;
|
|
|
|
|
|
|
|
|
|
nm_assert(ha);
|
|
|
|
|
nm_assert(hb);
|
|
|
|
|
nm_assert(ha->tag);
|
|
|
|
|
nm_assert(hb->tag);
|
|
|
|
|
|
|
|
|
|
if (ha == hb)
|
|
|
|
|
return TRUE;
|
|
|
|
|
|
|
|
|
|
return (ha->tag == hb->tag) && (ha->watcher_type == hb->watcher_type)
|
|
|
|
|
&& _watcher_data_equal(ha->watcher_type, &ha->watcher_data, &hb->watcher_data);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static const char *
|
|
|
|
|
_watcher_handle_to_string(const NMNetnsWatcherHandle *handle, char *buf, gsize buf_size)
|
|
|
|
|
{
|
|
|
|
|
const char *buf0 = buf;
|
|
|
|
|
char sbuf[NM_INET_ADDRSTRLEN];
|
|
|
|
|
|
|
|
|
|
nm_strbuf_append(&buf,
|
|
|
|
|
&buf_size,
|
|
|
|
|
"h:" NM_HASH_OBFUSCATE_PTR_FMT "[",
|
|
|
|
|
NM_HASH_OBFUSCATE_PTR(handle));
|
|
|
|
|
|
|
|
|
|
if (handle->tag) {
|
|
|
|
|
nm_strbuf_append(&buf,
|
|
|
|
|
&buf_size,
|
|
|
|
|
"tag:" NM_HASH_OBFUSCATE_PTR_FMT ",",
|
|
|
|
|
NM_HASH_OBFUSCATE_PTR(handle->tag));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
switch (handle->watcher_type) {
|
|
|
|
|
case NM_NETNS_WATCHER_TYPE_IP_ADDR:
|
|
|
|
|
nm_strbuf_append_str(&buf, &buf_size, "ip-addr:");
|
|
|
|
|
nm_strbuf_append_str(&buf,
|
|
|
|
|
&buf_size,
|
|
|
|
|
nm_inet_ntop(handle->watcher_data.ip_addr.addr.addr_family,
|
|
|
|
|
&handle->watcher_data.ip_addr.addr.addr,
|
|
|
|
|
sbuf));
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
nm_assert_not_reached();
|
|
|
|
|
nm_strbuf_append_str(&buf, &buf_size, "unknown");
|
|
|
|
|
|
|
|
|
|
out:
|
|
|
|
|
nm_strbuf_append_c(&buf, &buf_size, ']');
|
|
|
|
|
return buf0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
_watcher_handle_notify(NMNetns *self,
|
|
|
|
|
NMNetnsWatcherHandle *handle,
|
|
|
|
|
const NMNetnsWatcherEventData *event_data)
|
|
|
|
|
{
|
|
|
|
|
nm_assert(NM_IS_NETNS(self));
|
|
|
|
|
nm_assert(handle);
|
|
|
|
|
nm_assert(handle->callback);
|
|
|
|
|
|
|
|
|
|
handle->callback(self,
|
|
|
|
|
handle->watcher_type,
|
|
|
|
|
&handle->watcher_data,
|
|
|
|
|
handle->tag,
|
|
|
|
|
event_data,
|
|
|
|
|
handle->callback_user_data);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static WatcherDataIPAddr *
|
|
|
|
|
_watcher_ip_data_lookup(NMNetns *self, int addr_family, gconstpointer addr)
|
|
|
|
|
{
|
|
|
|
|
WatcherDataIPAddr needle;
|
|
|
|
|
|
|
|
|
|
needle.addr.addr_family = addr_family;
|
|
|
|
|
nm_ip_addr_set(addr_family, &needle.addr.addr, addr);
|
|
|
|
|
return g_hash_table_lookup(NM_NETNS_GET_PRIVATE(self)->watcher_ip_data_idx, &needle);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static WatcherDataIPAddr *
|
|
|
|
|
_watcher_ip_data_lookup_addr(NMNetns *self, const NMIPAddrTyped *addr)
|
|
|
|
|
{
|
|
|
|
|
return _watcher_ip_data_lookup(self, addr->addr_family, &addr->addr);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static guint
|
|
|
|
|
_watcher_ip_data_hash(gconstpointer _data)
|
|
|
|
|
{
|
|
|
|
|
const WatcherDataIPAddr *data = _data;
|
|
|
|
|
NMHashState h;
|
|
|
|
|
|
|
|
|
|
nm_assert(data);
|
|
|
|
|
|
|
|
|
|
nm_hash_init(&h, 3152126191u);
|
|
|
|
|
nm_ip_addr_typed_hash_update(&h, &data->addr);
|
|
|
|
|
return nm_hash_complete(&h);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
_watcher_ip_data_equal(gconstpointer a, gconstpointer b)
|
|
|
|
|
{
|
|
|
|
|
const WatcherDataIPAddr *data_a = a;
|
|
|
|
|
const WatcherDataIPAddr *data_b = b;
|
|
|
|
|
|
|
|
|
|
nm_assert(data_a);
|
|
|
|
|
nm_assert(data_b);
|
|
|
|
|
|
|
|
|
|
return nm_ip_addr_typed_equal(&data_a->addr, &data_b->addr);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static NMNetnsWatcherHandle *
|
|
|
|
|
_watcher_lookup_handle(NMNetns *self,
|
|
|
|
|
NMNetnsWatcherType watcher_type,
|
|
|
|
|
const NMNetnsWatcherData *watcher_data,
|
|
|
|
|
gconstpointer tag)
|
|
|
|
|
{
|
|
|
|
|
NMNetnsWatcherHandle handle_needle;
|
|
|
|
|
|
|
|
|
|
nm_assert(NM_IS_NETNS(self));
|
|
|
|
|
nm_assert(tag);
|
|
|
|
|
|
|
|
|
|
_watcher_handle_init(&handle_needle, watcher_type, watcher_data, tag);
|
|
|
|
|
return g_hash_table_lookup(NM_NETNS_GET_PRIVATE(self)->watcher_idx, &handle_needle);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
_watcher_register_handle(NMNetns *self, NMNetnsWatcherHandle *handle)
|
|
|
|
|
{
|
|
|
|
|
NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
|
|
|
|
|
switch (handle->watcher_type) {
|
|
|
|
|
case NM_NETNS_WATCHER_TYPE_IP_ADDR:
|
|
|
|
|
{
|
|
|
|
|
WatcherDataIPAddr *data;
|
|
|
|
|
|
|
|
|
|
data = _watcher_ip_data_lookup_addr(self, &handle->watcher_data.ip_addr.addr);
|
|
|
|
|
if (!data) {
|
|
|
|
|
data = g_slice_new(WatcherDataIPAddr);
|
2024-10-04 10:45:56 +02:00
|
|
|
*data = (WatcherDataIPAddr) {
|
2023-03-06 15:32:16 +01:00
|
|
|
.addr = handle->watcher_data.ip_addr.addr,
|
|
|
|
|
.watcher_ip_addr_lst_head = C_LIST_INIT(data->watcher_ip_addr_lst_head),
|
|
|
|
|
};
|
|
|
|
|
if (!g_hash_table_add(priv->watcher_ip_data_idx, data))
|
|
|
|
|
nm_assert_not_reached();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
c_list_link_tail(&data->watcher_ip_addr_lst_head,
|
|
|
|
|
&handle->reg_data.ip_addr.watcher_ip_addr_lst);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
nm_assert_not_reached();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
_watcher_unregister_handle(NMNetns *self, NMNetnsWatcherHandle *handle)
|
|
|
|
|
{
|
|
|
|
|
NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
|
|
|
|
|
switch (handle->watcher_type) {
|
|
|
|
|
case NM_NETNS_WATCHER_TYPE_IP_ADDR:
|
|
|
|
|
{
|
|
|
|
|
gboolean is_last;
|
|
|
|
|
|
|
|
|
|
nm_assert(({
|
|
|
|
|
WatcherDataIPAddr *d;
|
|
|
|
|
|
|
|
|
|
d = _watcher_ip_data_lookup_addr(self, &handle->watcher_data.ip_addr.addr);
|
|
|
|
|
d && c_list_contains(&d->watcher_ip_addr_lst_head,
|
|
|
|
|
&handle->reg_data.ip_addr.watcher_ip_addr_lst);
|
|
|
|
|
}));
|
|
|
|
|
|
|
|
|
|
is_last = c_list_is_empty_or_single(&handle->reg_data.ip_addr.watcher_ip_addr_lst);
|
|
|
|
|
|
|
|
|
|
c_list_unlink(&handle->reg_data.ip_addr.watcher_ip_addr_lst);
|
|
|
|
|
|
|
|
|
|
if (is_last) {
|
|
|
|
|
WatcherDataIPAddr *data;
|
|
|
|
|
|
|
|
|
|
data = _watcher_ip_data_lookup_addr(self, &handle->watcher_data.ip_addr.addr);
|
|
|
|
|
nm_assert(data);
|
|
|
|
|
nm_assert(c_list_is_empty(&data->watcher_ip_addr_lst_head));
|
|
|
|
|
|
|
|
|
|
if (!g_hash_table_remove(priv->watcher_ip_data_idx, data))
|
|
|
|
|
nm_assert_not_reached();
|
|
|
|
|
|
|
|
|
|
nm_g_slice_free(data);
|
|
|
|
|
}
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
nm_assert_not_reached();
|
|
|
|
|
}
|
|
|
|
|
|
2023-03-09 07:45:27 +01:00
|
|
|
void
|
2023-03-06 15:32:16 +01:00
|
|
|
nm_netns_watcher_add(NMNetns *self,
|
|
|
|
|
NMNetnsWatcherType watcher_type,
|
|
|
|
|
const NMNetnsWatcherData *watcher_data,
|
|
|
|
|
gconstpointer tag,
|
|
|
|
|
NMNetnsWatcherCallback callback,
|
|
|
|
|
gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
NMNetnsPrivate *priv;
|
|
|
|
|
NMNetnsWatcherHandle *handle;
|
|
|
|
|
gboolean is_new = FALSE;
|
|
|
|
|
char sbuf[500];
|
|
|
|
|
|
2023-03-09 07:45:27 +01:00
|
|
|
g_return_if_fail(NM_IS_NETNS(self));
|
|
|
|
|
g_return_if_fail(NM_NETNS_WATCHER_TYPE_VALID(watcher_type));
|
|
|
|
|
g_return_if_fail(callback);
|
|
|
|
|
g_return_if_fail(tag);
|
2023-03-06 15:32:16 +01:00
|
|
|
|
|
|
|
|
priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
|
2023-03-09 07:45:27 +01:00
|
|
|
handle = _watcher_lookup_handle(self, watcher_type, watcher_data, tag);
|
2023-03-06 15:32:16 +01:00
|
|
|
|
|
|
|
|
if (!handle) {
|
2023-03-09 07:45:27 +01:00
|
|
|
WatcherByTag *watcher_by_tag;
|
|
|
|
|
|
|
|
|
|
if (G_UNLIKELY(g_hash_table_size(priv->watcher_idx) == 0))
|
2023-03-06 15:32:16 +01:00
|
|
|
g_object_ref(self);
|
|
|
|
|
|
|
|
|
|
handle = g_slice_new(NMNetnsWatcherHandle);
|
|
|
|
|
_watcher_handle_init(handle, watcher_type, watcher_data, tag);
|
|
|
|
|
|
2023-03-09 07:45:27 +01:00
|
|
|
if (!g_hash_table_add(priv->watcher_idx, handle))
|
|
|
|
|
nm_assert_not_reached();
|
2023-03-06 15:32:16 +01:00
|
|
|
|
2023-03-09 07:45:27 +01:00
|
|
|
watcher_by_tag = g_hash_table_lookup(priv->watcher_by_tag_idx, &tag);
|
2023-03-06 15:32:16 +01:00
|
|
|
|
2023-03-09 07:45:27 +01:00
|
|
|
if (!watcher_by_tag) {
|
|
|
|
|
watcher_by_tag = g_slice_new(WatcherByTag);
|
2024-10-04 10:45:56 +02:00
|
|
|
*watcher_by_tag = (WatcherByTag) {
|
2023-03-09 07:45:27 +01:00
|
|
|
.tag = tag,
|
|
|
|
|
.watcher_by_tag_lst_head = C_LIST_INIT(watcher_by_tag->watcher_by_tag_lst_head),
|
|
|
|
|
};
|
|
|
|
|
g_hash_table_add(priv->watcher_by_tag_idx, watcher_by_tag);
|
|
|
|
|
}
|
2023-03-06 15:32:16 +01:00
|
|
|
|
2023-03-09 07:45:27 +01:00
|
|
|
c_list_link_tail(&watcher_by_tag->watcher_by_tag_lst_head, &handle->watcher_tag_lst);
|
2023-03-06 15:32:16 +01:00
|
|
|
|
|
|
|
|
is_new = TRUE;
|
|
|
|
|
} else {
|
2023-03-09 07:45:27 +01:00
|
|
|
/* Handles are deduplicated/shared. Hence it is error prone (and likely
|
|
|
|
|
* a bug) to provide different callback/user_data. Such usage is
|
|
|
|
|
* rejected here.
|
2023-03-06 15:32:16 +01:00
|
|
|
*
|
|
|
|
|
* This could be made to work, for example by not allowing handles to
|
|
|
|
|
* be merged or simply requiring the caller to be careful to not get
|
|
|
|
|
* this wrong. But that is currently not implemented nor needed.
|
|
|
|
|
*/
|
|
|
|
|
nm_assert(!tag
|
|
|
|
|
|| (handle->callback == callback && handle->callback_user_data == user_data));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (_LOGT_ENABLED()
|
|
|
|
|
&& (is_new || handle->callback != callback || handle->callback_user_data != user_data)) {
|
|
|
|
|
_LOGT("netns-watcher: %s %s",
|
|
|
|
|
is_new ? "register" : "update",
|
|
|
|
|
_watcher_handle_to_string(handle, sbuf, sizeof(sbuf)));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
handle->callback = callback;
|
|
|
|
|
handle->callback_user_data = user_data;
|
|
|
|
|
handle->watcher_dirty = FALSE;
|
|
|
|
|
|
|
|
|
|
if (is_new)
|
|
|
|
|
_watcher_register_handle(self, handle);
|
|
|
|
|
|
2023-03-09 07:45:27 +01:00
|
|
|
/* We cannot return a handle here, because handles are deduplicated via the priv->watcher_idx dictionary.
|
|
|
|
|
* The usage pattern is to use nm_netns_watcher_remove_all(), and not remove them one by one.
|
|
|
|
|
* As nm_netns_watcher_add() can return the same handle more than once, the user
|
|
|
|
|
* wouldn't know when it's safe to call nm_netns_watcher_remove_handle().
|
|
|
|
|
*
|
|
|
|
|
* This could be extended by adding a ref-count to the handles. But that is not
|
|
|
|
|
* used currently, so it's not possible to remove watcher by their handle. */
|
2023-03-06 15:32:16 +01:00
|
|
|
}
|
|
|
|
|
|
2023-03-09 07:45:27 +01:00
|
|
|
static void
|
2023-03-06 15:32:16 +01:00
|
|
|
nm_netns_watcher_remove_handle(NMNetns *self, NMNetnsWatcherHandle *handle)
|
|
|
|
|
{
|
|
|
|
|
NMNetnsPrivate *priv;
|
|
|
|
|
char sbuf[500];
|
|
|
|
|
|
|
|
|
|
g_return_if_fail(NM_IS_NETNS(self));
|
|
|
|
|
g_return_if_fail(handle);
|
2023-03-09 07:45:27 +01:00
|
|
|
nm_assert(handle->tag);
|
2023-03-06 15:32:16 +01:00
|
|
|
|
|
|
|
|
priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
|
2023-03-09 07:45:27 +01:00
|
|
|
nm_assert(g_hash_table_lookup(priv->watcher_idx, handle) == handle);
|
2023-03-06 15:32:16 +01:00
|
|
|
|
|
|
|
|
_LOGT("netns-watcher: %s %s",
|
|
|
|
|
"unregister",
|
|
|
|
|
_watcher_handle_to_string(handle, sbuf, sizeof(sbuf)));
|
|
|
|
|
|
|
|
|
|
_watcher_unregister_handle(self, handle);
|
|
|
|
|
|
2023-03-09 07:45:27 +01:00
|
|
|
if (!g_hash_table_remove(priv->watcher_idx, handle))
|
|
|
|
|
nm_assert_not_reached();
|
2023-03-06 15:32:16 +01:00
|
|
|
|
2023-03-09 07:45:27 +01:00
|
|
|
if (c_list_is_empty_or_single(&handle->watcher_tag_lst)) {
|
|
|
|
|
if (!g_hash_table_remove(priv->watcher_by_tag_idx, &handle->tag))
|
|
|
|
|
nm_assert_not_reached();
|
2023-03-06 15:32:16 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
c_list_unlink_stale(&handle->watcher_tag_lst);
|
|
|
|
|
nm_g_slice_free(handle);
|
|
|
|
|
|
2023-03-09 07:45:27 +01:00
|
|
|
if (G_UNLIKELY(g_hash_table_size(priv->watcher_idx) == 0))
|
2023-03-06 15:32:16 +01:00
|
|
|
g_object_unref(self);
|
|
|
|
|
}
|
|
|
|
|
|
2025-02-20 13:43:50 +01:00
|
|
|
static void
|
|
|
|
|
watcher_remove(NMNetns *self, gconstpointer tag, gboolean all)
|
2023-03-06 15:32:16 +01:00
|
|
|
{
|
|
|
|
|
NMNetnsPrivate *priv;
|
|
|
|
|
WatcherByTag *watcher_by_tag;
|
|
|
|
|
NMNetnsWatcherHandle *handle;
|
|
|
|
|
NMNetnsWatcherHandle *handle_safe;
|
|
|
|
|
|
|
|
|
|
g_return_if_fail(NM_IS_NETNS(self));
|
|
|
|
|
|
|
|
|
|
/* remove-all only works with handles that have a tag associated.
|
|
|
|
|
* Since NMNetns can have multiple users that are unknown to each
|
|
|
|
|
* other, it makes no sense to have a remove-all function which
|
2023-03-09 07:45:27 +01:00
|
|
|
* would remove all of them. */
|
2023-03-06 15:32:16 +01:00
|
|
|
g_return_if_fail(tag);
|
|
|
|
|
|
|
|
|
|
priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
|
|
|
|
|
watcher_by_tag = g_hash_table_lookup(priv->watcher_by_tag_idx, &tag);
|
|
|
|
|
if (!watcher_by_tag)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
c_list_for_each_entry_safe (handle,
|
|
|
|
|
handle_safe,
|
|
|
|
|
&watcher_by_tag->watcher_by_tag_lst_head,
|
|
|
|
|
watcher_tag_lst) {
|
|
|
|
|
gboolean is_last;
|
|
|
|
|
|
|
|
|
|
if (!all && !handle->watcher_dirty) {
|
|
|
|
|
/* Survivors are marked as dirty. This enables a pattern where you
|
|
|
|
|
* call nm_netns_watcher_add() on the elements you care about
|
|
|
|
|
* (which clears the dirty flag), and then remove all dirty ones
|
|
|
|
|
* with nm_netns_watcher_remove_all() (which marks the remaining
|
|
|
|
|
* handles as dirty for the next time). */
|
|
|
|
|
handle->watcher_dirty = TRUE;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
is_last = c_list_is_empty_or_single(&watcher_by_tag->watcher_by_tag_lst_head);
|
|
|
|
|
nm_netns_watcher_remove_handle(self, handle);
|
|
|
|
|
|
|
|
|
|
if (is_last) {
|
|
|
|
|
/* Removing the last handle destroys the "watcher_by_tag" and may even
|
|
|
|
|
* destroy "self". We must not touch those pointers hereafter.
|
|
|
|
|
*
|
|
|
|
|
* If you ever do *not* return here, make sure to handle that! */
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-02-20 13:43:50 +01:00
|
|
|
void
|
|
|
|
|
nm_netns_watcher_remove_all(NMNetns *self, gconstpointer tag)
|
|
|
|
|
{
|
|
|
|
|
watcher_remove(self, tag, TRUE);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Similar to nm_netns_watcher_remove_all(), but removes only watchers
|
|
|
|
|
* that were marked as "dirty" in a previous call of this function and were
|
|
|
|
|
* not added back via nm_netns_watcher_add() in the meantime. */
|
|
|
|
|
void
|
|
|
|
|
nm_netns_watcher_remove_dirty(NMNetns *self, gconstpointer tag)
|
|
|
|
|
{
|
|
|
|
|
watcher_remove(self, tag, FALSE);
|
|
|
|
|
}
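/* Usage sketch (illustration only): the add/remove-dirty pattern that the
 * "watcher_dirty" flag enables. "TAG", "data1"/"data2", "cb" and "user_data"
 * are assumptions; the calls themselves match the API above.
 *
 *   // Re-add (or keep) exactly the watchers that are currently wanted.
 *   // nm_netns_watcher_add() clears the dirty flag on each of them.
 *   nm_netns_watcher_add(netns, NM_NETNS_WATCHER_TYPE_IP_ADDR, &data1, TAG, cb, user_data);
 *   nm_netns_watcher_add(netns, NM_NETNS_WATCHER_TYPE_IP_ADDR, &data2, TAG, cb, user_data);
 *
 *   // Drop every watcher of TAG that was not (re-)added above, and mark the
 *   // survivors as dirty for the next round.
 *   nm_netns_watcher_remove_dirty(netns, TAG);
 *
 *   // On teardown, drop everything registered under TAG.
 *   nm_netns_watcher_remove_all(netns, TAG);
 */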
|
|
|
|
|
|
2023-03-06 15:32:16 +01:00
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2017-04-17 18:40:52 +02:00
|
|
|
static void
|
|
|
|
|
set_property(GObject *object, guint prop_id, const GValue *value, GParamSpec *pspec)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMNetns *self = NM_NETNS(object);
|
2017-04-17 18:40:52 +02:00
|
|
|
NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-04-17 18:40:52 +02:00
|
|
|
switch (prop_id) {
|
|
|
|
|
case PROP_PLATFORM:
|
|
|
|
|
/* construct-only */
|
|
|
|
|
priv->platform = g_value_get_object(value) ?: NM_PLATFORM_GET;
|
|
|
|
|
if (!priv->platform)
|
|
|
|
|
g_return_if_reached();
|
|
|
|
|
g_object_ref(priv->platform);
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
nm_netns_init(NMNetns *self)
|
|
|
|
|
{
|
2020-07-21 11:21:44 +02:00
|
|
|
NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
|
|
|
|
|
priv->_self_signal_user_data = self;
|
2023-03-06 15:32:16 +01:00
|
|
|
|
2020-07-21 11:21:44 +02:00
|
|
|
c_list_init(&priv->l3cfg_signal_pending_lst_head);
|
2022-11-23 08:29:48 +01:00
|
|
|
|
|
|
|
|
G_STATIC_ASSERT_EXPR(G_STRUCT_OFFSET(EcmpTrackObj, obj) == 0);
|
|
|
|
|
priv->ecmp_track_by_obj =
|
|
|
|
|
g_hash_table_new_full(nm_pdirect_hash, nm_pdirect_equal, _ecmp_routes_by_obj_free, NULL);
|
|
|
|
|
priv->ecmp_track_by_ecmpid = g_hash_table_new_full(_ecmp_routes_by_ecmpid_hash,
|
|
|
|
|
_ecmp_routes_by_ecmpid_equal,
|
|
|
|
|
_ecmp_routes_by_ecmpid_free,
|
|
|
|
|
NULL);
|
2023-03-06 15:32:16 +01:00
|
|
|
|
|
|
|
|
priv->watcher_idx = g_hash_table_new(_watcher_handle_hash, _watcher_handle_equal);
|
|
|
|
|
G_STATIC_ASSERT_EXPR(G_STRUCT_OFFSET(WatcherByTag, tag) == 0);
|
|
|
|
|
priv->watcher_by_tag_idx = g_hash_table_new_full(nm_pdirect_hash,
|
|
|
|
|
nm_pdirect_equal,
|
|
|
|
|
(GDestroyNotify) _watcher_by_tag_destroy,
|
|
|
|
|
NULL);
|
|
|
|
|
priv->watcher_ip_data_idx = g_hash_table_new(_watcher_ip_data_hash, _watcher_ip_data_equal);
|
2017-04-17 18:40:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
constructed(GObject *object)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMNetns *self = NM_NETNS(object);
|
2017-04-17 18:40:52 +02:00
|
|
|
NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
|
|
|
|
|
if (!priv->platform)
|
|
|
|
|
g_return_if_reached();
|
|
|
|
|
|
2022-12-09 18:13:20 +01:00
|
|
|
priv->l3cfgs = g_hash_table_new_full(nm_pint_hash, nm_pint_equal, _l3cfg_hashed_free, NULL);
|
2020-07-18 19:01:04 +02:00
|
|
|
|
2017-04-17 18:40:52 +02:00
|
|
|
priv->platform_netns = nm_platform_netns_get(priv->platform);
|
|
|
|
|
|
platform: rename NMPRouteManager to NMPGlobalTracker
NetworkManager primarily manages interfaces in an independent fashion.
That means, whenever possible, we want to have an interface-specific
view. In many cases, the underlying kernel API also supports that view.
For example, when configuring IP addresses or unicast routes, we do so
per interface and don't need a holistic view.
However, that is not always sufficient. For routing rules and certain
route types (blackhole, unreachable, etc), we need a system wide view
of all the objects in the network namespace.
Originally, NMPRulesManager was added to track routing rules. Then, it
was extended to also track certain route types, and the API was renamed to
NMPRouteManager.
This will also be used to track MPTCP addresses.
So rename again, to give it a general name that is suitable for what it
does. Still, the name is not great (suggestion welcome), but it should
cover the purpose of the API well enough. And it's the best I came
up with.
Rename.
2022-07-18 09:12:09 +02:00
|
|
|
priv->global_tracker = nmp_global_tracker_new(priv->platform);
|
platform: support weakly tracked routing rules in NMPRulesManager
Policy routing rules are global and, unlike routes, not tied to an interface by ifindex.
That means, while we take full control over all routes of an interface during a sync,
we need to consider that multiple parties can contribute to the global set of rules.
That might be multiple connection profiles providing the same rule, or rules that are added
externally by the user. NMPRulesManager mediates for that.
This is done by NMPRulesManager "tracking" rules.
Rules that are not tracked by NMPRulesManager are completely ignored (and
considered externally added).
When tracking a rule, the caller provides a track-priority. If multiple
parties track a rule, then the highest (absolute value of the) priority
wins.
If the highest track-priority is positive, NMPRulesManager will add the rule if
it's not present.
When the highest track-priority is negative, then NMPRulesManager will remove the
rule if it's present (enforce its absence).
The complicated part is when a rule that was previously tracked becomes no
longer tracked. In that case, we need to restore the previous state.
If NetworkManager added the rule earlier, then on untracking the rule
NMPRulesManager will remove the rule again (restore its previous absent
state).
By default, if NetworkManager had a negative tracking-priority and removed the
rule earlier (enforced it to be absent), then when the rule becomes no
longer tracked, NetworkManager will not restore the rule.
Consider: the user adds a rule externally, and then activates a profile that
enforces the absence of the rule (causing NetworkManager to remove it).
When deactivating the profile, by default NetworkManager will not
restore such a rule! It's unclear whether that is a good idea, but it's
also unclear why the rule is there and whether NetworkManager should
really restore it.
Add weakly tracked rules to account for that. A tracking-priority of
zero indicates such weakly tracked rules. The only difference between an untracked
rule and a weakly tracked rule is that, when NetworkManager earlier removed the
rule (due to a negative tracking-priority), it *will* restore weakly
tracked rules when the rules become no longer (negatively) tracked.
And it attempts to do that only once.
Likewise, if the rule is weakly tracked and already exists when
NMPRulesManager starts positively tracking the rule, then it would not
remove it again when no longer positively tracking it.
2019-04-10 13:47:52 +02:00
|
|
|
|
policy-routing: take ownership of externally configured rules
IP addresses, routes, TC and QDiscs are all tied to a certain interface.
So when NetworkManager manages an interface, it can be confident that
all related entries should be managed, deleted and modified by NetworkManager.
Routing policy rules are global. For that we have NMPRulesManager which
keeps track of whether NetworkManager owns a rule. This allows multiple
connection profiles to specify the same rule, and NMPRulesManager can
consolidate this information to know whether to add or remove the rule.
NMPRulesManager would also support explicitly blocking a rule by
tracking it with negative priority. However, that is still unused at
the moment. All that devices do is add rules (track with positive
priority) and remove them (untrack) once the profile gets deactivated.
As rules are not exclusively owned by NetworkManager, NetworkManager
tries not to interfere with rules that it knows nothing about. That
means in particular that when NetworkManager starts, it will "weakly track"
all rules that are present. "weakly track" is mostly interesting for two
cases:
- when NMPRulesManager had the same rule explicitly tracked (added) by a
device, then deactivating the device will leave the rule in place.
- when NMPRulesManager had the same rule explicitly blocked (tracked
with negative priority), then it would restore the rule when that
block gets removed (as said, currently nobody actually does this).
Note that when restarting NetworkManager, the device may stay and
the rules be kept. However, after restart, NetworkManager no longer knows
that it previously added this route, so it would weakly track it and
never remove it again.
That is a problem. Avoid that: whenever we explicitly track a rule, we
also make sure to no longer weakly track it. Most likely this rule was
indeed previously managed by NetworkManager. If this was really a rule
added externally, then the user really should choose distinct
rule priorities to avoid such conflicts altogether.
2019-07-12 11:19:43 +02:00
|
|
|
/* Weakly track the default rules with a dummy user-tag. These
|
|
|
|
|
* rules are always weakly tracked... */
|
2022-07-18 09:12:09 +02:00
|
|
|
nmp_global_tracker_track_rule_default(priv->global_tracker,
|
|
|
|
|
AF_UNSPEC,
|
|
|
|
|
0,
|
|
|
|
|
nm_netns_parent_class /* static dummy user-tag */);
|
2019-07-12 11:19:43 +02:00
|
|
|
|
|
|
|
|
/* Also weakly track all existing rules. These were added before NetworkManager
|
|
|
|
|
* starts, so they are probably none of NetworkManager's business.
|
|
|
|
|
*
|
|
|
|
|
* However note that during service restart, devices may stay up and rules kept.
|
|
|
|
|
* That means, after restart such rules may have been added by a previous run
|
|
|
|
|
* of NetworkManager, we just don't know.
|
|
|
|
|
*
|
|
|
|
|
* For that reason, whenever we touch such rules later on, we make them
|
2022-07-18 09:12:09 +02:00
|
|
|
     * fully owned and no longer weakly tracked. See %NMP_GLOBAL_TRACKER_EXTERN_WEAKLY_TRACKED_USER_TAG. */
    nmp_global_tracker_track_rule_from_platform(priv->global_tracker,
                                                NULL,
                                                AF_UNSPEC,
                                                0,
                                                NMP_GLOBAL_TRACKER_EXTERN_WEAKLY_TRACKED_USER_TAG);

    G_OBJECT_CLASS(nm_netns_parent_class)->constructed(object);

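    /* Subscribe to the platform change notifications. All of these signals are
     * funnelled into the same callback, _platform_signal_cb(), with the same
     * user-data pointer, so that dispose() can detach them all at once via
     * g_signal_handlers_disconnect_by_data(). */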
    g_signal_connect(priv->platform,
                     NM_PLATFORM_SIGNAL_LINK_CHANGED,
                     G_CALLBACK(_platform_signal_cb),
                     &priv->_self_signal_user_data);

l3cfg: add nm_l3cfg_property_emit_register() API

The NML3Cfg instance tracks and prepares the IP configuration. However,
that is also partly exposed on other objects, like NMIP4Config's
"route-data" property.

Add an API so that NMIP4Config can register itself to be notified when
something relevant changes.

This is an alternative to standard GObject properties and signals, which
often seem like more effort than they are worth. In this case,
NMIP4Config's "route-data" has no other task than to re-emit the signal.
So, to implement that with GObject properties/signals, we would have to
add a property/signal to NML3Cfg, subscribe to it from NMIP4Config, and
re-emit the signal. An alternative is to bind properties, but that would
still be quite some extra code, and it is unclear that it would be
simpler. Not to mention the overhead, as bindings are themselves full
GObject instances that register to and emit signals by name.

2020-07-21 12:52:42 +02:00
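As a rough illustration of the trade-off (the property name and callback below
are assumptions made up for the example, not the actual NML3Cfg API):

    /* Illustrative sketch only. With plain GObject machinery, the consumer
     * would subscribe to a notify signal that NML3Cfg would have to define and
     * emit:
     *
     *     g_signal_connect(l3cfg, "notify::some-property",
     *                      G_CALLBACK(_on_l3cfg_changed), self);
     *
     * With nm_l3cfg_property_emit_register(), the consumer instead registers a
     * callback directly, and NML3Cfg invokes it when the relevant state
     * changes, without defining an intermediate GObject property or signal. */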
    g_signal_connect(priv->platform,
                     NM_PLATFORM_SIGNAL_IP4_ROUTE_CHANGED,
                     G_CALLBACK(_platform_signal_cb),
                     &priv->_self_signal_user_data);

    g_signal_connect(priv->platform,
                     NM_PLATFORM_SIGNAL_IP6_ROUTE_CHANGED,
                     G_CALLBACK(_platform_signal_cb),
                     &priv->_self_signal_user_data);

    g_signal_connect(priv->platform,
                     NM_PLATFORM_SIGNAL_IP4_ADDRESS_CHANGED,
                     G_CALLBACK(_platform_signal_cb),
                     &priv->_self_signal_user_data);

    g_signal_connect(priv->platform,
                     NM_PLATFORM_SIGNAL_IP6_ADDRESS_CHANGED,
                     G_CALLBACK(_platform_signal_cb),
                     &priv->_self_signal_user_data);
}

NMNetns *
nm_netns_new(NMPlatform *platform)
{
    return g_object_new(NM_TYPE_NETNS, NM_NETNS_PLATFORM, platform, NULL);
}

static void
dispose(GObject *object)
{
    NMNetns        *self = NM_NETNS(object);
    NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);

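    /* By the time the NMNetns instance gets disposed, all users must already
     * have unregistered: no NML3Cfg instances, no watchers and no IP
     * reservations may be left. The asserts below only verify that; the actual
     * teardown of the remaining state follows. */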
    nm_assert(nm_g_hash_table_size(priv->l3cfgs) == 0);
    nm_assert(c_list_is_empty(&priv->l3cfg_signal_pending_lst_head));

    nm_assert(nm_g_hash_table_size(priv->watcher_idx) == 0);
    nm_assert(nm_g_hash_table_size(priv->watcher_by_tag_idx) == 0);
    nm_assert(nm_g_hash_table_size(priv->watcher_ip_data_idx) == 0);

    for (guint i = 0; i < _NM_NETNS_IP_RESERVATION_TYPE_NUM; i++) {
        nm_assert(!priv->ip_reservation[i]);
    }

    nm_clear_pointer(&priv->ecmp_track_by_obj, g_hash_table_destroy);
    nm_clear_pointer(&priv->ecmp_track_by_ecmpid, g_hash_table_destroy);

    nm_clear_pointer(&priv->watcher_idx, g_hash_table_destroy);
    nm_clear_pointer(&priv->watcher_by_tag_idx, g_hash_table_destroy);
    nm_clear_pointer(&priv->watcher_ip_data_idx, g_hash_table_destroy);

    nm_clear_g_source_inst(&priv->signal_pending_idle_source);

    if (priv->platform)
        g_signal_handlers_disconnect_by_data(priv->platform, &priv->_self_signal_user_data);

    g_clear_object(&priv->platform);
    nm_clear_pointer(&priv->l3cfgs, g_hash_table_unref);

platform: rename NMPRouteManager to NMPGlobalTracker

NetworkManager primarily manages interfaces in an independent fashion.
That means, whenever possible, we want to have an interface-specific
view. In many cases, the underlying kernel API also supports that view.
For example, when configuring IP addresses or unicast routes, we do so
per interface and don't need a holistic view.

However, that is not always sufficient. For routing rules and certain
route types (blackhole, unreachable, etc.), we need a system-wide view
of all the objects in the network namespace.

Originally, NMPRulesManager was added to track routing rules. Then it was
extended to also track certain route types, and the API was renamed to
NMPRouteManager. It will also be used to track MPTCP addresses.

So rename again, to give it a general name that is suitable for what it
does. The name is still not great (suggestions welcome), but it should
cover the purpose of the API well enough, and it's the best I came up
with.

Rename.

2022-07-18 09:12:09 +02:00
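    /* The global tracker is reference counted; the call below only releases
     * the reference held by this NMNetns instance. */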
    nm_clear_pointer(&priv->global_tracker, nmp_global_tracker_unref);

    G_OBJECT_CLASS(nm_netns_parent_class)->dispose(object);
}

static void
nm_netns_class_init(NMNetnsClass *klass)
{
    GObjectClass *object_class = G_OBJECT_CLASS(klass);

    object_class->constructed  = constructed;
    object_class->set_property = set_property;
    object_class->dispose      = dispose;

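    /* "platform" is a construct-only, write-only dependency: nm_netns_new()
     * injects the NMPlatform instance and constructed() then subscribes to its
     * signals. */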
    obj_properties[PROP_PLATFORM] =
        g_param_spec_object(NM_NETNS_PLATFORM,
                            "",
                            "",
                            NM_TYPE_PLATFORM,
                            G_PARAM_WRITABLE | G_PARAM_CONSTRUCT_ONLY | G_PARAM_STATIC_STRINGS);

    g_object_class_install_properties(object_class, _PROPERTY_ENUMS_LAST, obj_properties);
}