/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2017 Red Hat, Inc.
 */

#include "src/core/nm-default-daemon.h"

#include "nm-netns.h"

#include "libnm-glib-aux/nm-dedup-multi.h"
#include "libnm-glib-aux/nm-c-list.h"
#include "NetworkManagerUtils.h"
#include "libnm-core-intern/nm-core-internal.h"
#include "nm-l3cfg.h"
#include "libnm-platform/nm-platform.h"
#include "libnm-platform/nmp-netns.h"
#include "libnm-platform/nmp-global-tracker.h"
#include "libnm-std-aux/c-list-util.h"

/*****************************************************************************/
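/* NMNetns tracks the per-network-namespace state of the daemon: the NMPlatform
 * instance (and its NMPNetns), the NMPGlobalTracker, one NML3Cfg per ifindex,
 * the reserved shared-IP ranges, and the state for merging ECMP routes. */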
NM_GOBJECT_PROPERTIES_DEFINE_BASE(PROP_PLATFORM, );

typedef struct {
    NMNetns *_self_signal_user_data;

    NMPlatform *platform;
    NMPNetns   *platform_netns;

    NMPGlobalTracker *global_tracker;

    GHashTable *l3cfgs;

    GHashTable *shared_ips;

    GHashTable *ecmp_track_by_obj;
    GHashTable *ecmp_track_by_ecmpid;

    CList l3cfg_signal_pending_lst_head;

    GSource *signal_pending_idle_source;
} NMNetnsPrivate;

struct _NMNetns {
    GObject        parent;
    NMNetnsPrivate _priv;
};

struct _NMNetnsClass {
    GObjectClass parent;
};

G_DEFINE_TYPE(NMNetns, nm_netns, G_TYPE_OBJECT);

#define NM_NETNS_GET_PRIVATE(self) _NM_GET_PRIVATE(self, NMNetns, NM_IS_NETNS)

/*****************************************************************************/
#define _NMLOG_DOMAIN      LOGD_CORE
#define _NMLOG_PREFIX_NAME "netns"
#define _NMLOG(level, ...) \
    G_STMT_START \
    { \
        nm_log((level), \
               (_NMLOG_DOMAIN), \
               NULL, \
               NULL, \
               "netns[" NM_HASH_OBFUSCATE_PTR_FMT "]: " _NM_UTILS_MACRO_FIRST(__VA_ARGS__), \
               NM_HASH_OBFUSCATE_PTR(self) _NM_UTILS_MACRO_REST(__VA_ARGS__)); \
    } \
    G_STMT_END

/*****************************************************************************/
NM_DEFINE_SINGLETON_GETTER(NMNetns, nm_netns_get, NM_TYPE_NETNS);

/*****************************************************************************/
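/* Assert that @l3cfg is the NML3Cfg registered for its ifindex in @self. The
 * registry lookup is comparatively expensive and only runs with NM_MORE_ASSERTS > 5. */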
#define nm_assert_l3cfg(self, l3cfg) \
    G_STMT_START \
    { \
        NMNetns *_self  = (self); \
        NML3Cfg *_l3cfg = (l3cfg); \
        \
        nm_assert(NM_IS_NETNS(_self)); \
        nm_assert(NM_IS_L3CFG(_l3cfg)); \
        if (NM_MORE_ASSERTS > 5) \
            nm_assert(_l3cfg == nm_netns_l3cfg_get(_self, nm_l3cfg_get_ifindex(_l3cfg))); \
    } \
    G_STMT_END

/*****************************************************************************/
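/* ECMP (multipath) IPv4 routes are tracked on two levels: every registered
 * single-hop route gets an EcmpTrackObj (priv->ecmp_track_by_obj), and all
 * single-hop routes that share the same ECMP ID are linked into one
 * EcmpTrackEcmpid (priv->ecmp_track_by_ecmpid), from which the merged
 * multi-hop route is built. */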
typedef struct {
    const NMPObject *representative_obj;
    const NMPObject *merged_obj;

    CList ecmpid_lst_head;

    bool needs_update : 1;
    bool already_visited : 1;
} EcmpTrackEcmpid;

typedef struct {
    const NMPObject *obj;

    NML3Cfg *l3cfg;

    EcmpTrackEcmpid *parent_track_ecmpid;

    CList ifindex_lst;
    CList ecmpid_lst;

    /* Calling nm_netns_ip_route_ecmp_register() will ensure that the tracked
     * entry is non-dirty. This can be used to remove stale entries. */
    bool dirty : 1;

    /* This flag is set during nm_netns_ip_route_ecmp_register(), when first tracking the
     * route. It is cleared on the next nm_netns_ip_route_ecmp_commit(). It thus only
     * exists for a short time, to know during a commit that the route is new and
     * we need to do something special. */
    bool is_new : 1;

    /* The entry is ready to be configured. This exists because the nexthop of
     * a route must be directly reachable (onlink). That is, we may need
     * to add a direct, single-hop route to the gateway, which is done by
     * the NML3Cfg of that interface. Since the NML3Cfg calls nm_netns_ip_route_ecmp_commit()
     * and only adds the direct route afterwards, the ECMP route may not be ready
     * right away, but only upon seeing the entry a second time. */
    bool is_ready : 1;
} EcmpTrackObj;
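/* Sort the single-hop routes that share one ECMP ID into a deterministic order
 * (by ifindex, weight and gateway), so that the merged nexthop list is stable. */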
static int
_ecmp_track_sort_lst_cmp(const CList *a, const CList *b, const void *user_data)
{
    EcmpTrackObj             *track_obj_a = c_list_entry(a, EcmpTrackObj, ecmpid_lst);
    EcmpTrackObj             *track_obj_b = c_list_entry(b, EcmpTrackObj, ecmpid_lst);
    const NMPlatformIP4Route *route_a     = NMP_OBJECT_CAST_IP4_ROUTE(track_obj_a->obj);
    const NMPlatformIP4Route *route_b     = NMP_OBJECT_CAST_IP4_ROUTE(track_obj_b->obj);

    nm_assert(route_a->ifindex > 0);
    nm_assert(route_a->n_nexthops <= 1);
    nm_assert(route_b->ifindex > 0);
    nm_assert(route_b->n_nexthops <= 1);

    NM_CMP_FIELD(route_a, route_b, ifindex);
    NM_CMP_FIELD(route_b, route_a, weight);
    NM_CMP_DIRECT(htonl(route_a->gateway), htonl(route_b->gateway));

    return nm_assert_unreachable_val(
        nm_platform_ip4_route_cmp(route_a, route_b, NM_PLATFORM_IP_ROUTE_CMP_TYPE_ID));
}
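/* Rebuild track_ecmpid->merged_obj by merging all tracked single-hop routes
 * into one multi-hop route. Returns TRUE if the merged route needs to be
 * (re-)committed; if a previously merged object gets replaced, it is returned
 * via @out_obj_del. */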
static gboolean
_ecmp_track_init_merged_obj(EcmpTrackEcmpid *track_ecmpid, const NMPObject **out_obj_del)
{
    EcmpTrackObj                   *track_obj;
    nm_auto_nmpobj const NMPObject *obj_new = NULL;
    gsize                           n_nexthops;
    gsize                           i;

    nm_assert(track_ecmpid);
    nm_assert(!c_list_is_empty(&track_ecmpid->ecmpid_lst_head));
    nm_assert(track_ecmpid->representative_obj
              == c_list_first_entry(&track_ecmpid->ecmpid_lst_head, EcmpTrackObj, ecmpid_lst)->obj);
    nm_assert(out_obj_del && !*out_obj_del);

    if (!track_ecmpid->needs_update) {
        /* Already up to date. Nothing to do. */
        return FALSE;
    }

    track_ecmpid->needs_update = FALSE;

    n_nexthops = c_list_length(&track_ecmpid->ecmpid_lst_head);

    if (n_nexthops == 1) {
        /* There is only a single entry. There is nothing to merge, just take
         * the first entry. */
        obj_new = nmp_object_ref(track_ecmpid->representative_obj);
        goto out;
    }

    /* We want the nexthop list to be deterministic. We thus sort the list and update
     * the representative_obj. */
    c_list_sort(&track_ecmpid->ecmpid_lst_head, _ecmp_track_sort_lst_cmp, NULL);
    nmp_object_ref_set(
        &track_ecmpid->representative_obj,
        c_list_first_entry(&track_ecmpid->ecmpid_lst_head, EcmpTrackObj, ecmpid_lst)->obj);

    obj_new = nmp_object_clone(track_ecmpid->representative_obj, FALSE);

    nm_assert(obj_new->ip4_route.n_nexthops <= 1);
    nm_assert(!obj_new->_ip4_route.extra_nexthops);

    /* Note that there actually cannot be duplicate (ifindex,gateway,weight) tuples, because
     * NML3Cfg uses NM_PLATFORM_IP_ROUTE_CMP_TYPE_ID to track the routes, and track_ecmpid
     * groups them further by NM_PLATFORM_IP_ROUTE_CMP_TYPE_ECMP_ID. The comparison for
     * ECMP_ID is a strict superset of ID, hence there are no duplicates.
     *
     * Also, kernel wouldn't care if there were duplicate nexthops anyway.
     *
     * This keeps it simple: we sorted the single-hop routes by next-hop, so
     * now just create a plain list of the nexthops (no check for duplicates, etc). */

    ((NMPObject *) obj_new)->ip4_route.n_nexthops = n_nexthops;
    ((NMPObject *) obj_new)->_ip4_route.extra_nexthops =
        g_new(NMPlatformIP4RtNextHop, n_nexthops - 1u);

    i = 0;
    c_list_for_each_entry (track_obj, &track_ecmpid->ecmpid_lst_head, ecmpid_lst) {
        if (i > 0) {
            const NMPlatformIP4Route *r  = NMP_OBJECT_CAST_IP4_ROUTE(track_obj->obj);
            NMPlatformIP4RtNextHop   *nh = (gpointer) &obj_new->_ip4_route.extra_nexthops[i - 1];

            *nh = (NMPlatformIP4RtNextHop){
                .ifindex = r->ifindex,
                .gateway = r->gateway,
                .weight  = r->weight,
            };
        }
        i++;
    }

out:
    nm_assert(obj_new);

    if (nmp_object_equal(track_ecmpid->merged_obj, obj_new))
        /* The objects are equal but the update was needed, for example if the
         * routes were removed from kernel but not from our tracking
         * dictionaries and therefore we tried to register them again. */
        return TRUE;

    if (track_ecmpid->merged_obj)
        *out_obj_del = g_steal_pointer(&track_ecmpid->merged_obj);
    track_ecmpid->merged_obj = g_steal_pointer(&obj_new);
    return TRUE;
}
/*****************************************************************************/

NMPNetns *
nm_netns_get_platform_netns(NMNetns *self)
{
    return NM_NETNS_GET_PRIVATE(self)->platform_netns;
}

NMPlatform *
nm_netns_get_platform(NMNetns *self)
{
    return NM_NETNS_GET_PRIVATE(self)->platform;
}
NMPGlobalTracker *
nm_netns_get_global_tracker(NMNetns *self)
{
    return NM_NETNS_GET_PRIVATE(self)->global_tracker;
}

NMDedupMultiIndex *
nm_netns_get_multi_idx(NMNetns *self)
{
    return nm_platform_get_multi_idx(NM_NETNS_GET_PRIVATE(self)->platform);
}

/*****************************************************************************/
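/* Hash helpers for priv->ecmp_track_by_ecmpid: the key is a pointer to the
 * representative NMPObject pointer (the first field of EcmpTrackEcmpid), so
 * entries are hashed and compared by NM_PLATFORM_IP_ROUTE_CMP_TYPE_ECMP_ID of
 * that route. */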
static guint
_ecmp_routes_by_ecmpid_hash(gconstpointer ptr)
{
    const NMPObject *const *p_obj = ptr;

    return nm_platform_ip4_route_hash(NMP_OBJECT_CAST_IP4_ROUTE(*p_obj),
                                      NM_PLATFORM_IP_ROUTE_CMP_TYPE_ECMP_ID);
}

static int
_ecmp_routes_by_ecmpid_equal(gconstpointer ptr_a, gconstpointer ptr_b)
{
    const NMPObject *const *p_obj_a = ptr_a;
    const NMPObject *const *p_obj_b = ptr_b;

    return nm_platform_ip4_route_cmp(NMP_OBJECT_CAST_IP4_ROUTE(*p_obj_a),
                                     NMP_OBJECT_CAST_IP4_ROUTE(*p_obj_b),
                                     NM_PLATFORM_IP_ROUTE_CMP_TYPE_ECMP_ID)
           == 0;
}

static void
_ecmp_routes_by_ecmpid_free(gpointer ptr)
{
    EcmpTrackEcmpid *track_ecmpid = ptr;

    c_list_unlink_stale(&track_ecmpid->ecmpid_lst_head);
    nmp_object_unref(track_ecmpid->representative_obj);
    nmp_object_unref(track_ecmpid->merged_obj);
    nm_g_slice_free(track_ecmpid);
}

static void
_ecmp_routes_by_obj_free(gpointer ptr)
{
    EcmpTrackObj *track_obj = ptr;

    c_list_unlink_stale(&track_obj->ifindex_lst);
    c_list_unlink_stale(&track_obj->ecmpid_lst);
    nmp_object_unref(track_obj->obj);
    nm_g_slice_free(track_obj);
}

/*****************************************************************************/
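/* priv->l3cfgs maps ifindex to NML3Cfg without a separate key allocation: the
 * key stored in the hash table is &l3cfg->priv.ifindex, and
 * _l3cfg_hashed_to_l3cfg() recovers the owning NML3Cfg from that pointer via
 * its struct offset. */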
static NML3Cfg *
_l3cfg_hashed_to_l3cfg(gpointer ptr)
{
    gpointer l3cfg;

    l3cfg = &(((char *) ptr)[-G_STRUCT_OFFSET(NML3Cfg, priv.ifindex)]);
    nm_assert(NM_IS_L3CFG(l3cfg));
    return l3cfg;
}

static void
_l3cfg_hashed_free(gpointer ptr)
{
    NML3Cfg *l3cfg = _l3cfg_hashed_to_l3cfg(ptr);

    c_list_unlink(&l3cfg->internal_netns.signal_pending_lst);
}

static void
_l3cfg_weak_notify(gpointer data, GObject *where_the_object_was)
{
    NMNetns        *self    = NM_NETNS(data);
    NMNetnsPrivate *priv    = NM_NETNS_GET_PRIVATE(data);
    NML3Cfg        *l3cfg   = NM_L3CFG(where_the_object_was);
    int             ifindex = nm_l3cfg_get_ifindex(l3cfg);

    if (!g_hash_table_remove(priv->l3cfgs, &ifindex))
        nm_assert_not_reached();

    if (NM_UNLIKELY(g_hash_table_size(priv->l3cfgs) == 0))
        g_object_unref(self);
}
NML3Cfg *
nm_netns_l3cfg_get(NMNetns *self, int ifindex)
{
    NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
    gpointer        ptr;

    nm_assert(ifindex > 0);

    ptr = g_hash_table_lookup(priv->l3cfgs, &ifindex);
    return ptr ? _l3cfg_hashed_to_l3cfg(ptr) : NULL;
}
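/* NMNetns holds only a weak reference to each NML3Cfg: the caller of
 * nm_netns_l3cfg_acquire() owns the returned reference, and _l3cfg_weak_notify()
 * drops the registry entry once the last reference is gone. While at least one
 * NML3Cfg is registered, the NMNetns keeps an additional reference to itself.
 *
 * Typical usage (sketch):
 *
 *   NML3Cfg *l3cfg = nm_netns_l3cfg_acquire(netns, ifindex);
 *
 *   ... use l3cfg ...
 *
 *   g_object_unref(l3cfg);
 */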
NML3Cfg *
nm_netns_l3cfg_acquire(NMNetns *self, int ifindex)
{
    NMNetnsPrivate *priv;
    NML3Cfg        *l3cfg;

    g_return_val_if_fail(NM_IS_NETNS(self), NULL);
    g_return_val_if_fail(ifindex > 0, NULL);

    priv = NM_NETNS_GET_PRIVATE(self);

    l3cfg = nm_netns_l3cfg_get(self, ifindex);
    if (l3cfg) {
        nm_log_trace(LOGD_CORE,
                     "l3cfg[" NM_HASH_OBFUSCATE_PTR_FMT ",ifindex=%d] %s",
                     NM_HASH_OBFUSCATE_PTR(l3cfg),
                     ifindex,
                     "referenced");
        return g_object_ref(l3cfg);
    }

    l3cfg = nm_l3cfg_new(self, ifindex);

    if (!g_hash_table_add(priv->l3cfgs, &l3cfg->priv.ifindex))
        nm_assert_not_reached();

    if (NM_UNLIKELY(g_hash_table_size(priv->l3cfgs) == 1))
        g_object_ref(self);

    g_object_weak_ref(G_OBJECT(l3cfg), _l3cfg_weak_notify, self);

    /* Transfer ownership! We keep only a weak ref. */
    return l3cfg;
}

/*****************************************************************************/
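/* Platform change handling: _platform_signal_cb() forwards each change to the
 * affected NML3Cfg right away via _nm_l3cfg_notify_platform_change(), and in
 * addition accumulates the changed object types per NML3Cfg and queues the
 * NML3Cfg on priv->l3cfg_signal_pending_lst_head. The accumulated flags are
 * then dispatched in one batch from the idle handler _platform_signal_on_idle_cb(). */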
static gboolean
_platform_signal_on_idle_cb(gpointer user_data)
{
    gs_unref_object NMNetns *self = g_object_ref(NM_NETNS(user_data));
    NMNetnsPrivate          *priv = NM_NETNS_GET_PRIVATE(self);
    NML3Cfg                 *l3cfg;
    CList                    work_list;

    nm_clear_g_source_inst(&priv->signal_pending_idle_source);

    /* we emit all queued signals together. However, we don't want to hook the
     * main loop for longer than the currently queued elements.
     *
     * If we catch more change events, they will be queued and processed by a future
     * idle handler.
     *
     * Hence, move the list to a temporary list. Isn't CList great? */

    c_list_init(&work_list);
    c_list_splice(&work_list, &priv->l3cfg_signal_pending_lst_head);

    while ((l3cfg = c_list_first_entry(&work_list, NML3Cfg, internal_netns.signal_pending_lst))) {
        nm_assert(NM_IS_L3CFG(l3cfg));
        c_list_unlink(&l3cfg->internal_netns.signal_pending_lst);
        _nm_l3cfg_notify_platform_change_on_idle(
            l3cfg,
            nm_steal_int(&l3cfg->internal_netns.signal_pending_obj_type_flags));
    }

    return G_SOURCE_CONTINUE;
}
static void
_platform_signal_cb(NMPlatform *platform,
                    int obj_type_i,
                    int ifindex,
                    gconstpointer platform_object,
                    int change_type_i,
                    NMNetns **p_self)
{
    NMNetns                         *self        = NM_NETNS(*p_self);
    NMNetnsPrivate                  *priv        = NM_NETNS_GET_PRIVATE(self);
    const NMPObjectType              obj_type    = obj_type_i;
    const NMPlatformSignalChangeType change_type = change_type_i;
    NML3Cfg                         *l3cfg;

    if (ifindex <= 0) {
        /* The platform signal callback can also be triggered by nodev routes; skip them. */
        return;
    }

    l3cfg = nm_netns_l3cfg_get(self, ifindex);
    if (!l3cfg)
        return;

    l3cfg->internal_netns.signal_pending_obj_type_flags |= nmp_object_type_to_flags(obj_type);

    if (c_list_is_empty(&l3cfg->internal_netns.signal_pending_lst)) {
        c_list_link_tail(&priv->l3cfg_signal_pending_lst_head,
                         &l3cfg->internal_netns.signal_pending_lst);
        if (!priv->signal_pending_idle_source)
            priv->signal_pending_idle_source =
                nm_g_idle_add_source(_platform_signal_on_idle_cb, self);
    }

    _nm_l3cfg_notify_platform_change(l3cfg, change_type, NMP_OBJECT_UP_CAST(platform_object));
}

/*****************************************************************************/
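/* Shared-IP handles reserve /24 ranges from 10.42.0.0/16 (used for IPv4
 * "shared" mode): the first reservation gets 10.42.0.1/24, subsequent ones the
 * next free 10.42.x.1/24. Handles are refcounted per range; when all 256
 * ranges are taken, an already reserved range is handed out again. */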
|
|
|
|
|
|
2020-09-14 15:07:18 +02:00
|
|
|
NMNetnsSharedIPHandle *
|
|
|
|
|
nm_netns_shared_ip_reserve(NMNetns *self)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMNetnsPrivate *priv;
|
2020-09-14 15:07:18 +02:00
|
|
|
NMNetnsSharedIPHandle *handle;
|
|
|
|
|
const in_addr_t addr_start = ntohl(0x0a2a0001u); /* 10.42.0.1 */
|
|
|
|
|
in_addr_t addr;
|
glib-aux: rename IP address related helpers from "nm-inet-utils.h"
- name things related to `in_addr_t`, `struct in6_addr`, `NMIPAddr` as
`nm_ip4_addr_*()`, `nm_ip6_addr_*()`, `nm_ip_addr_*()`, respectively.
- we have a wrapper `nm_inet_ntop()` for `inet_ntop()`. This name
of our wrapper is chosen to be familiar with the libc underlying
function. With this, also name functions that are about string
representations of addresses `nm_inet_*()`, `nm_inet4_*()`,
`nm_inet6_*()`. For example, `nm_inet_parse_str()`,
`nm_inet_is_normalized()`.
<<<<
R() {
git grep -l "$1" | xargs sed -i "s/\<$1\>/$2/g"
}
R NM_CMP_DIRECT_IN4ADDR_SAME_PREFIX NM_CMP_DIRECT_IP4_ADDR_SAME_PREFIX
R NM_CMP_DIRECT_IN6ADDR_SAME_PREFIX NM_CMP_DIRECT_IP6_ADDR_SAME_PREFIX
R NM_UTILS_INET_ADDRSTRLEN NM_INET_ADDRSTRLEN
R _nm_utils_inet4_ntop nm_inet4_ntop
R _nm_utils_inet6_ntop nm_inet6_ntop
R _nm_utils_ip4_get_default_prefix nm_ip4_addr_get_default_prefix
R _nm_utils_ip4_get_default_prefix0 nm_ip4_addr_get_default_prefix0
R _nm_utils_ip4_netmask_to_prefix nm_ip4_addr_netmask_to_prefix
R _nm_utils_ip4_prefix_to_netmask nm_ip4_addr_netmask_from_prefix
R nm_utils_inet4_ntop_dup nm_inet4_ntop_dup
R nm_utils_inet6_ntop_dup nm_inet6_ntop_dup
R nm_utils_inet_ntop nm_inet_ntop
R nm_utils_inet_ntop_dup nm_inet_ntop_dup
R nm_utils_ip4_address_clear_host_address nm_ip4_addr_clear_host_address
R nm_utils_ip4_address_is_link_local nm_ip4_addr_is_link_local
R nm_utils_ip4_address_is_loopback nm_ip4_addr_is_loopback
R nm_utils_ip4_address_is_zeronet nm_ip4_addr_is_zeronet
R nm_utils_ip4_address_same_prefix nm_ip4_addr_same_prefix
R nm_utils_ip4_address_same_prefix_cmp nm_ip4_addr_same_prefix_cmp
R nm_utils_ip6_address_clear_host_address nm_ip6_addr_clear_host_address
R nm_utils_ip6_address_same_prefix nm_ip6_addr_same_prefix
R nm_utils_ip6_address_same_prefix_cmp nm_ip6_addr_same_prefix_cmp
R nm_utils_ip6_is_ula nm_ip6_addr_is_ula
R nm_utils_ip_address_same_prefix nm_ip_addr_same_prefix
R nm_utils_ip_address_same_prefix_cmp nm_ip_addr_same_prefix_cmp
R nm_utils_ip_is_site_local nm_ip_addr_is_site_local
R nm_utils_ipaddr_is_normalized nm_inet_is_normalized
R nm_utils_ipaddr_is_valid nm_inet_is_valid
R nm_utils_ipx_address_clear_host_address nm_ip_addr_clear_host_address
R nm_utils_parse_inaddr nm_inet_parse_str
R nm_utils_parse_inaddr_bin nm_inet_parse_bin
R nm_utils_parse_inaddr_bin_full nm_inet_parse_bin_full
R nm_utils_parse_inaddr_prefix nm_inet_parse_with_prefix_str
R nm_utils_parse_inaddr_prefix_bin nm_inet_parse_with_prefix_bin
R test_nm_utils_ip6_address_same_prefix test_nm_ip_addr_same_prefix
./contrib/scripts/nm-code-format.sh -F
2022-08-19 13:15:20 +02:00
|
|
|
char sbuf_addr[NM_INET_ADDRSTRLEN];
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-09-14 15:07:18 +02:00
|
|
|
/* Find an unused address in the 10.42.x.x range */
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-09-14 15:07:18 +02:00
|
|
|
g_return_val_if_fail(NM_IS_NETNS(self), NULL);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-09-14 15:07:18 +02:00
|
|
|
priv = NM_NETNS_GET_PRIVATE(self);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-09-14 15:07:18 +02:00
|
|
|
if (!priv->shared_ips) {
|
|
|
|
|
addr = addr_start;
|
2021-05-20 20:39:38 +02:00
|
|
|
priv->shared_ips = g_hash_table_new(nm_puint32_hash, nm_puint32_equal);
|
2020-09-14 15:07:18 +02:00
|
|
|
g_object_ref(self);
|
|
|
|
|
} else {
|
|
|
|
|
guint32 count;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-09-14 15:07:18 +02:00
|
|
|
nm_assert(g_hash_table_size(priv->shared_ips) > 0);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-09-14 15:07:18 +02:00
|
|
|
count = 0u;
|
|
|
|
|
for (;;) {
|
|
|
|
|
addr = addr_start + htonl(count << 8u);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-09-14 15:07:18 +02:00
|
|
|
handle = g_hash_table_lookup(priv->shared_ips, &addr);
|
|
|
|
|
if (!handle)
|
|
|
|
|
break;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-09-14 15:07:18 +02:00
|
|
|
count++;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-09-14 15:07:18 +02:00
|
|
|
if (count > 0xFFu) {
|
|
|
|
|
if (handle->_ref_count == 1) {
|
|
|
|
|
_LOGE("shared-ip4: ran out of shared IP addresses. Reuse %s/24",
|
2022-08-19 13:15:20 +02:00
|
|
|
nm_inet4_ntop(handle->addr, sbuf_addr));
|
2020-09-14 15:07:18 +02:00
|
|
|
} else {
|
|
|
|
|
_LOGD("shared-ip4: reserved IP address range %s/24 (duplicate)",
|
2022-08-19 13:15:20 +02:00
|
|
|
nm_inet4_ntop(handle->addr, sbuf_addr));
|
2020-09-14 15:07:18 +02:00
|
|
|
}
|
|
|
|
|
handle->_ref_count++;
|
|
|
|
|
return handle;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-09-14 15:07:18 +02:00
|
|
|
handle = g_slice_new(NMNetnsSharedIPHandle);
|
|
|
|
|
*handle = (NMNetnsSharedIPHandle){
|
|
|
|
|
.addr = addr,
|
|
|
|
|
._ref_count = 1,
|
|
|
|
|
._self = self,
|
|
|
|
|
};
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-09-14 15:07:18 +02:00
|
|
|
g_hash_table_add(priv->shared_ips, handle);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2022-08-19 13:15:20 +02:00
|
|
|
_LOGD("shared-ip4: reserved IP address range %s/24", nm_inet4_ntop(handle->addr, sbuf_addr));
|
2020-09-14 15:07:18 +02:00
|
|
|
return handle;
|
|
|
|
|
}
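The reservation loop above probes one /24 range per iteration by adding htonl(count << 8) to the start address. A self-contained sketch of that arithmetic (the 10.42.0.1 start value is an assumption, inferred from the 10.42.255.1 fallback asserted in the release path below):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    const in_addr_t addr_start = htonl(0x0A2A0001u); /* assumed 10.42.0.1 */
    uint32_t        count;

    for (count = 0; count <= 0xFFu; count += 0x55u) {
        struct in_addr a = {.s_addr = addr_start + htonl(count << 8u)};
        char           buf[INET_ADDRSTRLEN];

        /* No byte-level carry occurs for count <= 0xFF, so this walks
         * 10.42.0.1, 10.42.85.1, ..., 10.42.255.1 (one candidate per /24). */
        printf("%s/24\n", inet_ntop(AF_INET, &a, buf, sizeof(buf)));
    }
    return 0;
}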
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nm_netns_shared_ip_release(NMNetnsSharedIPHandle *handle)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMNetns *self;
|
2020-09-14 15:07:18 +02:00
|
|
|
NMNetnsPrivate *priv;
|
2022-08-19 13:15:20 +02:00
|
|
|
char sbuf_addr[NM_INET_ADDRSTRLEN];
|
2020-09-14 15:07:18 +02:00
|
|
|
|
|
|
|
|
g_return_if_fail(handle);
|
|
|
|
|
|
|
|
|
|
self = handle->_self;
|
|
|
|
|
|
|
|
|
|
g_return_if_fail(NM_IS_NETNS(self));
|
|
|
|
|
|
|
|
|
|
priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
|
|
|
|
|
nm_assert(handle->_ref_count > 0);
|
|
|
|
|
nm_assert(handle == nm_g_hash_table_lookup(priv->shared_ips, handle));
|
|
|
|
|
|
|
|
|
|
if (handle->_ref_count > 1) {
|
|
|
|
|
nm_assert(handle->addr == ntohl(0x0A2AFF01u)); /* 10.42.255.1 */
|
|
|
|
|
handle->_ref_count--;
|
|
|
|
|
_LOGD("shared-ip4: release IP address range %s/24 (%d more references held)",
|
2022-08-19 13:15:20 +02:00
|
|
|
nm_inet4_ntop(handle->addr, sbuf_addr),
|
2020-09-14 15:07:18 +02:00
|
|
|
handle->_ref_count);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!g_hash_table_remove(priv->shared_ips, handle))
|
|
|
|
|
nm_assert_not_reached();
|
|
|
|
|
|
|
|
|
|
if (g_hash_table_size(priv->shared_ips) == 0) {
|
|
|
|
|
nm_clear_pointer(&priv->shared_ips, g_hash_table_unref);
|
|
|
|
|
g_object_unref(self);
|
|
|
|
|
}
|
|
|
|
|
|
2022-08-19 13:15:20 +02:00
|
|
|
_LOGD("shared-ip4: release IP address range %s/24", nm_inet4_ntop(handle->addr, sbuf_addr));
|
2020-09-14 15:07:18 +02:00
|
|
|
|
|
|
|
|
handle->_self = NULL;
|
|
|
|
|
nm_g_slice_free(handle);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2022-11-23 08:29:48 +01:00
|
|
|
void
|
|
|
|
|
nm_netns_ip_route_ecmp_register(NMNetns *self, NML3Cfg *l3cfg, const NMPObject *obj)
|
|
|
|
|
{
|
|
|
|
|
NMNetnsPrivate *priv;
|
|
|
|
|
EcmpTrackObj *track_obj;
|
|
|
|
|
const NMPlatformIP4Route *route;
|
|
|
|
|
char sbuf[NM_UTILS_TO_STRING_BUFFER_SIZE];
|
|
|
|
|
|
|
|
|
|
nm_assert_l3cfg(self, l3cfg);
|
|
|
|
|
|
|
|
|
|
route = NMP_OBJECT_CAST_IP4_ROUTE(obj);
|
|
|
|
|
|
|
|
|
|
nm_assert(route->ifindex > 0);
|
|
|
|
|
nm_assert(route->ifindex == nm_l3cfg_get_ifindex(l3cfg));
|
|
|
|
|
nm_assert(route->n_nexthops <= 1);
|
|
|
|
|
|
|
|
|
|
priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
|
|
|
|
|
track_obj = g_hash_table_lookup(priv->ecmp_track_by_obj, &obj);
|
|
|
|
|
|
|
|
|
|
if (NM_MORE_ASSERTS > 10) {
|
|
|
|
|
EcmpTrackObj *track_obj2;
|
|
|
|
|
gboolean found = FALSE;
|
|
|
|
|
|
|
|
|
|
c_list_for_each_entry (track_obj2,
|
|
|
|
|
&l3cfg->internal_netns.ecmp_track_ifindex_lst_head,
|
|
|
|
|
ifindex_lst) {
|
|
|
|
|
if (track_obj2->obj == obj) {
|
|
|
|
|
found = TRUE;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nm_assert((!!track_obj) == found);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!track_obj) {
|
|
|
|
|
EcmpTrackEcmpid *track_ecmpid;
|
|
|
|
|
|
|
|
|
|
track_ecmpid = g_hash_table_lookup(priv->ecmp_track_by_ecmpid, &obj);
|
|
|
|
|
if (!track_ecmpid) {
|
|
|
|
|
track_ecmpid = g_slice_new(EcmpTrackEcmpid);
|
|
|
|
|
*track_ecmpid = (EcmpTrackEcmpid){
|
|
|
|
|
.representative_obj = nmp_object_ref(obj),
|
|
|
|
|
.merged_obj = NULL,
|
|
|
|
|
.ecmpid_lst_head = C_LIST_INIT(track_ecmpid->ecmpid_lst_head),
|
|
|
|
|
.needs_update = TRUE,
|
|
|
|
|
};
|
|
|
|
|
g_hash_table_add(priv->ecmp_track_by_ecmpid, track_ecmpid);
|
|
|
|
|
} else
|
|
|
|
|
track_ecmpid->needs_update = TRUE;
|
|
|
|
|
|
|
|
|
|
track_obj = g_slice_new(EcmpTrackObj);
|
|
|
|
|
*track_obj = (EcmpTrackObj){
|
|
|
|
|
.obj = nmp_object_ref(obj),
|
|
|
|
|
.l3cfg = l3cfg,
|
|
|
|
|
.parent_track_ecmpid = track_ecmpid,
|
|
|
|
|
.dirty = FALSE,
|
2023-01-31 10:30:04 +01:00
|
|
|
.is_new = TRUE,
|
|
|
|
|
.is_ready = FALSE,
|
2022-11-23 08:29:48 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
g_hash_table_add(priv->ecmp_track_by_obj, track_obj);
|
|
|
|
|
c_list_link_tail(&l3cfg->internal_netns.ecmp_track_ifindex_lst_head,
|
|
|
|
|
&track_obj->ifindex_lst);
|
|
|
|
|
c_list_link_tail(&track_ecmpid->ecmpid_lst_head, &track_obj->ecmpid_lst);
|
|
|
|
|
|
|
|
|
|
_LOGT(
|
|
|
|
|
"ecmp-route: track %s",
|
|
|
|
|
nmp_object_to_string(track_obj->obj, NMP_OBJECT_TO_STRING_PUBLIC, sbuf, sizeof(sbuf)));
|
2022-12-22 17:58:12 +01:00
|
|
|
} else {
|
|
|
|
|
track_obj->dirty = FALSE;
|
|
|
|
|
track_obj->parent_track_ecmpid->needs_update = TRUE;
|
|
|
|
|
}
|
2022-11-23 08:29:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
2023-01-31 10:30:04 +01:00
|
|
|
nm_netns_ip_route_ecmp_commit(NMNetns *self,
|
|
|
|
|
NML3Cfg *l3cfg,
|
|
|
|
|
GPtrArray **out_singlehop_routes,
|
|
|
|
|
gboolean is_reapply)
|
2022-11-23 08:29:48 +01:00
|
|
|
{
|
2023-02-01 08:37:51 +01:00
|
|
|
NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
EcmpTrackObj *track_obj;
|
|
|
|
|
EcmpTrackObj *track_obj_safe;
|
|
|
|
|
EcmpTrackEcmpid *track_ecmpid;
|
|
|
|
|
const NMPObject *route_obj;
|
|
|
|
|
char sbuf[NM_UTILS_TO_STRING_BUFFER_SIZE];
|
|
|
|
|
gboolean already_notified = FALSE;
|
2022-11-23 08:29:48 +01:00
|
|
|
|
|
|
|
|
nm_assert_l3cfg(self, l3cfg);
|
|
|
|
|
|
l3cfg: schedule an update after every commit-type/config-data register/unregister
When we register/unregister a commit-type or when we add/remove a
config-data to NML3Cfg, that act only does the registration/addition.
Only on the next commit are the changes actually done. The purpose
of that is to add/register multiple configurations and commit them later
when ready.
However, it would be wrong not to do the commit a short time after. The
configuration state is dirty and needs to be committed, and that should
happen soon.
Worse, when an interface disappears, NMDevice will clear the ifindex and
the NML3Cfg instance, thereby unregistering all config data and commit
type. If we previously committed something, we need to do another follow-up
commit to clean up that state.
That is for example important with ECMP routes, which are registered in
NMNetns. When NML3Cfg goes down, it must always unregister to properly
clean up. Failure to do so causes an assertion failure and a crash. This
change fixes that.
Fix that by automatically scheduling an idle commit on
register/unregister/add/remove of commit-type/config-data.
It should *always* be permissible to call an AUTO commit from
an idle handler, because various parties cannot use NML3Cfg
independently, and they cannot know when somebody else does a
commit.
Note that NML3Cfg remembers if it previously did a commit
("commit_type_update_sticky"), so even if the last commit-type gets
unregistered, the next commit will still do a sticky update (one more
time).
The only remaining question is what happens during quitting. When
quitting, NetworkManager may want to leave some interfaces up and
configured. If we were to properly clean up the NML3Cfg, we might need a
mechanism to handle that. However, currently we just leak everything
during quit, so that is not a concern now. It is something that needs
to be addressed in the future.
https://bugzilla.redhat.com/show_bug.cgi?id=2158394
https://gitlab.freedesktop.org/NetworkManager/NetworkManager/-/merge_requests/1505
2023-01-17 17:41:38 +01:00
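The commit message above describes coalescing changes into a single idle commit. A simplified stand-in using plain GLib (hypothetical Cfg/do_commit names; nm_l3cfg_commit_on_idle_schedule() and the sticky commit-type bookkeeping are not modeled):

#include <glib.h>

typedef struct {
    guint commit_idle_id;
} Cfg;

static void
do_commit(Cfg *cfg)
{
    g_print("commit %p\n", (gpointer) cfg);
}

static gboolean
commit_on_idle_cb(gpointer user_data)
{
    Cfg *cfg = user_data;

    cfg->commit_idle_id = 0;
    do_commit(cfg);
    return G_SOURCE_REMOVE;
}

/* Called after every register/unregister/add/remove; multiple calls within
 * the same main-loop iteration collapse into one commit. */
static void
schedule_commit_on_idle(Cfg *cfg)
{
    if (cfg->commit_idle_id == 0)
        cfg->commit_idle_id = g_idle_add(commit_on_idle_cb, cfg);
}

int
main(void)
{
    Cfg cfg = {0};

    schedule_commit_on_idle(&cfg);
    schedule_commit_on_idle(&cfg); /* coalesced: only one commit runs */

    while (g_main_context_pending(NULL))
        g_main_context_iteration(NULL, FALSE);
    return 0;
}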
|
|
|
_LOGT("ecmp-route: committing IPv4 ECMP routes");
|
|
|
|
|
|
2022-11-23 08:29:48 +01:00
|
|
|
/* First, delete all dirty entries, and mark the survivors as dirty, so that on the
|
|
|
|
|
* next update they must be touched again. */
|
|
|
|
|
c_list_for_each_entry_safe (track_obj,
|
|
|
|
|
track_obj_safe,
|
|
|
|
|
&l3cfg->internal_netns.ecmp_track_ifindex_lst_head,
|
|
|
|
|
ifindex_lst) {
|
|
|
|
|
track_ecmpid = track_obj->parent_track_ecmpid;
|
|
|
|
|
track_ecmpid->already_visited = FALSE;
|
|
|
|
|
|
core: fix crash in nm_netns_ip_route_ecmp_commit()
#0 0x00000000004c53e0 in nm_netns_ip_route_ecmp_commit (self=0x27bde30, l3cfg=l3cfg@entry=0x2890810, out_singlehop_routes=out_singlehop_routes@entry=0x7ffd0cac3ce8)
at src/core/nm-netns.c:686
#1 0x00000000004b4335 in _commit_collect_routes
(self=self@entry=0x2890810, addr_family=addr_family@entry=2, commit_type=commit_type@entry=NM_L3_CFG_COMMIT_TYPE_UPDATE, routes=routes@entry=0x7ffd0cac3de8, routes_nodev=routes_nodev@entry=0x7ffd0cac3de0) at src/core/nm-l3cfg.c:1183
#2 0x00000000004b8982 in _l3_commit_one
(self=self@entry=0x2890810, addr_family=addr_family@entry=2, commit_type=commit_type@entry=NM_L3_CFG_COMMIT_TYPE_UPDATE, changed_combined_l3cd=<optimized out>, l3cd_old=<optimized out>) at src/core/nm-l3cfg.c:4605
#3 0x00000000004c0f52 in _l3_commit (self=self@entry=0x2890810, commit_type=NM_L3_CFG_COMMIT_TYPE_UPDATE, commit_type@entry=NM_L3_CFG_COMMIT_TYPE_AUTO, is_idle=is_idle@entry=1)
at src/core/nm-l3cfg.c:4786
#4 0x00000000004c11cb in _l3_commit_on_idle_cb (user_data=user_data@entry=0x2890810) at src/core/nm-l3cfg.c:3164
#5 0x00007f532d02dcb2 in g_idle_dispatch (source=0x28f70c0, callback=0x4c116e <_l3_commit_on_idle_cb>, user_data=0x2890810) at ../glib/gmain.c:6124
#6 0x00007f532d02ecbf in g_main_dispatch (context=0x27c2d60) at ../glib/gmain.c:3444
https://bugzilla.redhat.com/show_bug.cgi?id=2158365
Fixes: 5b5ce4268211 ('nm-netns: track ECMP routes')
2023-01-05 10:29:07 +01:00
|
|
|
nm_assert(g_hash_table_lookup(priv->ecmp_track_by_ecmpid, track_ecmpid) == track_ecmpid);
|
|
|
|
|
nm_assert(g_hash_table_lookup(priv->ecmp_track_by_obj, track_obj) == track_obj);
|
|
|
|
|
nm_assert(c_list_contains(&track_ecmpid->ecmpid_lst_head, &track_obj->ecmpid_lst));
|
|
|
|
|
nm_assert(track_obj->l3cfg == l3cfg);
|
|
|
|
|
|
2022-11-23 08:29:48 +01:00
|
|
|
if (!track_obj->dirty) {
|
|
|
|
|
/* This one is still in use. Keep it, but mark it dirty, so that on the
|
|
|
|
|
* next update cycle, it needs to be touched again or will be deleted. */
|
|
|
|
|
track_obj->dirty = TRUE;
|
2023-01-31 10:30:04 +01:00
|
|
|
if (is_reapply) {
|
|
|
|
|
track_obj->is_new = TRUE;
|
|
|
|
|
track_obj->is_ready = FALSE;
|
|
|
|
|
}
|
|
|
|
|
if (track_obj->is_new) {
|
2023-02-01 08:37:51 +01:00
|
|
|
const NMPlatformIP4Route *route =
|
|
|
|
|
NMP_OBJECT_CAST_IP4_ROUTE(track_ecmpid->merged_obj);
|
|
|
|
|
|
2023-01-31 10:30:04 +01:00
|
|
|
/* This is a new route entry that was just added. Upon first
|
|
|
|
|
* addition, the route is not yet ready for configuration,
|
|
|
|
|
* because we need to make sure that the gateway is reachable
|
|
|
|
|
* via an onlink route. The calling l3cfg will configure that
|
|
|
|
|
* route, but only after returning from this function. So we
|
|
|
|
|
* need to go through one more commit.
|
|
|
|
|
*
|
|
|
|
|
* We also need to make sure that we are called back right
|
|
|
|
|
* after l3cfg configured that route. We achieve that by
|
|
|
|
|
* scheduling another idle commit on "l3cfg". */
|
|
|
|
|
track_obj->is_new = FALSE;
|
2023-02-01 08:37:51 +01:00
|
|
|
if (route && route->gateway == 0) {
|
|
|
|
|
/* This route is onlink. We don't need to configure an onlink route
|
|
|
|
|
* to the gateway, and the route is immediately ready for configuration. */
|
|
|
|
|
track_obj->is_ready = TRUE;
|
|
|
|
|
} else if (c_list_length_is(&track_ecmpid->ecmpid_lst_head, 1)) {
|
2023-01-31 10:30:04 +01:00
|
|
|
/* This route has no merge partner and ends up being a
|
|
|
|
|
* single hop route. It will be returned and configured by
|
|
|
|
|
* the calling "l3cfg".
|
|
|
|
|
*
|
|
|
|
|
* Unlike for multi-hop routes, we don't need to be called
|
|
|
|
|
* again after the onlink route was added. We are done, and
|
|
|
|
|
* don't need to schedule an idle commit. */
|
|
|
|
|
track_obj->is_ready = TRUE;
|
|
|
|
|
} else {
|
2023-02-01 08:37:51 +01:00
|
|
|
/* This is a new route which has a gateway. We need the "l3cfg"
|
|
|
|
|
* to first configure the onlink route. It's not yet ready for configuration.
|
|
|
|
|
*
|
|
|
|
|
* Instead, schedule an idle commit to make sure we get called back
|
|
|
|
|
* again, and then (upon seeing the entry the second time) the onlink
|
|
|
|
|
* route is already configured and we will be ready. */
|
2023-01-31 10:30:04 +01:00
|
|
|
if (!already_notified) {
|
2023-02-01 08:37:51 +01:00
|
|
|
/* Some micro optimization with already_notified to avoid calling
|
|
|
|
|
* the scheduling function unnecessarily. */
|
2023-01-31 10:30:04 +01:00
|
|
|
already_notified = TRUE;
|
|
|
|
|
nm_l3cfg_commit_on_idle_schedule(l3cfg, NM_L3_CFG_COMMIT_TYPE_AUTO);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
/* We see this entry the second time (or more) so it's ready. */
|
|
|
|
|
track_obj->is_ready = TRUE;
|
|
|
|
|
}
|
2022-11-23 08:29:48 +01:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2023-01-05 10:29:07 +01:00
|
|
|
/* This entry can be dropped. */
|
|
|
|
|
if (!g_hash_table_remove(priv->ecmp_track_by_obj, track_obj))
|
|
|
|
|
nm_assert_not_reached();
|
|
|
|
|
|
2022-11-23 08:29:48 +01:00
|
|
|
if (c_list_is_empty(&track_ecmpid->ecmpid_lst_head)) {
|
|
|
|
|
if (track_ecmpid->merged_obj) {
|
2023-01-31 10:30:04 +01:00
|
|
|
if (NMP_OBJECT_CAST_IP4_ROUTE(track_ecmpid->merged_obj)->n_nexthops > 1)
|
|
|
|
|
nm_platform_object_delete(priv->platform, track_ecmpid->merged_obj);
|
2022-11-23 08:29:48 +01:00
|
|
|
}
|
|
|
|
|
g_hash_table_remove(priv->ecmp_track_by_ecmpid, track_ecmpid);
|
2022-12-23 12:48:32 +01:00
|
|
|
|
2022-11-23 08:29:48 +01:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* We need to update the representative obj. */
|
|
|
|
|
nmp_object_ref_set(
|
|
|
|
|
&track_ecmpid->representative_obj,
|
|
|
|
|
c_list_first_entry(&track_ecmpid->ecmpid_lst_head, EcmpTrackObj, ecmpid_lst)->obj);
|
|
|
|
|
track_ecmpid->needs_update = TRUE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Now, we need to iterate again over all objects, and regenerate the merged_obj. */
|
|
|
|
|
c_list_for_each_entry (track_obj,
|
|
|
|
|
&l3cfg->internal_netns.ecmp_track_ifindex_lst_head,
|
|
|
|
|
ifindex_lst) {
|
2023-02-01 08:37:51 +01:00
|
|
|
const NMPlatformIP4Route *route;
|
2023-01-31 10:30:04 +01:00
|
|
|
EcmpTrackObj *track_obj2;
|
2022-11-23 08:29:48 +01:00
|
|
|
nm_auto_nmpobj const NMPObject *obj_del = NULL;
|
|
|
|
|
gboolean changed;
|
2023-01-31 10:30:04 +01:00
|
|
|
gboolean all_is_ready;
|
2022-11-23 08:29:48 +01:00
|
|
|
|
|
|
|
|
track_ecmpid = track_obj->parent_track_ecmpid;
|
|
|
|
|
if (track_ecmpid->already_visited) {
|
|
|
|
|
/* We already visited this ecmpid in the same loop. We can skip, otherwise
|
|
|
|
|
* we might add the same route twice. */
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
track_ecmpid->already_visited = TRUE;
|
|
|
|
|
|
2023-01-31 10:30:04 +01:00
|
|
|
all_is_ready = TRUE;
|
|
|
|
|
c_list_for_each_entry (track_obj2, &track_ecmpid->ecmpid_lst_head, ecmpid_lst) {
|
|
|
|
|
if (!track_obj2->is_ready) {
|
|
|
|
|
all_is_ready = FALSE;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
if (!all_is_ready) {
|
|
|
|
|
/* Here we might have a merged_obj already which can have the wrong
|
|
|
|
|
* settings, e.g. the wrong nexthops. We leave it for the moment and
|
|
|
|
|
* then we reconfigure it when this entry is ready. */
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2022-11-23 08:29:48 +01:00
|
|
|
changed = _ecmp_track_init_merged_obj(track_obj->parent_track_ecmpid, &obj_del);
|
|
|
|
|
|
|
|
|
|
nm_assert(!obj_del || changed);
|
|
|
|
|
|
2023-01-31 10:30:04 +01:00
|
|
|
route_obj = track_ecmpid->merged_obj;
|
2022-11-23 08:29:48 +01:00
|
|
|
route = NMP_OBJECT_CAST_IP4_ROUTE(route_obj);
|
|
|
|
|
|
|
|
|
|
if (obj_del) {
|
2023-01-31 10:30:04 +01:00
|
|
|
if (NMP_OBJECT_CAST_IP4_ROUTE(obj_del)->n_nexthops > 1)
|
|
|
|
|
nm_platform_object_delete(priv->platform, obj_del);
|
|
|
|
|
else if (track_obj->l3cfg != l3cfg)
|
|
|
|
|
nm_l3cfg_commit_on_idle_schedule(track_obj->l3cfg, NM_L3_CFG_COMMIT_TYPE_AUTO);
|
2022-11-23 08:29:48 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (route->n_nexthops <= 1) {
|
|
|
|
|
/* This is a single hop route. Return it to the caller. */
|
|
|
|
|
if (!*out_singlehop_routes) {
|
|
|
|
|
/* Note that the returned array does not own a reference. This
|
|
|
|
|
* function has only one caller, and for that caller, it's just
|
|
|
|
|
* fine that the result is not additionally kept alive. */
|
|
|
|
|
*out_singlehop_routes =
|
|
|
|
|
g_ptr_array_new_with_free_func((GDestroyNotify) nmp_object_unref);
|
|
|
|
|
}
|
|
|
|
|
g_ptr_array_add(*out_singlehop_routes, (gpointer) nmp_object_ref(route_obj));
|
|
|
|
|
if (changed) {
|
|
|
|
|
_LOGT("ecmp-route: single-hop %s",
|
|
|
|
|
nmp_object_to_string(route_obj,
|
|
|
|
|
NMP_OBJECT_TO_STRING_PUBLIC,
|
|
|
|
|
sbuf,
|
|
|
|
|
sizeof(sbuf)));
|
|
|
|
|
}
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2023-01-31 10:30:04 +01:00
|
|
|
if (changed || is_reapply) {
|
2022-11-23 08:29:48 +01:00
|
|
|
_LOGT("ecmp-route: multi-hop %s",
|
|
|
|
|
nmp_object_to_string(route_obj, NMP_OBJECT_TO_STRING_PUBLIC, sbuf, sizeof(sbuf)));
|
2023-01-31 10:30:04 +01:00
|
|
|
nm_platform_ip_route_add(priv->platform, NMP_NLM_FLAG_APPEND, route_obj);
|
2022-11-23 08:29:48 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
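The register/commit pair above follows a mark-and-sweep pattern: registering an object clears its "dirty" flag, and each commit drops whatever stayed dirty, then re-marks the survivors for the next cycle. A simplified, hypothetical GHashTable sketch of just that pattern (not the actual EcmpTrackObj/EcmpTrackEcmpid bookkeeping):

#include <glib.h>

typedef struct {
    char    *key;
    gboolean dirty;
} Entry;

static void
entry_free(gpointer data)
{
    Entry *e = data;

    g_free(e->key);
    g_free(e);
}

/* register: create or refresh an entry and clear its dirty flag. */
static void
track_register(GHashTable *tracked, const char *key)
{
    Entry *e = g_hash_table_lookup(tracked, key);

    if (!e) {
        e      = g_new0(Entry, 1);
        e->key = g_strdup(key);
        g_hash_table_insert(tracked, e->key, e);
    }
    e->dirty = FALSE;
}

/* commit: drop entries that were not re-registered since the last commit,
 * then mark the survivors dirty for the next cycle. */
static void
track_commit(GHashTable *tracked)
{
    GHashTableIter iter;
    Entry         *e;

    g_hash_table_iter_init(&iter, tracked);
    while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &e)) {
        if (e->dirty) {
            g_hash_table_iter_remove(&iter);
            continue;
        }
        e->dirty = TRUE;
    }
}

int
main(void)
{
    GHashTable *tracked = g_hash_table_new_full(g_str_hash, g_str_equal, NULL, entry_free);

    track_register(tracked, "route-a");
    track_register(tracked, "route-b");
    track_commit(tracked); /* both survive, both marked dirty */

    track_register(tracked, "route-a"); /* only "route-a" re-registered */
    track_commit(tracked);              /* "route-b" is dropped here */

    g_print("%u entries left\n", g_hash_table_size(tracked)); /* prints 1 */

    g_hash_table_destroy(tracked);
    return 0;
}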
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2017-04-17 18:40:52 +02:00
|
|
|
static void
|
|
|
|
|
set_property(GObject *object, guint prop_id, const GValue *value, GParamSpec *pspec)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMNetns *self = NM_NETNS(object);
|
2017-04-17 18:40:52 +02:00
|
|
|
NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-04-17 18:40:52 +02:00
|
|
|
switch (prop_id) {
|
|
|
|
|
case PROP_PLATFORM:
|
|
|
|
|
/* construct-only */
|
|
|
|
|
priv->platform = g_value_get_object(value) ?: NM_PLATFORM_GET;
|
|
|
|
|
if (!priv->platform)
|
|
|
|
|
g_return_if_reached();
|
|
|
|
|
g_object_ref(priv->platform);
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
nm_netns_init(NMNetns *self)
|
|
|
|
|
{
|
2020-07-21 11:21:44 +02:00
|
|
|
NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
|
|
|
|
|
priv->_self_signal_user_data = self;
|
|
|
|
|
c_list_init(&priv->l3cfg_signal_pending_lst_head);
|
2022-11-23 08:29:48 +01:00
|
|
|
|
|
|
|
|
G_STATIC_ASSERT_EXPR(G_STRUCT_OFFSET(EcmpTrackObj, obj) == 0);
|
|
|
|
|
priv->ecmp_track_by_obj =
|
|
|
|
|
g_hash_table_new_full(nm_pdirect_hash, nm_pdirect_equal, _ecmp_routes_by_obj_free, NULL);
|
|
|
|
|
priv->ecmp_track_by_ecmpid = g_hash_table_new_full(_ecmp_routes_by_ecmpid_hash,
|
|
|
|
|
_ecmp_routes_by_ecmpid_equal,
|
|
|
|
|
_ecmp_routes_by_ecmpid_free,
|
|
|
|
|
NULL);
|
2017-04-17 18:40:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
constructed(GObject *object)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMNetns *self = NM_NETNS(object);
|
2017-04-17 18:40:52 +02:00
|
|
|
NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
|
|
|
|
|
if (!priv->platform)
|
|
|
|
|
g_return_if_reached();
|
|
|
|
|
|
l3cfg: closer integrate NML3Cfg and NMNetns
NML3Cfg and NMNetns are already strongly related and cooperate.
An NML3Cfg instance is created via NMNetns, which is necessary
because NMNetns ensures that there is only one NML3Cfg instance per
ifindex and it won't ever make sense to have multiple NML3Cfg instances
per namespace.
Note that NMNetns tracks additional information for each NML3Cfg.
Previously, in a pointless attempt to separate code, it did so
by putting that information in another struct (L3CfgData).
But as the classes are strongly related, there really is no
reason why we cannot just attach this information to NML3Cfg
directly. Sure, we want code to have low coupling and high cohesion,
but that doesn't mean we gain anything by putting data that is
strongly related to the NML3Cfg into another struct L3CfgData.
The advantage is we save some redundant data and an additional
L3CfgData. But the bigger reason is that with this change, it
will be possible to access the NMNetns specific data directly from
an NML3Cfg instance, without another dictionary lookup. Currently
such a lookup is never used, but it will be.
Basically, NML3Cfg and NMNetns share some state. It is now in the
"internal_netns" field of the NML3Cfg instead of L3CfgData.
2022-12-09 18:13:20 +01:00
|
|
|
priv->l3cfgs = g_hash_table_new_full(nm_pint_hash, nm_pint_equal, _l3cfg_hashed_free, NULL);
|
2020-07-18 19:01:04 +02:00
|
|
|
|
2017-04-17 18:40:52 +02:00
|
|
|
priv->platform_netns = nm_platform_netns_get(priv->platform);
|
|
|
|
|
|
2022-07-18 09:12:09 +02:00
|
|
|
priv->global_tracker = nmp_global_tracker_new(priv->platform);
|
platform: support weakly tracked routing rules in NMPRulesManager
Policy routing rules are global and, unlike routes, not tied to an interface by ifindex.
That means, while we take full control over all routes of an interface during a sync,
we need to consider that multiple parties can contribute to the global set of rules.
That might be multiple connection profiles providing the same rule, or rules that are added
externally by the user. NMPRulesManager mediates for that.
This is done by NMPRulesManager "tracking" rules.
Rules that are not tracked by NMPRulesManager are completely ignored (and
considered externally added).
When tracking a rule, the caller provides a track-priority. If multiple
parties track a rule, then the highest (absolute value of the) priority
wins.
If the highest track-priority is positive, NMPRulesManager will add the rule if
it's not present.
When the highest track-priority is negative, then NMPRulesManager will remove the
rule if it's present (enforce its absence).
The complicated part is, when a rule that was previously tracked becomes no
longer tracked. In that case, we need to restore the previous state.
If NetworkManager added the rule earlier, then on untracking the rule
NMPRulesManager will remove it again (restoring its previous absent
state).
By default, if NetworkManager had a negative tracking-priority and removed the
rule earlier (enforced it to be absent), then when the rule becomes no
longer tracked, NetworkManager will not restore the rule.
Consider: the user adds a rule externally, and then activates a profile that
enforces the absence of the rule (causing NetworkManager to remove it).
When deactivating the profile, by default NetworkManager will not
restore such a rule! It's unclear whether that is a good idea, but it's
also unclear why the rule is there and whether NetworkManager should
really restore it.
Add weakly tracked rules to account for that. A tracking-priority of
zero indicates such weakly tracked rules. The only difference between an untracked
rule and a weakly tracked rule is that, when NetworkManager earlier removed the
rule (due to a negative tracking-priority), it *will* restore weakly
tracked rules once the rule is no longer (negatively) tracked.
And it attempts to do that only once.
Likewise, if the rule is weakly tracked and already exists when
NMPRulesManager starts positively tracking the rule, then it would not
remove it again when it no longer positively tracks it.
2019-04-10 13:47:52 +02:00
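A tiny, purely illustrative model of the priority resolution described above (not the NMPRulesManager/NMPGlobalTracker API; the weak-restore bookkeeping and tie-breaking are not modeled): the tracker with the highest absolute priority decides, positive means ensure-present, negative means ensure-absent, and only-weak (zero) means leave the rule alone.

#include <stdio.h>
#include <stdlib.h>

typedef enum { RULE_KEEP_AS_IS, RULE_ENSURE_PRESENT, RULE_ENSURE_ABSENT } RuleAction;

/* Resolve the action for one rule from all priorities that track it. */
static RuleAction
resolve(const int *prios, size_t n)
{
    int    best = 0;
    size_t i;

    for (i = 0; i < n; i++) {
        if (abs(prios[i]) > abs(best))
            best = prios[i];
    }
    if (best > 0)
        return RULE_ENSURE_PRESENT;
    if (best < 0)
        return RULE_ENSURE_ABSENT;
    return RULE_KEEP_AS_IS; /* only weak (zero) trackers: don't touch the rule */
}

int
main(void)
{
    const int a[] = {0, 10, -20}; /* weak + add(10) + block(-20): block wins */
    const int b[] = {0, 10};      /* weak + add(10): rule gets added */

    printf("%d %d\n", resolve(a, 3), resolve(b, 2)); /* prints "2 1" */
    return 0;
}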
|
|
|
|
policy-routing: take ownership of externally configured rules
IP addresses, routes, TC and QDiscs are all tied to a certain interface.
So when NetworkManager manages an interface, it can be confident that
all related entries should be managed, deleted and modified by NetworkManager.
Routing policy rules are global. For that we have NMPRulesManager which
keeps track of whether NetworkManager owns a rule. This allows multiple
connection profiles to specify the same rule, and NMPRulesManager can
consolidate this information to know whether to add or remove the rule.
NMPRulesManager would also support to explicitly block a rule by
tracking it with negative priority. However that is still unused at
the moment. All that devices do is to add rules (track with positive
priority) and remove them (untrack) once the profile gets deactivated.
As rules are not exclusively owned by NetworkManager, NetworkManager
tries not to interfere with rules that it knows nothing about. That
means in particular, when NetworkManager starts it will "weakly track"
all rules that are present. "weakly track" is mostly interesting for two
cases:
- when NMPRulesManager had the same rule explicitly tracked (added) by a
device, then deactivating the device will leave the rule in place.
- when NMPRulesManager had the same rule explicitly blocked (tracked
with negative priority), then it would restore the rule when that
block gets removed (as said, currently nobody actually does this).
Note that when restarting NetworkManager, then the device may stay and
the rules kept. However after restart, NetworkManager no longer knows
that it previously added this rule, so it would weakly track it and
never remove it again.
That is a problem. Avoid it by making sure that, whenever we explicitly
track a rule, we also no longer weakly track it. Most likely this rule was
indeed previously managed by NetworkManager. If this was really a rule
added externally, then the user really should choose distinct
rule priorities to avoid such conflicts altogether.
2019-07-12 11:19:43 +02:00
|
|
|
/* Weakly track the default rules with a dummy user-tag. These
|
|
|
|
|
* rules are always weakly tracked... */
|
2022-07-18 09:12:09 +02:00
|
|
|
nmp_global_tracker_track_rule_default(priv->global_tracker,
|
|
|
|
|
AF_UNSPEC,
|
|
|
|
|
0,
|
|
|
|
|
nm_netns_parent_class /* static dummy user-tag */);
|
2019-07-12 11:19:43 +02:00
|
|
|
|
|
|
|
|
/* Also weakly track all existing rules. These were added before NetworkManager
|
|
|
|
|
* starts, so they are probably none of NetworkManager's business.
|
|
|
|
|
*
|
|
|
|
|
* However note that during service restart, devices may stay up and rules kept.
|
|
|
|
|
* That means, after restart such rules may have been added by a previous run
|
|
|
|
|
* of NetworkManager, we just don't know.
|
|
|
|
|
*
|
|
|
|
|
* For that reason, whenever we touch such rules later on, we make them
|
2022-07-18 09:12:09 +02:00
|
|
|
* fully owned and no longer weakly tracked. See %NMP_GLOBAL_TRACKER_EXTERN_WEAKLY_TRACKED_USER_TAG. */
|
|
|
|
|
nmp_global_tracker_track_rule_from_platform(priv->global_tracker,
|
|
|
|
|
NULL,
|
|
|
|
|
AF_UNSPEC,
|
|
|
|
|
0,
|
|
|
|
|
NMP_GLOBAL_TRACKER_EXTERN_WEAKLY_TRACKED_USER_TAG);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-04-17 18:40:52 +02:00
|
|
|
G_OBJECT_CLASS(nm_netns_parent_class)->constructed(object);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-07-21 11:21:44 +02:00
|
|
|
g_signal_connect(priv->platform,
|
|
|
|
|
NM_PLATFORM_SIGNAL_LINK_CHANGED,
|
|
|
|
|
G_CALLBACK(_platform_signal_cb),
|
|
|
|
|
&priv->_self_signal_user_data);
|
l3cfg: add nm_l3cfg_property_emit_register() API
The NML3Cfg instance tracks and prepares the IP configuration.
However, that is also partly exposed on other objects, like
NMIP4Config's "route-data" property.
Add an API, so that NMIP4Config can register itself to be notified
when something relevant changes.
This is an alternative to standard GObject properties and signals. They
often seem more effort than worth. That is, because in this case,
NMIP4Config.route-data has no other task then to re-emit the signal.
So, to implement that with GObject properties/signals, we would have to
add a property/signal to NML3Cfg, subscribe to it from NMIP4Config,
and re-emit the signal. An alternative is to bind properties, but that
would still be quite some extra code, and unclear that it would be
simpler. Not to mention the overhead, as bindings are themself full
GObject instances, that register to and emit signals by name.
2020-07-21 12:52:42 +02:00
|
|
|
g_signal_connect(priv->platform,
|
|
|
|
|
NM_PLATFORM_SIGNAL_IP4_ROUTE_CHANGED,
|
|
|
|
|
G_CALLBACK(_platform_signal_cb),
|
|
|
|
|
&priv->_self_signal_user_data);
|
|
|
|
|
g_signal_connect(priv->platform,
|
|
|
|
|
NM_PLATFORM_SIGNAL_IP6_ROUTE_CHANGED,
|
|
|
|
|
G_CALLBACK(_platform_signal_cb),
|
|
|
|
|
&priv->_self_signal_user_data);
|
2020-07-29 08:39:12 +02:00
|
|
|
g_signal_connect(priv->platform,
|
|
|
|
|
NM_PLATFORM_SIGNAL_IP4_ADDRESS_CHANGED,
|
|
|
|
|
G_CALLBACK(_platform_signal_cb),
|
|
|
|
|
&priv->_self_signal_user_data);
|
|
|
|
|
g_signal_connect(priv->platform,
|
|
|
|
|
NM_PLATFORM_SIGNAL_IP6_ADDRESS_CHANGED,
|
|
|
|
|
G_CALLBACK(_platform_signal_cb),
|
|
|
|
|
&priv->_self_signal_user_data);
|
2017-04-17 18:40:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
NMNetns *
|
|
|
|
|
nm_netns_new(NMPlatform *platform)
|
|
|
|
|
{
|
|
|
|
|
return g_object_new(NM_TYPE_NETNS, NM_NETNS_PLATFORM, platform, NULL);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
dispose(GObject *object)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMNetns *self = NM_NETNS(object);
|
2017-04-17 18:40:52 +02:00
|
|
|
NMNetnsPrivate *priv = NM_NETNS_GET_PRIVATE(self);
|
|
|
|
|
|
2020-07-18 19:01:04 +02:00
|
|
|
nm_assert(nm_g_hash_table_size(priv->l3cfgs) == 0);
|
2020-07-21 11:21:44 +02:00
|
|
|
nm_assert(c_list_is_empty(&priv->l3cfg_signal_pending_lst_head));
|
2020-09-14 15:07:18 +02:00
|
|
|
nm_assert(!priv->shared_ips);
|
2020-07-21 11:21:44 +02:00
|
|
|
|
2022-11-23 08:29:48 +01:00
|
|
|
nm_clear_pointer(&priv->ecmp_track_by_obj, g_hash_table_destroy);
|
|
|
|
|
nm_clear_pointer(&priv->ecmp_track_by_ecmpid, g_hash_table_destroy);
|
|
|
|
|
|
2021-10-07 08:04:45 +02:00
|
|
|
nm_clear_g_source_inst(&priv->signal_pending_idle_source);
|
2020-07-21 11:21:44 +02:00
|
|
|
|
|
|
|
|
if (priv->platform)
|
|
|
|
|
g_signal_handlers_disconnect_by_data(priv->platform, &priv->_self_signal_user_data);
|
2020-07-18 19:01:04 +02:00
|
|
|
|
2017-04-17 18:40:52 +02:00
|
|
|
g_clear_object(&priv->platform);
|
2020-10-28 13:59:22 +01:00
|
|
|
nm_clear_pointer(&priv->l3cfgs, g_hash_table_unref);
|
2017-04-17 18:40:52 +02:00
|
|
|
|
2022-07-18 09:12:09 +02:00
|
|
|
nm_clear_pointer(&priv->global_tracker, nmp_global_tracker_unref);
|
2019-03-11 11:37:40 +01:00
|
|
|
|
2017-04-17 18:40:52 +02:00
|
|
|
G_OBJECT_CLASS(nm_netns_parent_class)->dispose(object);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
nm_netns_class_init(NMNetnsClass *klass)
|
|
|
|
|
{
|
|
|
|
|
GObjectClass *object_class = G_OBJECT_CLASS(klass);
|
|
|
|
|
|
|
|
|
|
object_class->constructed = constructed;
|
|
|
|
|
object_class->set_property = set_property;
|
|
|
|
|
object_class->dispose = dispose;
|
|
|
|
|
|
|
|
|
|
obj_properties[PROP_PLATFORM] =
|
|
|
|
|
g_param_spec_object(NM_NETNS_PLATFORM,
|
|
|
|
|
"",
|
|
|
|
|
"",
|
|
|
|
|
NM_TYPE_PLATFORM,
|
|
|
|
|
G_PARAM_WRITABLE | G_PARAM_CONSTRUCT_ONLY | G_PARAM_STATIC_STRINGS);
|
|
|
|
|
|
|
|
|
|
g_object_class_install_properties(object_class, _PROPERTY_ENUMS_LAST, obj_properties);
|
|
|
|
|
}
|