/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2007 - 2009 Novell, Inc.
 * Copyright (C) 2007 - 2017 Red Hat, Inc.
 */

#include "src/core/nm-default-daemon.h"

#include "nm-manager.h"

#include <fcntl.h>
#include <limits.h>
#include <stdlib.h>
#include <sys/sendfile.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "NetworkManagerUtils.h"
#include "devices/nm-device-factory.h"
#include "devices/nm-device-generic.h"
#include "devices/nm-device.h"
#include "dns/nm-dns-manager.h"
#include "dhcp/nm-dhcp-manager.h"
#include "libnm-core-aux-intern/nm-common-macros.h"
#include "libnm-core-intern/nm-core-internal.h"
#include "libnm-glib-aux/nm-c-list.h"
#include "libnm-platform/nm-platform.h"
#include "libnm-platform/nmp-object.h"
#include "libnm-std-aux/nm-dbus-compat.h"
#include "nm-act-request.h"
#include "nm-audit-manager.h"
#include "nm-auth-manager.h"
#include "nm-auth-utils.h"
#include "nm-checkpoint-manager.h"
#include "nm-checkpoint.h"
#include "nm-config.h"
#include "nm-connectivity.h"
#include "nm-dbus-manager.h"
#include "nm-dbus-object.h"
#include "nm-dispatcher.h"
#include "nm-hostname-manager.h"
#include "nm-keep-alive.h"
#include "nm-policy.h"
#include "nm-priv-helper-call.h"
#include "nm-rfkill-manager.h"
#include "nm-session-monitor.h"
#include "nm-sleep-monitor.h"
#include "settings/nm-settings-connection.h"
#include "settings/nm-settings.h"
#include "vpn/nm-vpn-manager.h"

#define DEVICE_STATE_PRUNE_RATELIMIT_MAX 100u

/*****************************************************************************/
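
/* Lookup entry for one rfkill radio type: the GObject property ids of the
 * corresponding "*-enabled" and "*-hardware-enabled" properties, and the
 * run-state key under which the user's enabled setting is persisted. */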
typedef struct {
    guint                        prop_id;
    guint                        hw_prop_id;
    NMConfigRunStatePropertyType key;
} RfkillTypeDesc;
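
/* Tracked rfkill state for one radio type: whether such a radio is present
 * at all, and the state of the user, software and hardware kill switches. */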
typedef struct {
    bool available : 1;
    bool user_enabled : 1;
    bool sw_enabled : 1;
    bool hw_enabled : 1;
    bool os_owner : 1;
} RfkillRadioState;
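
/* Types of pending activation requests. ADD_AND_ACTIVATE2 corresponds to the
 * AddAndActivate2 D-Bus method, which extends AddAndActivate with an options
 * dictionary and an "a{sv}" output dictionary for forward extensibility. */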
typedef enum {
    ASYNC_OP_TYPE_AC_AUTH_ACTIVATE_INTERNAL,
    ASYNC_OP_TYPE_AC_AUTH_ACTIVATE_USER,
    ASYNC_OP_TYPE_AC_AUTH_ADD_AND_ACTIVATE,
    ASYNC_OP_TYPE_AC_AUTH_ADD_AND_ACTIVATE2,
} AsyncOpType;
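
/* Context for one in-flight asynchronous operation. Instances are linked
 * into NMManagerPrivate.async_op_lst_head via the embedded async_op_lst
 * node. */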
typedef struct {
    CList       async_op_lst;
    NMManager  *self;
    AsyncOpType async_op_type;
    union {
        struct {
            NMActiveConnection *active;
            union {
                struct {
                    GDBusMethodInvocation *invocation;
                } activate_user;
                struct {
                    GDBusMethodInvocation          *invocation;
                    NMConnection                   *connection;
                    NMSettingsConnectionPersistMode persist_mode;
                    bool                            is_volatile : 1;
                } add_and_activate;
            };
        } ac_auth;
    };
} AsyncOpData;
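
/* Signal ids. DEVICE_ADDED/DEVICE_REMOVED back the D-Bus signals described
 * by signal_info_device_added/signal_info_device_removed below; the
 * INTERNAL_* variants are GObject-only counterparts. */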
enum {
    DEVICE_ADDED,
    INTERNAL_DEVICE_ADDED,
    DEVICE_REMOVED,
    INTERNAL_DEVICE_REMOVED,
    ACTIVE_CONNECTION_ADDED,
    ACTIVE_CONNECTION_REMOVED,
    CONFIGURE_QUIT,
    DEVICE_IFINDEX_CHANGED,

    LAST_SIGNAL
};

static guint signals[LAST_SIGNAL] = {0};
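
/* GObject properties. All of them except PROP_SLEEPING are also exported
 * on D-Bus. */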
NM_GOBJECT_PROPERTIES_DEFINE(NMManager,
                             PROP_VERSION,
                             PROP_CAPABILITIES,
                             PROP_STATE,
                             PROP_STARTUP,
                             PROP_NETWORKING_ENABLED,
                             PROP_WIRELESS_ENABLED,
                             PROP_WIRELESS_HARDWARE_ENABLED,
                             PROP_WWAN_ENABLED,
                             PROP_WWAN_HARDWARE_ENABLED,
                             PROP_WIMAX_ENABLED,
                             PROP_WIMAX_HARDWARE_ENABLED,
                             PROP_RADIO_FLAGS,
                             PROP_ACTIVE_CONNECTIONS,
                             PROP_CONNECTIVITY,
                             PROP_CONNECTIVITY_CHECK_AVAILABLE,
                             PROP_CONNECTIVITY_CHECK_ENABLED,
                             PROP_CONNECTIVITY_CHECK_URI,
                             PROP_PRIMARY_CONNECTION,
                             PROP_PRIMARY_CONNECTION_TYPE,
                             PROP_ACTIVATING_CONNECTION,
                             PROP_DEVICES,
                             PROP_METERED,
                             PROP_GLOBAL_DNS_CONFIGURATION,
                             PROP_ALL_DEVICES,
                             PROP_CHECKPOINTS,

                             /* Not exported */
                             PROP_SLEEPING, );
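
/* Private runtime state of NMManager, accessed via NM_MANAGER_GET_PRIVATE(). */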
typedef struct {
    NMPlatform *platform;
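
    /* The NMDnsManager singleton. The manager owns a reference to it so that
     * signal subscriptions are guaranteed to be removed before the singleton
     * is destroyed during shutdown. It is initialized lazily on first use. */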
    NMDnsManager *dns_mgr;
    gulong        dns_mgr_update_pending_signal_id;

    GArray *capabilities;

    CList active_connections_lst_head; /* Oldest ACs at the beginning */
    CList async_op_lst_head;
    guint ac_cleanup_id;

    NMActiveConnection *primary_connection;
    NMActiveConnection *activating_connection;
    NMMetered           metered;
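
    /* All NMDevice instances, linked via a CList node embedded in NMDevice.
     * The embedded node allows O(1) unlinking without a separate link
     * allocation, at the cost that a device can be tracked by only one
     * manager at a time. */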
    CList devices_lst_head;

    NMState state;

    NMConfig       *config;
    NMConnectivity *concheck_mgr;
    NMPolicy       *policy;

    NMHostnameManager *hostname_manager;

    struct {
        GDBusConnection *connection;
        guint            id;
    } prop_filter;

    NMRfkillManager *rfkill_mgr;

    CList link_cb_lst;

    NMCheckpointManager *checkpoint_mgr;

    NMSettings *settings;

    RfkillRadioState radio_states[NM_RFKILL_TYPE_MAX];

    NMVpnManager *vpn_manager;

    NMSleepMonitor *sleep_monitor;

    NMAuthManager *auth_mgr;
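
    /* Keeps track of the default-route metrics currently in use per device,
     * so that devices activated without an explicit route metric receive
     * non-overlapping effective metrics (e.g. 100, 101, ...). */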
    GHashTable *device_route_metrics;

    CList auth_lst_head;

    GHashTable *sleep_devices;

    /* Firmware dir monitor */
    GFileMonitor *fw_monitor;
    guint         fw_changed_id;

    guint timestamp_update_id;

    guint devices_inited_id;

    guint radio_flags;
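
    /* Connectivity state aggregated over all devices. When connectivity is
     * lost, devices re-probe with an interval that starts small and doubles
     * until the full configured interval is reached again. */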
    NMConnectivityState connectivity_state;

    guint8 device_state_prune_ratelimit_count;

    bool startup : 1;
    bool devices_inited : 1;
    bool sleeping : 1;
    bool net_enabled : 1;

    unsigned connectivity_check_enabled_last : 2;

    /* List of GDBusMethodInvocation for in-progress Sleep() and Enable()
     * calls. They are returned only after all in-flight deactivations have
     * finished. */
    GSList *sleep_invocations;

    guint delete_volatile_connection_idle_id;
    CList delete_volatile_connection_lst_head;
} NMManagerPrivate;

struct _NMManager {
    NMDBusObject     parent;
    NMManagerPrivate _priv;
};

typedef struct {
    NMDBusObjectClass parent;

#if WITH_OPENVSWITCH
    /* These fields exist only so that the symbols are used somewhere. */
    void (*_use_symbol_nm_priv_helper_call_get_fd)(void);
    void (*_use_symbol_nm_priv_helper_utils_open_fd)(void);
#endif

} NMManagerClass;

G_DEFINE_TYPE(NMManager, nm_manager, NM_TYPE_DBUS_OBJECT)

#define NM_MANAGER_GET_PRIVATE(self) _NM_GET_PRIVATE(self, NMManager, NM_IS_MANAGER)

/*****************************************************************************/

NM_DEFINE_SINGLETON_INSTANCE(NMManager);

/*****************************************************************************/
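
/* Logging helpers. _NMLOG() prefixes the message with "manager" (plus the
 * instance pointer when logging for an instance other than the singleton);
 * _NMLOG2() additionally prints the device's interface name and _NMLOG3()
 * the connection's id. Typical use:
 *
 *   _NMLOG(LOGL_DEBUG, LOGD_CORE, "something happened (%d)", 42);
 */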
#define _NMLOG_PREFIX_NAME "manager"
#define _NMLOG(level, domain, ...)                                                             \
    G_STMT_START                                                                               \
    {                                                                                          \
        const NMLogLevel  _level  = (level);                                                   \
        const NMLogDomain _domain = (domain);                                                  \
                                                                                               \
        if (nm_logging_enabled(_level, _domain)) {                                             \
            const NMManager *const _self = (self);                                             \
            char                   _sbuf[32];                                                  \
                                                                                               \
            _nm_log(_level,                                                                    \
                    _domain,                                                                   \
                    0,                                                                         \
                    NULL,                                                                      \
                    NULL,                                                                      \
                    "%s%s: " _NM_UTILS_MACRO_FIRST(__VA_ARGS__),                               \
                    _NMLOG_PREFIX_NAME,                                                        \
                    ((_self && _self != singleton_instance) ? nm_sprintf_buf(_sbuf, "[%p]", _self) \
                                                            : "") _NM_UTILS_MACRO_REST(__VA_ARGS__)); \
        }                                                                                      \
    }                                                                                          \
    G_STMT_END

#define _NMLOG2(level, domain, device, ...)                                                    \
    G_STMT_START                                                                               \
    {                                                                                          \
        const NMLogLevel  _level  = (level);                                                   \
        const NMLogDomain _domain = (domain);                                                  \
                                                                                               \
        if (nm_logging_enabled(_level, _domain)) {                                             \
            const NMManager *const _self   = (self);                                           \
            const char *const      _ifname = _nm_device_get_iface(device);                     \
            char                   _sbuf[32];                                                  \
                                                                                               \
            _nm_log(_level,                                                                    \
                    _domain,                                                                   \
                    0,                                                                         \
                    _ifname,                                                                   \
                    NULL,                                                                      \
                    "%s%s: %s%s%s" _NM_UTILS_MACRO_FIRST(__VA_ARGS__),                         \
                    _NMLOG_PREFIX_NAME,                                                        \
                    ((_self && _self != singleton_instance) ? nm_sprintf_buf(_sbuf, "[%p]", _self) \
                                                            : ""),                             \
                    NM_PRINT_FMT_QUOTED(_ifname, "(", _ifname, "): ", "")                      \
                        _NM_UTILS_MACRO_REST(__VA_ARGS__));                                    \
        }                                                                                      \
    }                                                                                          \
    G_STMT_END

#define _NMLOG3(level, domain, connection, ...)                                                \
    G_STMT_START                                                                               \
    {                                                                                          \
        const NMLogLevel  _level  = (level);                                                   \
        const NMLogDomain _domain = (domain);                                                  \
                                                                                               \
        if (nm_logging_enabled(_level, _domain)) {                                             \
            const NMManager *const _self       = (self);                                       \
            NMConnection *const    _connection = (connection);                                 \
            const char *const      _con_id     = _nm_connection_get_id(_connection);           \
            char                   _sbuf[32];                                                  \
                                                                                               \
            _nm_log(_level,                                                                    \
                    _domain,                                                                   \
                    0,                                                                         \
                    NULL,                                                                      \
                    _nm_connection_get_uuid(_connection),                                      \
                    "%s%s: %s%s%s" _NM_UTILS_MACRO_FIRST(__VA_ARGS__),                         \
                    _NMLOG_PREFIX_NAME,                                                        \
                    ((_self && _self != singleton_instance) ? nm_sprintf_buf(_sbuf, "[%p]", _self) \
                                                            : ""),                             \
                    NM_PRINT_FMT_QUOTED(_con_id, "(", _con_id, ") ", "")                       \
                        _NM_UTILS_MACRO_REST(__VA_ARGS__));                                    \
        }                                                                                      \
    }                                                                                          \
    G_STMT_END

/*****************************************************************************/
static const NMDBusInterfaceInfoExtended interface_info_manager;
static const GDBusSignalInfo             signal_info_check_permissions;
static const GDBusSignalInfo             signal_info_state_changed;
static const GDBusSignalInfo             signal_info_device_added;
static const GDBusSignalInfo             signal_info_device_removed;

static void update_connectivity_value(NMManager *self);

static gboolean add_device(NMManager *self, NMDevice *device, GError **error);

static void _emit_device_added_removed(NMManager *self, NMDevice *device, gboolean is_added);

static NMActiveConnection *_new_active_connection(NMManager              *self,
                                                  gboolean                is_vpn,
                                                  NMSettingsConnection   *sett_conn,
                                                  NMConnection           *incompl_conn,
                                                  NMConnection           *applied,
                                                  const char             *specific_object,
                                                  NMDevice               *device,
                                                  NMAuthSubject          *subject,
                                                  NMActivationType        activation_type,
                                                  NMActivationReason      activation_reason,
                                                  NMActivationStateFlags  initial_state_flags,
                                                  GError                **error);

static void policy_activating_ac_changed(GObject *object, GParamSpec *pspec, gpointer user_data);

static void device_has_pending_action_changed(NMDevice *device, GParamSpec *pspec, NMManager *self);

static void check_if_startup_complete(NMManager *self);

static gboolean find_master(NMManager             *self,
                            NMConnection          *connection,
                            NMDevice              *device,
                            NMSettingsConnection **out_master_connection,
                            NMDevice             **out_master_device,
                            NMActiveConnection   **out_master_ac,
                            GError               **error);

static void nm_manager_update_state(NMManager *manager);

static void connection_changed(NMManager *self, NMSettingsConnection *sett_conn);

static void device_sleep_cb(NMDevice *device, GParamSpec *pspec, NMManager *self);

static void
settings_startup_complete_changed(NMSettings *settings, GParamSpec *pspec, NMManager *self);

static void retry_connections_for_parent_device(NMManager *self, NMDevice *device);

static void
active_connection_state_changed(NMActiveConnection *active, GParamSpec *pspec, NMManager *self);

static void
active_connection_default_changed(NMActiveConnection *active, GParamSpec *pspec, NMManager *self);

static void active_connection_parent_active(NMActiveConnection *active,
                                            NMActiveConnection *parent_ac,
                                            NMManager          *self);

static NMActiveConnection *active_connection_find(NMManager              *self,
                                                  NMSettingsConnection   *sett_conn,
                                                  const char             *uuid,
                                                  NMActiveConnectionState max_state,
                                                  gboolean                also_waiting_auth,
                                                  GPtrArray             **out_all_matching);

static NMConnectivity *concheck_get_mgr(NMManager *self);

static void _internal_activation_auth_done(NMManager          *self,
                                           NMActiveConnection *active,
                                           gboolean            success,
                                           const char         *error_desc);

static void _add_and_activate_auth_done(NMManager                      *self,
                                        AsyncOpType                     async_op_type,
                                        NMActiveConnection             *active,
                                        NMConnection                   *connection,
                                        GDBusMethodInvocation          *invocation,
                                        NMSettingsConnectionPersistMode persist_mode,
                                        gboolean                        is_volatile,
                                        gboolean                        success,
                                        const char                     *error_desc);

static void _activation_auth_done(NMManager             *self,
                                  NMActiveConnection    *active,
                                  GDBusMethodInvocation *invocation,
                                  gboolean               success,
                                  const char            *error_desc);

static void _rfkill_update(NMManager *self, NMRfkillType rtype);

/*****************************************************************************/

static NM_CACHED_QUARK_FCN("autoconnect-root", autoconnect_root_quark);

/*****************************************************************************/

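/* Whether @connection is a VPN profile. For a complete profile this is
 * decided by the connection type; the fallback below only matters for
 * incomplete connections as they occur during AddAndActivate. */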
static gboolean
_connection_is_vpn(NMConnection *connection)
{
    const char *type;

    type = nm_connection_get_connection_type(connection);
    if (type)
        return nm_streq(type, NM_SETTING_VPN_SETTING_NAME);

    /* we have an incomplete (invalid) connection at hand. That can only
     * happen during AddAndActivate. Determine whether it's VPN type based
     * on the existence of a [vpn] section. */
    return !!nm_connection_get_setting_vpn(connection);
}

/*****************************************************************************/
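/* Returns whether connectivity checking is enabled and sets @out_changed
 * if the result differs from the previous call. The cached value uses
 * 1 (enabled) / 2 (disabled) instead of a plain boolean so that the
 * zero-initialized default never matches and the first evaluation counts
 * as a change. */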
static gboolean
concheck_enabled(NMManager *self, gboolean *out_changed)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
    guint             check_enabled;

    check_enabled = nm_connectivity_check_enabled(concheck_get_mgr(self)) ? 1 : 2;
    if (priv->connectivity_check_enabled_last == check_enabled)
        NM_SET_OUT(out_changed, FALSE);
    else {
        NM_SET_OUT(out_changed, TRUE);
        priv->connectivity_check_enabled_last = check_enabled;
    }
    return check_enabled == 1;
}
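/* Reacts to configuration changes of the connectivity checker: emits a
 * property-changed notification if the enabled state flipped and lets
 * every device recompute its connectivity-check interval. */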
static void
concheck_config_changed_cb(NMConnectivity *connectivity, NMManager *self)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
    NMDevice         *device;
    gboolean          changed;

    concheck_enabled(self, &changed);
    if (changed)
        _notify(self, PROP_CONNECTIVITY_CHECK_ENABLED);

    c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst)
        nm_device_check_connectivity_update_interval(device);
}
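/* Lazily acquires and caches a reference to the NMConnectivity singleton;
 * the first caller also hooks up the config-changed handler. The reference
 * is presumably dropped again in dispose() (not shown in this section). */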
static NMConnectivity *
concheck_get_mgr(NMManager *self)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);

    if (G_UNLIKELY(!priv->concheck_mgr)) {
        priv->concheck_mgr = g_object_ref(nm_connectivity_get());
        g_signal_connect(priv->concheck_mgr,
                         NM_CONNECTIVITY_CONFIG_CHANGED,
                         G_CALLBACK(concheck_config_changed_cb),
                         self);
    }
    return priv->concheck_mgr;
}
/*****************************************************************************/
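/* The _async_op_data_new_*() constructors below allocate a tracking entry
 * for one pending authorization, take ownership of the "*_take" arguments
 * (as the suffix suggests) and queue the entry on the manager's
 * async_op_lst_head list, where it stays until the authorization result
 * arrives in _async_op_complete_ac_auth_cb(). */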
static AsyncOpData *
_async_op_data_new_authorize_activate_internal(NMManager *self, NMActiveConnection *active_take)
{
    AsyncOpData *async_op_data;

    async_op_data                 = g_slice_new0(AsyncOpData);
    async_op_data->async_op_type  = ASYNC_OP_TYPE_AC_AUTH_ACTIVATE_INTERNAL;
    async_op_data->self           = g_object_ref(self);
    async_op_data->ac_auth.active = active_take;
    c_list_link_tail(&NM_MANAGER_GET_PRIVATE(self)->async_op_lst_head,
                     &async_op_data->async_op_lst);
    return async_op_data;
}
static AsyncOpData *
_async_op_data_new_ac_auth_activate_user(NMManager             *self,
                                         NMActiveConnection    *active_take,
                                         GDBusMethodInvocation *invocation_take)
{
    AsyncOpData *async_op_data;

    async_op_data                                   = g_slice_new0(AsyncOpData);
    async_op_data->async_op_type                    = ASYNC_OP_TYPE_AC_AUTH_ACTIVATE_USER;
    async_op_data->self                             = g_object_ref(self);
    async_op_data->ac_auth.active                   = active_take;
    async_op_data->ac_auth.activate_user.invocation = invocation_take;
    c_list_link_tail(&NM_MANAGER_GET_PRIVATE(self)->async_op_lst_head,
                     &async_op_data->async_op_lst);
    return async_op_data;
}
static AsyncOpData *
_async_op_data_new_ac_auth_add_and_activate(NMManager                      *self,
                                            AsyncOpType                     async_op_type,
                                            NMActiveConnection             *active_take,
                                            GDBusMethodInvocation          *invocation_take,
                                            NMConnection                   *connection_take,
                                            NMSettingsConnectionPersistMode persist_mode,
                                            gboolean                        is_volatile)
{
    AsyncOpData *async_op_data;

    nm_assert(NM_IN_SET(async_op_type,
                        ASYNC_OP_TYPE_AC_AUTH_ADD_AND_ACTIVATE,
                        ASYNC_OP_TYPE_AC_AUTH_ADD_AND_ACTIVATE2));

    async_op_data                                        = g_slice_new0(AsyncOpData);
    async_op_data->async_op_type                         = async_op_type;
    async_op_data->self                                  = g_object_ref(self);
    async_op_data->ac_auth.active                        = active_take;
    async_op_data->ac_auth.add_and_activate.invocation   = invocation_take;
    async_op_data->ac_auth.add_and_activate.connection   = connection_take;
    async_op_data->ac_auth.add_and_activate.persist_mode = persist_mode;
    async_op_data->ac_auth.add_and_activate.is_volatile  = is_volatile;
    c_list_link_tail(&NM_MANAGER_GET_PRIVATE(self)->async_op_lst_head,
                     &async_op_data->async_op_lst);
    return async_op_data;
}
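/* Completion callback for the authorization request: unlinks the tracking
 * entry from the pending list and dispatches the result to the handler
 * matching the operation type. */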
static void
|
|
|
|
|
_async_op_complete_ac_auth_cb(NMActiveConnection *active,
|
|
|
|
|
gboolean success,
|
2021-11-09 13:28:54 +01:00
|
|
|
const char *error_desc,
|
2018-04-18 09:06:54 +02:00
|
|
|
gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
AsyncOpData *async_op_data = user_data;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-18 09:06:54 +02:00
|
|
|
nm_assert(async_op_data);
|
|
|
|
|
nm_assert(NM_IS_MANAGER(async_op_data->self));
|
|
|
|
|
nm_assert(
|
|
|
|
|
nm_c_list_contains_entry(&NM_MANAGER_GET_PRIVATE(async_op_data->self)->async_op_lst_head,
|
|
|
|
|
async_op_data,
|
|
|
|
|
async_op_lst));
|
|
|
|
|
nm_assert(NM_IS_ACTIVE_CONNECTION(active));
|
|
|
|
|
nm_assert(active == async_op_data->ac_auth.active);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-18 09:06:54 +02:00
|
|
|
c_list_unlink(&async_op_data->async_op_lst);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-18 09:06:54 +02:00
|
|
|
switch (async_op_data->async_op_type) {
|
|
|
|
|
case ASYNC_OP_TYPE_AC_AUTH_ACTIVATE_INTERNAL:
|
|
|
|
|
_internal_activation_auth_done(async_op_data->self,
|
|
|
|
|
async_op_data->ac_auth.active,
|
|
|
|
|
success,
|
|
|
|
|
error_desc);
|
|
|
|
|
break;
|
|
|
|
|
case ASYNC_OP_TYPE_AC_AUTH_ACTIVATE_USER:
|
|
|
|
|
_activation_auth_done(async_op_data->self,
|
|
|
|
|
async_op_data->ac_auth.active,
|
|
|
|
|
async_op_data->ac_auth.activate_user.invocation,
|
|
|
|
|
success,
|
|
|
|
|
error_desc);
|
|
|
|
|
break;
|
|
|
|
|
case ASYNC_OP_TYPE_AC_AUTH_ADD_AND_ACTIVATE:
|
all: return output dictionary from "AddAndActivate2"
Add an "a{sv}" output argument to the "AddAndActivate2" D-Bus API.
"AddAndActivate2" replaces "AddAndActivate" with more options.
It also has a dictionary input argument to be forward compatible, so that we
hopefully won't need an "AddAndActivate3". However, it lacked a similar
output dictionary. Add it for future extensibility. I think this is
really a workaround for a shortcoming of D-Bus, which does provide strong
typing and type information about its API, but does not allow extending
an existing API in a backward-compatible manner. So we either resort to
Method(), Method2(), Method3() variants, or to a catch-all variant with a
generic "a{sv}" input/output argument.
In libnm, rename "nm_client_add_and_activate_connection_options()" to
"nm_client_add_and_activate_connection2()". I think libnm API should have
an obvious correspondence with the D-Bus API. Stated differently, if
"AddAndActivateOptions" were the better name, then the D-Bus API should
be renamed. We should prefer one name over the other, but regardless
of which is preferred, the naming of the D-Bus and libnm APIs should
correspond.
In this case, I do think that AddAndActivate2() is a better name than
AddAndActivateOptions(). Hence I rename the libnm API.
Also, unless necessary, let libnm still call "AddAndActivate" instead of
"AddAndActivate2". Our backward compatibility works such that libnm
requires a server version at least as new as itself. As such, libnm
could theoretically assume that the server version is new enough to support
"AddAndActivate2" and could always use the more powerful variant.
However, we don't need to break compatibility intentionally and for
little gain. Here, it's easy to let libnm also handle the old server API, by
continuing to use "AddAndActivate" for nm_client_add_and_activate_connection().
Note that during a package update, we don't restart the currently running
NetworkManager instance. In such a scenario, it can easily happen that
nmcli/libnm is newer than the server version. Let's try a bit harder
not to break that.
Changes as discussed in [1].
[1] https://gitlab.freedesktop.org/NetworkManager/NetworkManager/merge_requests/37#note_79876
2018-12-20 07:48:31 +01:00
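A hedged client-side sketch (assuming libnm's documented
nm_client_add_and_activate_connection2()/..._finish() signatures; the
callback name is illustrative) showing where the new output dictionary
surfaces:

    static void
    add_and_activate_done(GObject *source, GAsyncResult *result, gpointer user_data)
    {
        GVariant           *out_result = NULL;
        GError             *error      = NULL;
        NMActiveConnection *ac;

        ac = nm_client_add_and_activate_connection2_finish(NM_CLIENT(source),
                                                           result,
                                                           &out_result,
                                                           &error);
        if (!ac) {
            g_warning("activation failed: %s", error->message);
            g_clear_error(&error);
            return;
        }
        /* "out_result" is the new "a{sv}" output dictionary; it may be
         * empty today and exists for future extensibility. */
        g_clear_pointer(&out_result, g_variant_unref);
        g_object_unref(ac);
    }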
|
|
|
case ASYNC_OP_TYPE_AC_AUTH_ADD_AND_ACTIVATE2:
|
2018-04-18 09:06:54 +02:00
|
|
|
_add_and_activate_auth_done(async_op_data->self,
|
all: return output dictionary from "AddAndActivate2"
2018-12-20 07:48:31 +01:00
|
|
|
async_op_data->async_op_type,
|
2018-04-18 09:06:54 +02:00
|
|
|
async_op_data->ac_auth.active,
|
|
|
|
|
async_op_data->ac_auth.add_and_activate.connection,
|
|
|
|
|
async_op_data->ac_auth.add_and_activate.invocation,
|
settings: rework tracking settings connections and settings plugins
Completely rework how settings plugins handle connections and how
NMSettings tracks the list of connections.
Previously, settings plugins would return objects of (a subtype of) type
NMSettingsConnection. The NMSettingsConnection was tightly coupled with
the settings plugin. That has a lot of downsides.
Change that. When changing this basic relation of how settings connections
are tracked, everything falls apart. That's why this is a huge change.
Since I have to largely rewrite the settings plugins anyway, I also
added support for multiple keyfile directories, let only the keyfile plugin
handle in-memory connections, and (partly) use copy-on-write NMConnection
instances. I don't want to spend effort rewriting large parts while
preserving the old way, which should change anyway. E.g. while rewriting
ifcfg-rh, I don't want to let it handle in-memory connections because that's
not right long-term.
--
If the settings plugins themselves create subtypes of NMSettingsConnection
instances, then a lot of knowledge about tracking connections moves
to the plugins.
Just try to follow in the code what happened during nm_settings_add_connection().
Note how the logic is spread out:
- nm_settings_add_connection() calls the plugin's add_connection()
- add_connection() creates an NMSettingsConnection subtype
- the plugin has to know that it's called during add-connection and
must not emit the NM_SETTINGS_PLUGIN_CONNECTION_ADDED signal
- NMSettings calls claim_connection(), which hooks up the new
NMSettingsConnection instance and configures the instance
(like calling nm_settings_connection_added()).
This summary does not sound like a lot, but try to follow that code. The logic
is all over the place.
Instead, settings plugins should have a very simple API for adding, modifying,
deleting, loading and reloading connections. All the plugin does is return an
NMSettingsStorage handle. The storage instance is a handle that identifies a
profile in storage (e.g. a particular file). The settings plugin is free to
subtype NMSettingsStorage, but it's not necessary.
No more events are raised, and the settings plugin implements the small
API in a straightforward manner (see the sketch at the end of this message).
NMSettings now drives all of this. Even NMSettingsConnection now has
very little concern about how it's tracked and delegates only to NMSettings.
This should make settings plugins simpler. Currently, settings plugins
are so cumbersome to implement that we avoid having them. It should not be
like that; it should be easy, beneficial and lightweight to create a new
settings plugin.
Note also how the settings plugins no longer care about duplicate UUIDs.
Duplicate UUIDs are a fact of life, and NMSettings must handle them. No
need to overly concern settings plugins with that.
--
NMSettingsConnection is exposed directly on D-Bus (being a subtype of
NMDBusObject), but it was also a GObject type provided by the settings
plugin. Hence, it was not possible to migrate a profile from one plugin to
another.
However, that would be useful when one plugin does not support a
connection type (like ifcfg-rh not supporting VPN). Currently, such
migration is not implemented, except for migrating profiles to/from keyfile's
run directory. The problem is that migrating profiles in general is
complicated, but in some cases it is important to do.
For example, checkpoint rollback should recreate the profile in the right
settings plugin, not just add it to persistent storage. This is not yet
properly implemented.
--
Previously, both the keyfile and ifcfg-rh plugins implemented in-memory
(unsaved) profiles, while the ifupdown plugin could not handle them. That meant
duplicated code, and an ifupdown profile could not be modified or made unsaved.
This is now unified, and only the keyfile plugin handles in-memory profiles
(bgo #744711). Also, NMSettings is aware of such profiles and treats them
specially. In particular, NMSettings drives the migration between persistent
and non-persistent storage.
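To make the intended shape concrete, a hypothetical sketch of such a
simplified plugin API (none of these names are the real NMSettingsPlugin
vfuncs; the actual interface is more involved):

    /* The plugin only maps profiles to storage handles; NMSettings does
     * all the tracking. */
    typedef struct {
        GObjectClass parent;

        /* Persist @connection and return a handle identifying where it
         * was stored (e.g. a file). No signals, no NMSettingsConnection. */
        NMSettingsStorage *(*add_connection)(NMSettingsPlugin *self,
                                             NMConnection     *connection,
                                             GError          **error);

        /* Rewrite the profile behind @storage with new content. */
        gboolean (*update_connection)(NMSettingsPlugin  *self,
                                      NMSettingsStorage *storage,
                                      NMConnection      *connection,
                                      GError           **error);

        /* Delete the storage (e.g. unlink the backing file). */
        gboolean (*delete_connection)(NMSettingsPlugin  *self,
                                      NMSettingsStorage *storage,
                                      GError           **error);
    } ExampleSettingsPluginClass;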
2019-06-13 17:12:20 +02:00
|
|
|
async_op_data->ac_auth.add_and_activate.persist_mode,
|
|
|
|
|
async_op_data->ac_auth.add_and_activate.is_volatile,
|
2018-04-18 09:06:54 +02:00
|
|
|
success,
|
|
|
|
|
error_desc);
|
|
|
|
|
g_object_unref(async_op_data->ac_auth.add_and_activate.connection);
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
nm_assert_not_reached();
|
|
|
|
|
break;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-18 09:06:54 +02:00
|
|
|
g_object_unref(async_op_data->ac_auth.active);
|
|
|
|
|
g_object_unref(async_op_data->self);
|
|
|
|
|
g_slice_free(AsyncOpData, async_op_data);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
device: generate unique default route-metrics per interface
In the past we had NMDefaultRouteManager, which would coordinate adding
default-routes with identical metrics. That especially happened when
activating two devices of the same type without explicitly specifying
ipv4.route-metric. For example, with ethernet devices, the routes on
both interfaces would get a metric of 100.
Coordinating routes was especially necessary because we added
routes with the NLM_F_EXCL flag, akin to `ip route replace`. We not
only had to avoid that activating two devices in NetworkManager would
result in a fight over the default-route, but more importantly had
to preserve externally added default-routes on unmanaged interfaces.
NMDefaultRouteManager would ensure that, in case of duplicate
metrics, the device that activated first would keep the
best default-route. It would do so by bumping the metric
of the second device to find an unused metric. The bumping itself
was not very important -- NMDefaultRouteManager could also just not
configure any default-routes that show up second; the result
would be quite similar. More important was to keep the best
default-route on the first activating device until the device
deactivates or a device activates that really has a better
default-route.
Likewise, NMRouteManager would globally manage non-default-routes.
It would not do any bumping of metrics, but it would also ensure that the routes
of the device that activates first are not overwritten by a device activating
later.
However, the `ip route replace` approach has downsides, especially
that it messes with routes on other interfaces, interfaces that are
possibly not managed by NetworkManager. Another downside is that
binding a socket to an interface might not result in correct
routes, because the route might just not be there (in the case of
NMRouteManager, which wouldn't configure duplicate routes by bumping
their metric).
Since commit 77ec302714795f905301d500b9aab6c88001f32e we no longer
use NLM_F_EXCL, but add routes akin to `ip route append`. When
activating, for example, two ethernet devices with no explicit route
metric configuration, there are two routes like
default via 10.16.122.254 dev eth0 proto dhcp metric 100
default via 192.168.100.1 dev eth1 proto dhcp metric 100
This does not only affect default routes. In a multi-homing
setup you'd get
192.168.100.0/24 dev eth0 proto kernel scope link src 192.168.100.1 metric 100
192.168.100.0/24 dev eth1 proto kernel scope link src 192.168.100.1 metric 100
but it is most visible for default-routes.
Note that we would append the routes that are activated later, as the order
of `ip route show` confirms. One might hence expect that the kernel selects
a route based on the order in the routing tables. However, that isn't
the case, and activating the second interface will non-deterministically
re-route traffic via the new interface. That will interfere badly
with NAT, stateful firewalls, and existing connections (like TCP).
The solution is to have NMManager keep a global index of the default route-metrics
currently in use. So, instead of determining the default-route metric based solely
on the device-type, we now in addition generate default metrics that do not
overlap. For example, if you activate eth0 first, it gets route-metric 100,
and if you then activate eth1, it gets 101. Note that if you deactivate
and re-activate eth0, then it will get route-metric 102, because the
best route should stick on eth1 (which reserves the range 100 to 101).
Note that when a connection explicitly selects a particular metric, that
choice is honored (contrary to NMDefaultRouteManager, which was more concerned
with avoiding conflicts than with keeping the exact metric).
https://bugzilla.redhat.com/show_bug.cgi?id=1505893
2017-12-05 16:32:04 +01:00
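A self-contained sketch of that reservation idea (an illustration under
assumed names, not the actual _device_route_metric_get() logic below; it
mirrors the DeviceRouteMetricData shape defined next):

    /* Each active device holds the range [aspired_metric, effective_metric];
     * a newly activating device starts at its type-default metric and bumps
     * past every range already in use. */
    #include <glib.h>

    typedef struct {
        int     ifindex;
        guint32 aspired_metric;   /* metric the device would like to have */
        guint32 effective_metric; /* metric it actually got */
    } MetricEntry;

    static guint32
    reserve_metric(GHashTable *entries_by_ifindex, guint32 aspired_metric)
    {
        guint32  metric = aspired_metric;
        gboolean bumped;

        /* Re-scan after every bump, since bumping may collide with a
         * range that was checked earlier. */
        do {
            GHashTableIter iter;
            MetricEntry   *e;

            bumped = FALSE;
            g_hash_table_iter_init(&iter, entries_by_ifindex);
            while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &e)) {
                if (metric >= e->aspired_metric && metric <= e->effective_metric) {
                    metric = e->effective_metric + 1;
                    bumped = TRUE;
                }
            }
        } while (bumped);

        return metric; /* eth0 -> 100, eth1 -> 101, re-activated eth0 -> 102 */
    }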
|
|
|
typedef struct {
|
|
|
|
|
int ifindex;
|
|
|
|
|
guint32 aspired_metric;
|
|
|
|
|
guint32 effective_metric;
|
|
|
|
|
} DeviceRouteMetricData;
|
|
|
|
|
|
|
|
|
|
static DeviceRouteMetricData *
|
2017-12-20 12:45:02 +01:00
|
|
|
_device_route_metric_data_new(int ifindex, guint32 aspired_metric, guint32 effective_metric)
|
device: generate unique default route-metrics per interface
2017-12-05 16:32:04 +01:00
|
|
|
{
|
|
|
|
|
DeviceRouteMetricData *data;
|
|
|
|
|
|
|
|
|
|
nm_assert(ifindex > 0);
|
|
|
|
|
|
|
|
|
|
/* For IPv4, metrics can use the entire uint32 bit range. For IPv6,
|
|
|
|
|
* zero is treated like 1024. Since we handle IPv4 and IPv6 identically,
|
|
|
|
|
* we cannot allow a zero metric here.
|
|
|
|
|
*/
|
2017-12-20 12:45:02 +01:00
|
|
|
nm_assert(aspired_metric > 0);
|
|
|
|
|
nm_assert(effective_metric == 0 || aspired_metric <= effective_metric);
|
device: generate unique default route-metrics per interface
2017-12-05 16:32:04 +01:00
|
|
|
|
|
|
|
|
data = g_slice_new0(DeviceRouteMetricData);
|
|
|
|
|
data->ifindex = ifindex;
|
2017-12-20 12:45:02 +01:00
|
|
|
data->aspired_metric = aspired_metric;
|
|
|
|
|
data->effective_metric = effective_metric ?: aspired_metric;
|
device: generate unique default route-metrics per interface
2017-12-05 16:32:04 +01:00
|
|
|
return data;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static guint
|
|
|
|
|
_device_route_metric_data_by_ifindex_hash(gconstpointer p)
|
|
|
|
|
{
|
|
|
|
|
const DeviceRouteMetricData *data = p;
|
|
|
|
|
|
2022-09-23 12:56:54 +02:00
|
|
|
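/* 1030338191 is a static random seed, per the nm_hash_val() convention of
 * seeding each hash function differently. */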
return nm_hash_val(1030338191, data->ifindex);
|
device: generate unique default route-metrics per interface
2017-12-05 16:32:04 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
_device_route_metric_data_by_ifindex_equal(gconstpointer pa, gconstpointer pb)
|
|
|
|
|
{
|
|
|
|
|
const DeviceRouteMetricData *a = pa;
|
|
|
|
|
const DeviceRouteMetricData *b = pb;
|
|
|
|
|
|
|
|
|
|
return a->ifindex == b->ifindex;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static guint32
|
2021-11-09 13:28:54 +01:00
|
|
|
_device_route_metric_get(NMManager *self,
|
device: generate unique default route-metrics per interface
2017-12-05 16:32:04 +01:00
|
|
|
int ifindex,
|
|
|
|
|
NMDeviceType device_type,
|
2017-12-20 12:45:02 +01:00
|
|
|
gboolean lookup_only,
|
2021-11-09 13:28:54 +01:00
|
|
|
guint32 *out_aspired_metric)
|
device: generate unique default route-metrics per interface
2017-12-05 16:32:04 +01:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv;
|
device: generate unique default route-metrics per interface
2017-12-05 16:32:04 +01:00
|
|
|
const DeviceRouteMetricData *d2;
|
2021-11-09 13:28:54 +01:00
|
|
|
DeviceRouteMetricData *data;
|
device: generate unique default route-metrics per interface
2017-12-05 16:32:04 +01:00
|
|
|
DeviceRouteMetricData data_lookup;
|
|
|
|
|
const NMDedupMultiHeadEntry *all_links_head;
|
|
|
|
|
NMPObject links_needle;
|
|
|
|
|
guint n_links;
|
|
|
|
|
gboolean cleaned = FALSE;
|
|
|
|
|
GHashTableIter h_iter;
|
2017-12-20 12:45:02 +01:00
|
|
|
guint32 metric;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
device: generate unique default route-metrics per interface
2017-12-05 16:32:04 +01:00
|
|
|
g_return_val_if_fail(NM_IS_MANAGER(self), 0);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-12-20 12:45:02 +01:00
|
|
|
NM_SET_OUT(out_aspired_metric, 0);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
device: generate unique default route-metrics per interface
2017-12-05 16:32:04 +01:00
|
|
|
    if (ifindex <= 0) {
        if (lookup_only)
            return 0;
        metric = nm_device_get_route_metric_default(device_type);
        NM_SET_OUT(out_aspired_metric, metric);
        return metric;
    }

    priv = NM_MANAGER_GET_PRIVATE(self);

    if (lookup_only && !priv->device_route_metrics)
        return 0;

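    /* Lazily build the per-ifindex metric index on first real (non-lookup)
     * use, seeding it from the device state file below. */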
    if (G_UNLIKELY(!priv->device_route_metrics)) {
        const GHashTable              *h;
        const NMConfigDeviceStateData *device_state;

        /* index of DeviceRouteMetricData entries, keyed by ifindex. */
        priv->device_route_metrics =
            g_hash_table_new_full(_device_route_metric_data_by_ifindex_hash,
                                  _device_route_metric_data_by_ifindex_equal,
                                  NULL,
                                  nm_g_slice_free_fcn(DeviceRouteMetricData));
        cleaned = TRUE;

        /* we need to pre-populate the cache for all (still existing) devices from the state-file */
        h = nm_config_device_state_get_all(priv->config);
        if (!h)
            goto initited;

        g_hash_table_iter_init(&h_iter, (GHashTable *) h);
        while (g_hash_table_iter_next(&h_iter, NULL, (gpointer *) &device_state)) {
            if (!device_state->route_metric_default_effective)
                continue;
            if (!nm_platform_link_get(priv->platform, device_state->ifindex)) {
                /* we have the entry in the state file, but (currently) no such
                 * ifindex exists in platform. Most likely the entry is obsolete,
                 * hence we skip it. */
                continue;
            }
            if (!g_hash_table_add(
                    priv->device_route_metrics,
                    _device_route_metric_data_new(device_state->ifindex,
                                                  device_state->route_metric_default_aspired,
                                                  device_state->route_metric_default_effective)))
                nm_assert_not_reached();
        }
    }

initited:
    data_lookup.ifindex = ifindex;

    data = g_hash_table_lookup(priv->device_route_metrics, &data_lookup);
    if (data)
        goto out;
    if (lookup_only)
        return 0;

    if (!cleaned) {
        /* get the number of all links in the platform cache. */
        all_links_head = nm_platform_lookup_all(priv->platform,
                                                NMP_CACHE_ID_TYPE_OBJECT_TYPE,
                                                nmp_object_stackinit_id_link(&links_needle, 1));
        n_links = all_links_head ? all_links_head->len : 0;

device: generate unique default route-metrics per interface
In the past we had NMDefaultRouteManager which would coordinate adding
the default-route with identical metrics. That especially happened, when
activating two devices of the same type, without explicitly specifying
ipv4.route-metric. For example, with ethernet devices, the routes on
both interfaces would get a metric of 100.
Coordinating routes was especially necessary, because we added
routes with NLM_F_EXCL flag, akin to `ip route replace`. We not
only had to avoid that activating two devices in NetworkManager would
result in a fight over the default-route, but more importently
to preserve externally added default-routes on unmanaged interfaces.
NMDefaultRouteManager would ensure that in case of duplicate
metrics, that the device that activated first would keep the
best default-route. It would do so by bumping the metric
of the second device to find a unused metric. The bumping itself
was not very important -- MDefaultRouteManager could also just not
configure any default-routes that show up as second, the result
would be quite similar. More important was to keep the best
default-route on the first activating device until the device
deactivates or a device activates that really has a better
default-route..
Likewise, NMRouteManager would globally manage non-default-routes.
It would not do any bumping of metrics, but it would also ensure that the routes
of the device that activates first are not overwritten by a device activating
later.
However, the `ip route replace` approach has downsides, especially
that it messes with routes on other interfaces, interfaces that are
possibly not managed by NetworkManager. Another downside is, that
binding a socket to an interface might not result in correct
routes, because the route might just not be there (in case of
NMRouteManager, which wouldn't configure duplicate routes by bumping
their metric).
Since commit 77ec302714795f905301d500b9aab6c88001f32e we would no longer
use NLM_F_EXCL, but add routes akin to `ip route append`. When
activating for example two ethernet devices with no explict route
metric configuration, there are two routes like
default via 10.16.122.254 dev eth0 proto dhcp metric 100
default via 192.168.100.1 dev eth1 proto dhcp metric 100
This does not only affect default routes. In case of a multi-homing
setup you'd get
192.168.100.0/24 dev eth0 proto kernel scope link src 192.168.100.1 metric 100
192.168.100.0/24 dev eth1 proto kernel scope link src 192.168.100.1 metric 100
but it's visible the most for default-routes.
Note that we would append the routes that are activated later, as the order
of `ip route show` confirms. One might hence expect, that kernel selects
a route based on the order in the routing tables. However, that isn't
the case, and activating the second interface will non-deterministically
re-route traffic via the new interface. That will interfere badly with
with NAT, stateful firewalls, and existing connections (like TCP).
The solution is to have NMManager keep a global index of the default route-metrics
currently in use. So, instead of determining the default-route metric based solely
on the device-type, we now in addition generate default metrics that do not
overlap. For example, if you activate eth0 first, it gets route-metric 100,
and if you then activate eth1, it gets 101. Note that if you deactivate
and re-activate eth0, then it will get route-metric 102, because the
best route should stick on eth1 (which reserves the range 100 to 101).
Note that when a connection explititly selects a particular metric, then that
choice is honored (contrary to NMDefaultRouteManager which was more concerned
with avoiding conflicts, then keeping the exact metric).
https://bugzilla.redhat.com/show_bug.cgi?id=1505893
2017-12-05 16:32:04 +01:00
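
    /* Each entry in priv->device_route_metrics describes one reservation: the
     * "aspired" metric is the device-type default the interface would like,
     * while the "effective" metric is what it actually got after bumping past
     * overlapping reservations. An entry thus reserves the entire range
     * [aspired_metric, effective_metric]. */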

    /* on systems where a lot of devices are created and go away, the index contains
     * a lot of stale entries. We must from time to time clean them up.
     *
     * Do this cleanup whenever we have more entries than 2 times the number of links. */
    if (G_UNLIKELY(g_hash_table_size(priv->device_route_metrics) > NM_MAX(20, n_links * 2))) {
        /* from time to time, we need to do some house-keeping and prune stale entries.
         * Otherwise, on a system where interfaces frequently come and go (docker), we
         * keep growing this cache for ifindexes that no longer exist. */
        g_hash_table_iter_init(&h_iter, priv->device_route_metrics);
        while (g_hash_table_iter_next(&h_iter, NULL, (gpointer *) &d2)) {
            if (!nm_platform_link_get(priv->platform, d2->ifindex))
                g_hash_table_iter_remove(&h_iter);
        }
        cleaned = TRUE;
    }

    data =
        _device_route_metric_data_new(ifindex, nm_device_get_route_metric_default(device_type), 0);

    /* unfortunately, there is no straightforward way to look up all reserved metrics.
     * Note that we not only have to know which metrics are currently reserved,
     * but also which metrics are now seemingly unused but caused another reserved
     * metric to be bumped. Hence, the naive O(n^2) search :(
     *
     * Well, technically, since we limit bumping the metric to 50, this entire
     * loop runs at most 50 times, so it's still O(n). Let's just say, it's not
     * very efficient. */
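    /* Illustrative walk-through (example values, assuming the ethernet default
     * of 100, as in the commit message above): eth0 activates first and
     * reserves [aspired 100, effective 100]. Activating eth1 overlaps that
     * range, so its effective metric is bumped to 101 and it reserves
     * [100..101]. A third reservation with the same aspired metric would then
     * be bumped to 102. */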
again:
    g_hash_table_iter_init(&h_iter, priv->device_route_metrics);
    while (g_hash_table_iter_next(&h_iter, NULL, (gpointer *) &d2)) {
        if (data->effective_metric < d2->aspired_metric
            || data->effective_metric > d2->effective_metric) {
            /* no overlap. Skip. */
            continue;
        }
        if (!cleaned && !nm_platform_link_get(priv->platform, d2->ifindex)) {
            /* the metric seems taken, but there is no such interface. This entry
             * is stale, forget about it. */
            g_hash_table_iter_remove(&h_iter);
            continue;
        }

        if (d2->effective_metric == G_MAXUINT32) {
            /* we cannot bump the metric any further. Done.
             *
             * Actually, this can currently not happen because the aspired_metric
             * values are small numbers and we limit the bumping to 50. Still, for
             * completeness... */
            data->effective_metric = G_MAXUINT32;
            break;
        }

        if (d2->effective_metric - data->aspired_metric >= 50) {
            /* as one active interface reserves an entire range of metrics
             * (from aspired_metric to effective_metric), that means if you
             * alternatingly activate two interfaces, their metric will
             * bump each other.
             *
             * Limit this, bump the metric at most 50 points. */
            data->effective_metric = data->aspired_metric + 50;
            break;
        }

        /* bump the metric, and search again. */
        data->effective_metric = d2->effective_metric + 1;
        goto again;
    }

    _LOGT(LOGD_DEVICE,
          "default-route-metric: ifindex %d reserves metric %u (aspired %u)",
          data->ifindex,
          data->effective_metric,
          data->aspired_metric);

    if (!g_hash_table_add(priv->device_route_metrics, data))
        nm_assert_not_reached();

out:
    NM_SET_OUT(out_aspired_metric, data->aspired_metric);
    return data->effective_metric;
}
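
/* nm_manager_device_route_metric_reserve:
 * @self: the #NMManager instance
 * @ifindex: the interface index to reserve a default-route metric for
 * @device_type: the device type, which determines the aspired (default) metric
 *
 * Reserves a route metric for @ifindex, bumping it past metrics already
 * reserved by other interfaces so that reservations do not overlap.
 *
 * Returns: the effective route metric; asserted to be non-zero. */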
guint32
nm_manager_device_route_metric_reserve(NMManager *self, int ifindex, NMDeviceType device_type)
{
    guint32 metric;

    metric = _device_route_metric_get(self, ifindex, device_type, FALSE, NULL);
    nm_assert(metric != 0);
    return metric;
}
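
/* nm_manager_device_route_metric_clear:
 * @self: the #NMManager instance
 * @ifindex: the interface index whose reservation should be released
 *
 * Releases the route-metric reservation for @ifindex, if one exists. */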
void
nm_manager_device_route_metric_clear(NMManager *self, int ifindex)
{
    NMManagerPrivate     *priv;
    DeviceRouteMetricData data_lookup;

    priv = NM_MANAGER_GET_PRIVATE(self);

    if (!priv->device_route_metrics)
        return;
    data_lookup.ifindex = ifindex;
    if (g_hash_table_remove(priv->device_route_metrics, &data_lookup)) {
        _LOGT(LOGD_DEVICE, "default-route-metric: ifindex %d released", ifindex);
    }
}
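
/* A minimal usage sketch (illustrative only; "manager" and "device" are
 * hypothetical variables, not part of this file). A device would reserve a
 * metric while activating and release it again when it deactivates:
 *
 *     guint32 metric;
 *
 *     metric = nm_manager_device_route_metric_reserve(manager,
 *                                                     nm_device_get_ifindex(device),
 *                                                     NM_DEVICE_TYPE_ETHERNET);
 *     ... configure routes for the device using "metric" ...
 *     nm_manager_device_route_metric_clear(manager, nm_device_get_ifindex(device));
 */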

/*****************************************************************************/
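
/* _delete_volatile_connection_do:
 * @self: the #NMManager instance
 * @connection: the settings connection to check
 *
 * Deletes @connection if it is flagged VOLATILE or EXTERNAL, is still tracked
 * by settings, and is no longer referenced by any active connection. */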
static void
_delete_volatile_connection_do(NMManager *self, NMSettingsConnection *connection)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);

    /* When a device is not marked as unmanaged, but also not actively managed
     * by NetworkManager, an in-memory profile is generated to represent the
     * external device's state (if the device is up and configured with an IP
     * address). Such profiles are UNSAVED, NM_GENERATED and VOLATILE, and
     * additionally carry the explicit EXTERNAL flag, so the UI can express
     * that somebody else configures the device.
     * See https://bugzilla.redhat.com/show_bug.cgi?id=1816202. */
    if (!NM_FLAGS_ANY(nm_settings_connection_get_flags(connection),
                      NM_SETTINGS_CONNECTION_INT_FLAGS_VOLATILE
                          | NM_SETTINGS_CONNECTION_INT_FLAGS_EXTERNAL))
        return;
    if (!nm_settings_has_connection(priv->settings, connection))
        return;
    if (active_connection_find(self,
                               connection,
                               NULL,
                               NM_ACTIVE_CONNECTION_STATE_DEACTIVATED,
                               TRUE,
                               NULL))
        return;

    _LOGD(LOGD_DEVICE,
          "volatile connection disconnected. Deleting connection '%s' (%s)",
          nm_settings_connection_get_id(connection),
          nm_settings_connection_get_uuid(connection));
settings: rework tracking settings connections and settings plugins
Completely rework how settings plugin handle connections and how
NMSettings tracks the list of connections.
Previously, settings plugins would return objects of (a subtype of) type
NMSettingsConnection. The NMSettingsConnection was tightly coupled with
the settings plugin. That has a lot of downsides.
Change that. When changing this basic relation how settings connections
are tracked, everything falls appart. That's why this is a huge change.
Also, since I have to largely rewrite the settings plugins, I also
added support for multiple keyfile directories, handle in-memory
connections only by keyfile plugin and (partly) use copy-on-write NMConnection
instances. I don't want to spend effort rewriting large parts while
preserving the old way, that anyway should change. E.g. while rewriting ifcfg-rh,
I don't want to let it handle in-memory connections because that's not right
long-term.
--
If the settings plugins themself create subtypes of NMSettingsConnection
instances, then a lot of knowledge about tracking connections moves
to the plugins.
Just try to follow the code what happend during nm_settings_add_connection().
Note how the logic is spread out:
- nm_settings_add_connection() calls plugin's add_connection()
- add_connection() creates a NMSettingsConnection subtype
- the plugin has to know that it's called during add-connection and
not emit NM_SETTINGS_PLUGIN_CONNECTION_ADDED signal
- NMSettings calls claim_connection() which hocks up the new
NMSettingsConnection instance and configures the instance
(like calling nm_settings_connection_added()).
This summary does not sound like a lot, but try to follow that code. The logic
is all over the place.
Instead, settings plugins should have a very simple API for adding, modifying,
deleting, loading and reloading connections. All the plugin does is to return a
NMSettingsStorage handle. The storage instance is a handle to identify a profile
in storage (e.g. a particular file). The settings plugin is free to subtype
NMSettingsStorage, but it's not necessary.
There are no more events raised, and the settings plugin implements the small
API in a straightforward manner.
NMSettings now drives all of this. Even NMSettingsConnection has now
very little concern about how it's tracked and delegates only to NMSettings.
This should make settings plugins simpler. Currently settings plugins
are so cumbersome to implement, that we avoid having them. It should not be
like that and it should be easy, beneficial and lightweight to create a new
settings plugin.
Note also how the settings plugins no longer care about duplicate UUIDs.
Duplicated UUIDs are a fact of life and NMSettings must handle them. No
need to overly concern settings plugins with that.
--
NMSettingsConnection is exposed directly on D-Bus (being a subtype of
NMDBusObject) but it was also a GObject type provided by the settings
plugin. Hence, it was not possible to migrate a profile from one plugin to
another.
However that would be useful when one profile does not support a
connection type (like ifcfg-rh not supporting VPN). Currently such
migration is not implemented except for migrating them to/from keyfile's
run directory. The problem is that migrating profiles in general is
complicated but in some cases it is important to do.
For example checkpoint rollback should recreate the profile in the right
settings plugin, not just add it to persistent storage. This is not yet
properly implemented.
--
Previously, both the keyfile and ifcfg-rh plugins implemented in-memory (unsaved)
profiles, while the ifupdown plugin could not handle them. That meant duplication of code,
and an ifupdown profile could not be modified or made unsaved.
This is now unified and only the keyfile plugin handles in-memory profiles (bgo #744711).
Also, NMSettings is aware of such profiles and treats them specially.
In particular, NMSettings drives the migration between persistent and non-persistent
storage.
Note that a settings plugin may create truly generated, in-memory profiles.
The settings plugin is free to generate and persist the profiles in any way it
wishes. But the concept of "unsaved" profiles is now something explicitly handled
by the keyfile plugin. Also, these "unsaved" keyfile profiles are persisted to the
file system too, to the /run directory. This is great for two reasons: first of all, all
profiles from keyfile storage in fact have a backing file -- even the
unsaved ones. It also means you can create "unsaved" profiles in /run
and load them with `nmcli connection load`, meaning there is a file-based
API for creating unsaved profiles.
The other advantage is that these profiles now survive restarting
NetworkManager. It's paramount that restarting the daemon is as
non-disruptive as possible. Persisting unsaved files to /run improves
that significantly.
--
In the past, NMSettingsConnection also implemented the NMConnection interface.
That was already changed a while ago: instead, users now call
nm_settings_connection_get_connection() to delegate to an
NMSimpleConnection. What however still happened was that the NMConnection
instance was never swapped; instead, the instance was modified with
nm_connection_replace_settings_from_connection(), clear-secrets, etc.
Change that and treat the NMConnection instance as immutable. Instead of modifying
it, reference or clone a new instance. Previously, when somebody
wanted to keep a reference to an NMConnection, the profile would be cloned.
Now, it is supposed to be safe to reference the instance directly, and everybody
must ensure not to modify the instance. nmtst_connection_assert_unchanging()
should help with that.
The point is that the settings plugins may keep references to the
NMConnection instance, and so does the NMSettingsConnection. We want
to avoid cloning the instances as long as they are the same.
Likewise, the device's applied connection can now also be referenced
instead of cloning it. This is not yet done, and possibly there are
further improvements possible.
--
Also implement multiple keyfile directories /usr/lib, /etc, /run (rh #1674545,
bgo #772414).
It was always the case that multiple files could provide the same UUID
(both in the case of keyfile and ifcfg-rh). For the keyfile plugin, if a profile in
read-only storage in /usr/lib gets modified, then it actually gets stored in
/etc (or /run, if the profile is unsaved).
--
While at it, make /etc/network/interfaces profiles for the ifupdown plugin reloadable.
--
https://bugzilla.gnome.org/show_bug.cgi?id=772414
https://bugzilla.gnome.org/show_bug.cgi?id=744711
https://bugzilla.redhat.com/show_bug.cgi?id=1674545
2019-06-13 17:12:20 +02:00
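The commit above describes treating the NMConnection behind a settings connection as immutable and copy-on-write. Below is a minimal sketch of that usage pattern, not taken from the commit itself: nm_settings_connection_get_connection() and nm_simple_connection_new_clone() are existing APIs named in the message, while the helper function is hypothetical.

/* Hedged sketch: keep a plain reference to the shared, immutable
 * NMConnection and clone only when a mutable copy is needed. */
static NMConnection *
example_clone_for_modification(NMSettingsConnection *sett_conn)
{
    /* Referencing directly is safe because nobody may modify the
     * instance; nmtst_connection_assert_unchanging() can check that. */
    NMConnection *con = nm_settings_connection_get_connection(sett_conn);

    /* Clone before modifying, leaving the shared instance untouched. */
    return nm_simple_connection_new_clone(con);
}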
|
|
|
nm_settings_connection_delete(connection, FALSE);
|
2017-12-05 13:55:25 +01:00
|
|
|
}
|
|
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
/* Returns: whether to notify D-Bus of the removal or not */
|
|
|
|
|
static gboolean
|
|
|
|
|
active_connection_remove(NMManager *self, NMActiveConnection *active)
|
2013-08-30 17:57:56 -05:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2017-12-05 13:55:25 +01:00
|
|
|
gs_unref_object NMSettingsConnection *connection = NULL;
|
2017-11-23 21:30:09 +01:00
|
|
|
gboolean notify;
|
2014-06-20 20:13:14 +02:00
|
|
|
|
2017-11-23 21:30:09 +01:00
|
|
|
nm_assert(NM_IS_ACTIVE_CONNECTION(active));
|
|
|
|
|
nm_assert(c_list_contains(&priv->active_connections_lst_head, &active->active_connections_lst));
|
2014-06-20 20:13:14 +02:00
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because of?) using generated classes. It was hard to understand, complex, had
ordering issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
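The freeze/thaw grouping suggested above uses plain GObject API. A minimal sketch, assuming a hypothetical object with two illustrative properties; only g_object_freeze_notify()/g_object_thaw_notify() and g_object_set() are real API here.

/* Hedged sketch: batch several property changes so listeners (and thus
 * the PropertiesChanged emission) observe them as one group. */
static void
example_change_grouped(GObject *obj)
{
    g_object_freeze_notify(obj);
    g_object_set(obj, "example-prop-a", 1, NULL); /* hypothetical property */
    g_object_set(obj, "example-prop-b", 2, NULL); /* hypothetical property */
    g_object_thaw_notify(obj); /* queued notifications are emitted here */
}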
|
|
|
notify = nm_dbus_object_is_exported(NM_DBUS_OBJECT(active));
|
2017-11-23 21:30:09 +01:00
|
|
|
|
2017-11-28 11:22:01 +01:00
|
|
|
c_list_unlink(&active->active_connections_lst);
|
2017-11-23 21:30:09 +01:00
|
|
|
g_signal_emit(self, signals[ACTIVE_CONNECTION_REMOVED], 0, active);
|
|
|
|
|
g_signal_handlers_disconnect_by_func(active, active_connection_state_changed, self);
|
|
|
|
|
g_signal_handlers_disconnect_by_func(active, active_connection_default_changed, self);
|
|
|
|
|
g_signal_handlers_disconnect_by_func(active, active_connection_parent_active, self);
|
|
|
|
|
|
2017-12-05 13:55:25 +01:00
|
|
|
connection = nm_g_object_ref(nm_active_connection_get_settings_connection(active));
|
2017-11-23 21:30:09 +01:00
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
nm_dbus_object_clear_and_unexport(&active);
|
2017-11-23 21:30:09 +01:00
|
|
|
|
2017-12-05 13:55:25 +01:00
|
|
|
if (connection)
|
|
|
|
|
_delete_volatile_connection_do(self, connection);
|
2013-08-28 16:19:20 -05:00
|
|
|
|
2017-11-23 21:30:09 +01:00
|
|
|
return notify;
|
2013-08-30 17:57:56 -05:00
|
|
|
}
|
|
|
|
|
|
2012-08-22 17:11:31 -05:00
|
|
|
static gboolean
|
|
|
|
|
_active_connection_cleanup(gpointer user_data)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = NM_MANAGER(user_data);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2017-11-23 21:30:09 +01:00
|
|
|
NMActiveConnection *ac, *ac_safe;
|
2012-08-22 17:11:31 -05:00
|
|
|
|
|
|
|
|
priv->ac_cleanup_id = 0;
|
|
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
g_object_freeze_notify(G_OBJECT(self));
|
2017-11-23 21:30:09 +01:00
|
|
|
c_list_for_each_entry_safe (ac,
|
|
|
|
|
ac_safe,
|
|
|
|
|
&priv->active_connections_lst_head,
|
|
|
|
|
active_connections_lst) {
|
2012-08-22 17:11:31 -05:00
|
|
|
if (nm_active_connection_get_state(ac) == NM_ACTIVE_CONNECTION_STATE_DEACTIVATED) {
|
2013-08-28 16:19:20 -05:00
|
|
|
if (active_connection_remove(self, ac))
|
2016-04-01 17:34:51 +02:00
|
|
|
_notify(self, PROP_ACTIVE_CONNECTIONS);
|
2012-08-22 17:11:31 -05:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
2013-08-28 16:19:20 -05:00
|
|
|
g_object_thaw_notify(G_OBJECT(self));
|
2012-08-22 17:11:31 -05:00
|
|
|
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
active_connection_state_changed(NMActiveConnection *active, GParamSpec *pspec, NMManager *self)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2012-08-22 17:11:31 -05:00
|
|
|
NMActiveConnectionState state;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMSettingsConnection *con;
|
2012-08-22 17:11:31 -05:00
|
|
|
|
|
|
|
|
state = nm_active_connection_get_state(active);
|
|
|
|
|
if (state == NM_ACTIVE_CONNECTION_STATE_DEACTIVATED) {
|
|
|
|
|
/* Destroy active connections from an idle handler to ensure that
|
|
|
|
|
* their last property change notifications go out, which wouldn't
|
|
|
|
|
* happen if we destroyed them immediately when their state was set
|
|
|
|
|
* to DEACTIVATED.
|
|
|
|
|
*/
|
|
|
|
|
if (!priv->ac_cleanup_id)
|
|
|
|
|
priv->ac_cleanup_id = g_idle_add(_active_connection_cleanup, self);
|
2016-09-26 16:01:27 +02:00
|
|
|
|
|
|
|
|
con = nm_active_connection_get_settings_connection(active);
|
|
|
|
|
if (con)
|
|
|
|
|
g_object_set_qdata(G_OBJECT(con), autoconnect_root_quark(), NULL);
|
2012-08-22 17:11:31 -05:00
|
|
|
}
|
2013-11-08 12:23:43 -05:00
|
|
|
|
|
|
|
|
nm_manager_update_state(self);
|
2012-08-22 17:11:31 -05:00
|
|
|
}
|
|
|
|
|
|
2014-06-06 15:30:24 -04:00
|
|
|
static void
|
|
|
|
|
active_connection_default_changed(NMActiveConnection *active, GParamSpec *pspec, NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
nm_manager_update_state(self);
|
|
|
|
|
}
|
|
|
|
|
|
2014-02-26 16:04:45 -06:00
|
|
|
/**
|
|
|
|
|
* active_connection_add():
|
|
|
|
|
* @self: the #NMManager
|
|
|
|
|
* @active: the #NMActiveConnection to manage
|
|
|
|
|
*
|
|
|
|
|
* Begins to track and manage @active. Increases the refcount of @active.
|
|
|
|
|
*/
|
2012-08-22 17:11:31 -05:00
|
|
|
static void
|
2018-02-05 15:17:06 +01:00
|
|
|
active_connection_add(NMManager *self, NMActiveConnection *active)
|
2012-08-22 17:11:31 -05:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
|
2017-11-23 21:30:09 +01:00
|
|
|
nm_assert(NM_IS_ACTIVE_CONNECTION(active));
|
|
|
|
|
nm_assert(!c_list_is_linked(&active->active_connections_lst));
|
2012-08-22 17:11:31 -05:00
|
|
|
|
2020-11-04 13:53:57 +01:00
|
|
|
c_list_link_tail(&priv->active_connections_lst_head, &active->active_connections_lst);
|
2017-11-23 21:30:09 +01:00
|
|
|
g_object_ref(active);
|
2014-02-26 16:04:45 -06:00
|
|
|
|
|
|
|
|
g_signal_connect(active,
|
|
|
|
|
"notify::" NM_ACTIVE_CONNECTION_STATE,
|
2012-08-22 17:11:31 -05:00
|
|
|
G_CALLBACK(active_connection_state_changed),
|
|
|
|
|
self);
|
2014-06-06 15:30:24 -04:00
|
|
|
g_signal_connect(active,
|
|
|
|
|
"notify::" NM_ACTIVE_CONNECTION_DEFAULT,
|
|
|
|
|
G_CALLBACK(active_connection_default_changed),
|
|
|
|
|
self);
|
|
|
|
|
g_signal_connect(active,
|
|
|
|
|
"notify::" NM_ACTIVE_CONNECTION_DEFAULT6,
|
|
|
|
|
G_CALLBACK(active_connection_default_changed),
|
|
|
|
|
self);
|
2012-08-22 18:33:17 -05:00
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
if (!nm_dbus_object_is_exported(NM_DBUS_OBJECT(active)))
|
|
|
|
|
nm_dbus_object_export(NM_DBUS_OBJECT(active));
|
2018-02-05 15:17:06 +01:00
|
|
|
|
2012-08-22 18:33:17 -05:00
|
|
|
g_signal_emit(self, signals[ACTIVE_CONNECTION_ADDED], 0, active);
|
2013-08-28 16:19:20 -05:00
|
|
|
|
2018-02-05 15:17:06 +01:00
|
|
|
_notify(self, PROP_ACTIVE_CONNECTIONS);
|
2012-08-22 17:11:31 -05:00
|
|
|
}
|
|
|
|
|
|
2017-11-23 21:30:09 +01:00
|
|
|
const CList *
|
2012-08-22 17:21:56 -05:00
|
|
|
nm_manager_get_active_connections(NMManager *manager)
|
|
|
|
|
{
|
2017-11-23 21:30:09 +01:00
|
|
|
return &NM_MANAGER_GET_PRIVATE(manager)->active_connections_lst_head;
|
2012-08-22 17:21:56 -05:00
|
|
|
}
|
|
|
|
|
|
2013-09-12 10:28:21 -04:00
|
|
|
static NMActiveConnection *
|
2018-04-19 15:27:54 +02:00
|
|
|
active_connection_find(
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self,
|
|
|
|
|
NMSettingsConnection *sett_conn,
|
|
|
|
|
const char *uuid,
|
2018-04-19 15:42:27 +02:00
|
|
|
NMActiveConnectionState max_state /* candidates in state @max_state will be found */,
|
2021-05-13 10:49:39 +02:00
|
|
|
gboolean also_waiting_auth /* also return ACs waiting for authorization */,
|
2021-11-09 13:28:54 +01:00
|
|
|
GPtrArray **out_all_matching)
|
2013-09-12 10:28:21 -04:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2017-11-23 21:30:09 +01:00
|
|
|
NMActiveConnection *ac;
|
2018-04-19 15:27:54 +02:00
|
|
|
NMActiveConnection *best_ac = NULL;
|
2021-11-09 13:28:54 +01:00
|
|
|
GPtrArray *all = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements the NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates for keeping the actual profile.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware of whether we handle an NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we could not treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get an NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
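With the delegation described above, call sites fetch the delegated NMConnection explicitly instead of casting. A minimal sketch, assuming the accessors named in the commit message (nm_settings_connection_get_connection(), nm_connection_get_id()); the wrapper function itself is hypothetical.

/* Hedged sketch: no more cast from NMSettingsConnection to NMConnection;
 * ask for the delegated NMSimpleConnection instance instead. */
static const char *
example_profile_id(NMSettingsConnection *sett_conn)
{
    NMConnection *con = nm_settings_connection_get_connection(sett_conn);

    return nm_connection_get_id(con);
}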
|
|
|
nm_assert(!sett_conn || NM_IS_SETTINGS_CONNECTION(sett_conn));
|
2018-04-19 15:27:54 +02:00
|
|
|
nm_assert(!out_all_matching || !*out_all_matching);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-11-27 19:31:22 +01:00
|
|
|
c_list_for_each_entry_prev (ac, &priv->active_connections_lst_head, active_connections_lst) {
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *ac_conn;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
ac_conn = nm_active_connection_get_settings_connection(ac);
|
|
|
|
|
if (sett_conn && sett_conn != ac_conn)
|
2017-03-13 14:01:47 +01:00
|
|
|
continue;
|
2018-08-11 11:08:17 +02:00
|
|
|
if (uuid && !nm_streq0(uuid, nm_settings_connection_get_uuid(ac_conn)))
|
2017-03-13 14:01:47 +01:00
|
|
|
continue;
|
|
|
|
|
if (nm_active_connection_get_state(ac) > max_state)
|
|
|
|
|
continue;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-19 15:27:54 +02:00
|
|
|
if (!out_all_matching)
|
|
|
|
|
return ac;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-19 15:27:54 +02:00
|
|
|
if (!best_ac) {
|
|
|
|
|
best_ac = ac;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-19 15:27:54 +02:00
|
|
|
if (!all) {
|
|
|
|
|
all = g_ptr_array_new_with_free_func(g_object_unref);
|
|
|
|
|
g_ptr_array_add(all, g_object_ref(best_ac));
|
|
|
|
|
}
|
|
|
|
|
g_ptr_array_add(all, g_object_ref(ac));
|
2013-09-12 10:28:21 -04:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-11-04 09:46:41 +01:00
|
|
|
if (!best_ac) {
|
2020-11-12 15:34:59 +01:00
|
|
|
AsyncOpData *async_op_data;
|
2020-11-04 09:46:41 +01:00
|
|
|
|
2021-05-13 10:49:39 +02:00
|
|
|
if (!also_waiting_auth)
|
|
|
|
|
return NULL;
|
|
|
|
|
|
2020-11-04 09:46:41 +01:00
|
|
|
c_list_for_each_entry (async_op_data, &priv->async_op_lst_head, async_op_lst) {
|
|
|
|
|
NMSettingsConnection *ac_conn;
|
|
|
|
|
|
2020-11-12 15:34:59 +01:00
|
|
|
ac = async_op_data->ac_auth.active;
|
2021-04-30 22:35:20 +02:00
|
|
|
ac_conn = _nm_active_connection_get_settings_connection(ac);
|
2020-11-04 09:46:41 +01:00
|
|
|
if (sett_conn && sett_conn != ac_conn)
|
|
|
|
|
continue;
|
|
|
|
|
if (uuid && !nm_streq0(uuid, nm_settings_connection_get_uuid(ac_conn)))
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
if (!out_all_matching)
|
|
|
|
|
return ac;
|
|
|
|
|
|
|
|
|
|
if (!best_ac) {
|
|
|
|
|
best_ac = ac;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!all) {
|
|
|
|
|
all = g_ptr_array_new_with_free_func(g_object_unref);
|
|
|
|
|
g_ptr_array_add(all, g_object_ref(best_ac));
|
|
|
|
|
}
|
|
|
|
|
g_ptr_array_add(all, g_object_ref(ac));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!best_ac)
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-19 15:27:54 +02:00
|
|
|
/* as an optimization, we only allocate out_all_matching if there is more
|
|
|
|
|
* than one result. If there is only one result, we only return the single
|
|
|
|
|
* element and don't bother allocating an array. That's the common case.
|
|
|
|
|
*
|
|
|
|
|
* Also, in case we have multiple results, we return the *first* one
|
|
|
|
|
* as @best_ac. */
|
|
|
|
|
nm_assert(!all || (all->len >= 2 && all->pdata[0] == best_ac));
|
|
|
|
|
|
|
|
|
|
*out_all_matching = all;
|
|
|
|
|
return best_ac;
|
2013-09-12 10:28:21 -04:00
|
|
|
}
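A hedged usage sketch for the lookup above, relying only on the convention just stated: @out_all_matching is allocated only when more than one active connection matches, and its first element is the returned best match. The caller function is illustrative.

/* Hedged sketch: find all activated ACs for a settings connection. */
static void
example_lookup(NMManager *self, NMSettingsConnection *sett_conn)
{
    gs_unref_ptrarray GPtrArray *all = NULL;
    NMActiveConnection *best;

    best = active_connection_find(self,
                                  sett_conn,
                                  NULL,
                                  NM_ACTIVE_CONNECTION_STATE_ACTIVATED,
                                  FALSE,
                                  &all);
    if (!best)
        return; /* no matching active connection */

    /* "all" stays NULL in the common single-match case. */
}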
|
|
|
|
|
|
2017-03-13 14:01:47 +01:00
|
|
|
static NMActiveConnection *
|
2021-11-09 13:28:54 +01:00
|
|
|
active_connection_find_by_connection(NMManager *self,
|
|
|
|
|
NMSettingsConnection *sett_conn,
|
|
|
|
|
NMConnection *connection,
|
2018-04-22 12:50:42 +02:00
|
|
|
NMActiveConnectionState max_state,
|
2021-11-09 13:28:54 +01:00
|
|
|
GPtrArray **out_all_matching)
|
2017-03-13 14:01:47 +01:00
|
|
|
{
|
|
|
|
|
nm_assert(NM_IS_MANAGER(self));
|
2018-08-11 11:08:17 +02:00
|
|
|
nm_assert(!sett_conn || NM_IS_SETTINGS_CONNECTION(sett_conn));
|
|
|
|
|
nm_assert(!connection || NM_IS_CONNECTION(connection));
|
|
|
|
|
nm_assert(sett_conn || connection);
|
|
|
|
|
nm_assert(!connection || !sett_conn
|
|
|
|
|
|| connection == nm_settings_connection_get_connection(sett_conn));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-03-13 14:01:47 +01:00
|
|
|
/* Depending on whether connection is a settings connection,
|
|
|
|
|
* either lookup by object-identity of @connection, or compare the UUID */
|
2018-04-19 15:27:54 +02:00
|
|
|
return active_connection_find(self,
|
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn,
|
|
|
|
|
sett_conn ? NULL : nm_connection_get_uuid(connection),
|
2018-04-22 12:50:42 +02:00
|
|
|
max_state,
|
2021-05-13 10:49:39 +02:00
|
|
|
FALSE,
|
2018-04-22 12:50:42 +02:00
|
|
|
out_all_matching);
|
2017-03-13 14:01:47 +01:00
|
|
|
}
|
|
|
|
|
|
2018-06-27 14:20:57 +02:00
|
|
|
typedef struct {
|
|
|
|
|
NMManager *self;
|
|
|
|
|
gboolean for_auto_activation;
|
|
|
|
|
} GetActivatableConnectionsFilterData;
|
|
|
|
|
|
2017-02-03 15:13:03 +01:00
|
|
|
static gboolean
|
2021-11-09 13:28:54 +01:00
|
|
|
_get_activatable_connections_filter(NMSettings *settings,
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn,
|
2017-02-03 15:13:03 +01:00
|
|
|
gpointer user_data)
|
|
|
|
|
{
|
2018-06-27 14:20:57 +02:00
|
|
|
const GetActivatableConnectionsFilterData *d = user_data;
|
|
|
|
|
NMConnectionMultiConnect multi_connect;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core: add "external" flag for connections of external devices
When a device is not marked as unmanaged, but also not actively managed
by NetworkManager, then NetworkManager will generate an in-memory
profile to represent the active state, if the device is up and
configured (with an IP address).
Such profiles are commonly named like "eth0", and they are utterly
confusing to users, because they look as if NetworkManager actually
manages the device, when it really just shows that somebody else configures
the device.
We should express this better in the UI, hence add flags to indicate
that.
In practice, such profiles are UNSAVED, NM_GENERATED, and VOLATILE. But
add an explicit flag to represent that.
https://bugzilla.redhat.com/show_bug.cgi?id=1816202
2020-06-08 19:34:50 +02:00
|
|
|
if (NM_FLAGS_ANY(nm_settings_connection_get_flags(sett_conn),
|
|
|
|
|
NM_SETTINGS_CONNECTION_INT_FLAGS_VOLATILE
|
|
|
|
|
| NM_SETTINGS_CONNECTION_INT_FLAGS_EXTERNAL))
|
device: assume matching connections during first start
Since commit 2d1b85f (th/assume-vs-unmanaged-bgo746440), we clearly
distinguish between two modes when encountering devices with external
IP configuration:
a) external devices. For those devices we generate a volatile in-memory
connection and pretend it's active. However, the device must not be
touched by NetworkManager in any way.
b) assume, seamless take-over. Mostly for restarts of NetworkManager,
we activate a connection gracefully without going through a down-up
cycle. After the device reaches the activated state, the device is
considered fully managed. For this only an existing, non volatile
connection can be used.
Before 'th/assume-vs-unmanaged-bgo746440', the behaviors were not
clearly separated.
Since then, we only choose to assume a connection (b) when the state
file indicates a matching connection. Now, extend this to also assume
connections when:
- during first-start (not after a restart) when there is no
state file yet.
- and, if we have an existing, non volatile, connection which
matches the device's configuration.
This patch lets NetworkManager assume connections also on first start.
That is for example useful when handing over network configuration from
initrd.
This only applies to existing, permanent, matching(!) connections, so it is a
good guess that the user wants NM to take over this interface. This brings us
closer to the previous behavior before 'th/assume-vs-unmanaged-bgo746440'.
https://bugzilla.redhat.com/show_bug.cgi?id=1439220
(cherry picked from commit 27b2477cb7dad2410c88c7dfca51f3aad208b881)
2017-04-19 16:16:12 +02:00
|
|
|
return FALSE;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
multi_connect =
|
|
|
|
|
_nm_connection_get_multi_connect(nm_settings_connection_get_connection(sett_conn));
|
2018-06-27 14:20:57 +02:00
|
|
|
if (multi_connect == NM_CONNECTION_MULTI_CONNECT_MULTIPLE
|
|
|
|
|
|| (multi_connect == NM_CONNECTION_MULTI_CONNECT_MANUAL_MULTIPLE
|
|
|
|
|
&& !d->for_auto_activation))
|
|
|
|
|
return TRUE;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-19 15:42:27 +02:00
|
|
|
/* the connection is activatable, if it has no active-connections that are in state
|
|
|
|
|
* activated, activating, or waiting to be activated. */
|
2018-06-27 14:20:57 +02:00
|
|
|
return !active_connection_find(d->self,
|
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn,
|
2018-06-27 14:20:57 +02:00
|
|
|
NULL,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_ACTIVATED,
|
2021-05-13 10:49:39 +02:00
|
|
|
FALSE,
|
2018-06-27 14:20:57 +02:00
|
|
|
NULL);
|
2017-02-03 15:13:03 +01:00
|
|
|
}
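A restatement of the filter's outcome, for orientation (a summary of the logic above, not additional behavior):

/* Decision summary for the activatable-connections filter:
 *   multi-connect "multiple"                        -> always activatable
 *   multi-connect "manual-multiple", manual request -> always activatable
 *   any other case -> activatable only while the profile has no active
 *   connection in state activated, activating, or waiting to activate. */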
|
|
|
|
|
|
|
|
|
|
NMSettingsConnection **
|
2018-06-27 14:20:57 +02:00
|
|
|
nm_manager_get_activatable_connections(NMManager *manager,
|
|
|
|
|
gboolean for_auto_activation,
|
|
|
|
|
gboolean sort,
|
2021-11-09 13:28:54 +01:00
|
|
|
guint *out_len)
|
2013-09-12 10:28:21 -04:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(manager);
|
2018-06-27 14:20:57 +02:00
|
|
|
const GetActivatableConnectionsFilterData d = {
|
2021-11-09 13:28:54 +01:00
|
|
|
.self = manager,
|
|
|
|
|
.for_auto_activation = for_auto_activation,
|
2018-06-27 14:20:57 +02:00
|
|
|
};
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-11-22 12:46:58 +01:00
|
|
|
return nm_settings_get_connections_clone(
|
|
|
|
|
priv->settings,
|
|
|
|
|
out_len,
|
|
|
|
|
_get_activatable_connections_filter,
|
2018-06-27 14:20:57 +02:00
|
|
|
(gpointer) &d,
|
2017-11-22 12:46:58 +01:00
|
|
|
sort ? nm_settings_connection_cmp_autoconnect_priority_p_with_data : NULL,
|
|
|
|
|
NULL);
|
2013-09-12 10:28:21 -04:00
|
|
|
}
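A hedged caller sketch (hypothetical helper, not from this file), assuming the usual convention that the cloned array is owned by the caller while the NMSettingsConnection elements remain owned by NMSettings:

static void
log_activatable_connections(NMManager *manager)
{
    gs_free NMSettingsConnection **conns = NULL;
    guint                          len;
    guint                          i;

    /* manual-activation candidates, sorted by autoconnect priority */
    conns = nm_manager_get_activatable_connections(manager,
                                                   FALSE /* for_auto_activation */,
                                                   TRUE /* sort */,
                                                   &len);
    for (i = 0; i < len; i++)
        nm_log_dbg(LOGD_CORE,
                   "activatable: %s",
                   nm_settings_connection_get_id(conns[i]));
}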
|
|
|
|
|
|
2012-09-14 15:21:29 -05:00
|
|
|
static NMActiveConnection *
|
2018-03-23 21:49:41 +01:00
|
|
|
active_connection_get_by_path(NMManager *self, const char *path)
|
2012-09-14 15:21:29 -05:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2017-11-23 21:30:09 +01:00
|
|
|
NMActiveConnection *ac;
|
2012-09-14 15:21:29 -05:00
|
|
|
|
2022-02-22 21:19:18 +01:00
|
|
|
ac = nm_dbus_manager_lookup_object_with_type(nm_dbus_object_get_manager(NM_DBUS_OBJECT(self)),
|
|
|
|
|
NM_TYPE_ACTIVE_CONNECTION,
|
|
|
|
|
path);
|
|
|
|
|
if (!ac || c_list_is_empty(&ac->active_connections_lst))
|
2018-03-23 21:49:41 +01:00
|
|
|
return NULL;
|
2012-09-14 15:21:29 -05:00
|
|
|
|
2018-03-23 21:49:41 +01:00
|
|
|
nm_assert(c_list_contains(&priv->active_connections_lst_head, &ac->active_connections_lst));
|
|
|
|
|
return ac;
|
2012-09-14 15:21:29 -05:00
|
|
|
}
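The lookup-plus-membership test above is a recurring idiom in this file: the D-Bus object manager may still resolve a path to an object that was already unlinked from the manager's list, so an empty list node means "not found". A hypothetical caller sketch (helper name, message, and the specific error code are illustrative):

static NMActiveConnection *
find_ac_or_fail(NMManager *self, const char *path, GError **error)
{
    NMActiveConnection *ac;

    ac = active_connection_get_by_path(self, path);
    if (!ac) {
        g_set_error(error,
                    NM_MANAGER_ERROR,
                    NM_MANAGER_ERROR_CONNECTION_NOT_ACTIVE,
                    "%s is not an active connection",
                    path);
    }
    return ac;
}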
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2012-08-22 17:11:31 -05:00
|
|
|
|
2015-01-30 19:52:53 +01:00
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
_config_changed_cb(NMConfig *config,
|
|
|
|
|
NMConfigData *config_data,
|
2015-01-21 12:58:32 +01:00
|
|
|
NMConfigChangeFlags changes,
|
2021-11-09 13:28:54 +01:00
|
|
|
NMConfigData *old_data,
|
|
|
|
|
NMManager *self)
|
2015-01-30 19:52:53 +01:00
|
|
|
{
|
2018-04-10 15:55:16 +02:00
|
|
|
g_object_freeze_notify(G_OBJECT(self));
|
|
|
|
|
|
2015-07-03 11:06:39 +02:00
|
|
|
if (NM_FLAGS_HAS(changes, NM_CONFIG_CHANGE_GLOBAL_DNS_CONFIG))
|
2016-04-01 17:34:51 +02:00
|
|
|
_notify(self, PROP_GLOBAL_DNS_CONFIGURATION);
|
2019-07-22 15:55:15 +01:00
|
|
|
|
|
|
|
|
if (!nm_streq0(nm_config_data_get_connectivity_uri(config_data),
|
|
|
|
|
nm_config_data_get_connectivity_uri(old_data))) {
|
|
|
|
|
if ((!nm_config_data_get_connectivity_uri(config_data))
|
|
|
|
|
!= (!nm_config_data_get_connectivity_uri(old_data)))
|
|
|
|
|
_notify(self, PROP_CONNECTIVITY_CHECK_AVAILABLE);
|
|
|
|
|
_notify(self, PROP_CONNECTIVITY_CHECK_URI);
|
|
|
|
|
}
|
2018-04-10 15:55:16 +02:00
|
|
|
|
|
|
|
|
g_object_thaw_notify(G_OBJECT(self));
|
2015-01-30 19:52:53 +01:00
|
|
|
}
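The freeze/thaw bracket is what groups the up-to-three property notifications above into a single emission window; since PropertiesChanged is sent from dispatch_properties_changed() (see the core/dbus commit message further below), batching happens via standard GObject means. A generic sketch of the GObject behavior being relied on (illustrative object and property name):

/* While frozen, "notify" emissions are queued and duplicates for the
 * same property are coalesced; thaw emits one signal per property. */
g_object_freeze_notify(G_OBJECT(obj));
g_object_notify(G_OBJECT(obj), "some-prop"); /* queued */
g_object_notify(G_OBJECT(obj), "some-prop"); /* coalesced with the above */
g_object_thaw_notify(G_OBJECT(obj));         /* one notify::some-prop */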
|
|
|
|
|
|
2016-05-30 15:42:44 +02:00
|
|
|
static void
|
|
|
|
|
_reload_auth_cb(NMAuthChain *chain, GDBusMethodInvocation *context, gpointer user_data)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = NM_MANAGER(user_data);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
GError *ret_error = NULL;
|
2016-05-30 15:42:44 +02:00
|
|
|
NMAuthCallResult result;
|
|
|
|
|
guint32 flags;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMAuthSubject *subject;
|
2016-05-30 15:42:44 +02:00
|
|
|
char s_buf[60];
|
2016-05-30 16:43:39 +02:00
|
|
|
NMConfigChangeFlags reload_type = NM_CONFIG_CHANGE_NONE;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-05-02 10:08:09 +02:00
|
|
|
nm_assert(G_IS_DBUS_METHOD_INVOCATION(context));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-05-26 18:49:55 +02:00
|
|
|
c_list_unlink(nm_auth_chain_parent_lst_list(chain));
|
2016-05-30 15:42:44 +02:00
|
|
|
flags = GPOINTER_TO_UINT(nm_auth_chain_get_data(chain, "flags"));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-05-30 15:42:44 +02:00
|
|
|
subject = nm_auth_chain_get_subject(chain);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-05-30 15:42:44 +02:00
|
|
|
result = nm_auth_chain_get_result(chain, NM_AUTH_PERMISSION_RELOAD);
|
2019-05-04 09:37:54 +02:00
|
|
|
if (result != NM_AUTH_CALL_RESULT_YES) {
|
2016-05-30 15:42:44 +02:00
|
|
|
ret_error = g_error_new_literal(NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"Not authorized to reload configuration");
|
2016-05-30 16:43:39 +02:00
|
|
|
} else {
|
2019-09-05 14:54:22 +02:00
|
|
|
if (NM_FLAGS_ANY(flags, ~NM_MANAGER_RELOAD_FLAG_ALL)) {
|
2016-05-30 16:43:39 +02:00
|
|
|
/* invalid flags */
|
|
|
|
|
} else if (flags == 0)
|
|
|
|
|
reload_type = NM_CONFIG_CHANGE_CAUSE_SIGHUP;
|
|
|
|
|
else {
|
2019-09-05 14:54:22 +02:00
|
|
|
if (NM_FLAGS_HAS(flags, NM_MANAGER_RELOAD_FLAG_CONF))
|
2016-05-30 16:43:39 +02:00
|
|
|
reload_type |= NM_CONFIG_CHANGE_CAUSE_CONF;
|
2019-09-05 14:54:22 +02:00
|
|
|
if (NM_FLAGS_HAS(flags, NM_MANAGER_RELOAD_FLAG_DNS_RC))
|
2016-05-30 16:43:39 +02:00
|
|
|
reload_type |= NM_CONFIG_CHANGE_CAUSE_DNS_RC;
|
2019-09-05 14:54:22 +02:00
|
|
|
if (NM_FLAGS_HAS(flags, NM_MANAGER_RELOAD_FLAG_DNS_FULL))
|
2016-05-30 16:43:39 +02:00
|
|
|
reload_type |= NM_CONFIG_CHANGE_CAUSE_DNS_FULL;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-05-30 16:43:39 +02:00
|
|
|
if (reload_type == NM_CONFIG_CHANGE_NONE) {
|
|
|
|
|
ret_error = g_error_new_literal(NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_INVALID_ARGUMENTS,
|
|
|
|
|
"Invalid flags for reload");
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
2016-05-30 16:43:39 +02:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-05-30 15:42:44 +02:00
|
|
|
nm_audit_log_control_op(NM_AUDIT_OP_RELOAD,
|
|
|
|
|
nm_sprintf_buf(s_buf, "%u", flags),
|
|
|
|
|
ret_error == NULL,
|
|
|
|
|
subject,
|
|
|
|
|
ret_error ? ret_error->message : NULL);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-05-30 15:42:44 +02:00
|
|
|
if (ret_error) {
|
|
|
|
|
g_dbus_method_invocation_take_error(context, ret_error);
|
2019-05-02 10:08:09 +02:00
|
|
|
return;
|
2016-05-30 15:42:44 +02:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-11-09 18:06:32 +01:00
|
|
|
nm_config_reload(priv->config, reload_type, TRUE);
|
2016-05-30 15:42:44 +02:00
|
|
|
g_dbus_method_invocation_return_value(context, NULL);
|
|
|
|
|
}
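Summarizing the flag handling above (the numeric values are the assumed public NMManagerReloadFlags; hedged, listed for orientation only):

/* Assumed mapping from the D-Bus "flags" argument to a reload cause:
 *   0 (no flags)                          -> NM_CONFIG_CHANGE_CAUSE_SIGHUP
 *   NM_MANAGER_RELOAD_FLAG_CONF     (0x1) -> NM_CONFIG_CHANGE_CAUSE_CONF
 *   NM_MANAGER_RELOAD_FLAG_DNS_RC   (0x2) -> NM_CONFIG_CHANGE_CAUSE_DNS_RC
 *   NM_MANAGER_RELOAD_FLAG_DNS_FULL (0x4) -> NM_CONFIG_CHANGE_CAUSE_DNS_FULL
 *   any bit outside NM_MANAGER_RELOAD_FLAG_ALL -> InvalidArguments error */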
|
|
|
|
|
|
|
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
impl_manager_reload(NMDBusObject *obj,
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, we now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means that changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
2021-11-09 13:28:54 +01:00
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
2018-02-26 13:51:52 +01:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = NM_MANAGER(obj);
|
2018-02-26 13:51:52 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
NMAuthChain *chain;
|
2018-02-26 13:51:52 +01:00
|
|
|
guint32 flags;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
g_variant_get(parameters, "(u)", &flags);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
chain = nm_auth_chain_new_context(invocation, _reload_auth_cb, self);
|
2016-05-30 15:42:44 +02:00
|
|
|
if (!chain) {
|
2018-02-26 13:51:52 +01:00
|
|
|
g_dbus_method_invocation_return_error_literal(invocation,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
2019-09-04 10:18:56 +02:00
|
|
|
NM_UTILS_ERROR_MSG_REQ_AUTH_FAILED);
|
2016-05-30 15:42:44 +02:00
|
|
|
return;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-05-26 18:49:55 +02:00
|
|
|
c_list_link_tail(&priv->auth_lst_head, nm_auth_chain_parent_lst_list(chain));
|
2016-05-30 15:42:44 +02:00
|
|
|
nm_auth_chain_set_data(chain, "flags", GUINT_TO_POINTER(flags), NULL);
|
|
|
|
|
nm_auth_chain_add_call(chain, NM_AUTH_PERMISSION_RELOAD, TRUE);
|
|
|
|
|
}
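impl_manager_reload() is the handler behind the public org.freedesktop.NetworkManager.Reload(u) D-Bus method. A minimal client-side sketch using plain GIO (a hedged example, not code from this repository; helper name is illustrative):

#include <gio/gio.h>

/* Calls Reload on NetworkManager; flags == 0 behaves like SIGHUP. */
static gboolean
call_nm_reload(GDBusConnection *bus, guint32 flags, GError **error)
{
    GVariant *ret;

    ret = g_dbus_connection_call_sync(bus,
                                      "org.freedesktop.NetworkManager",
                                      "/org/freedesktop/NetworkManager",
                                      "org.freedesktop.NetworkManager",
                                      "Reload",
                                      g_variant_new("(u)", flags),
                                      NULL,
                                      G_DBUS_CALL_FLAGS_NONE,
                                      -1,
                                      NULL,
                                      error);
    if (!ret)
        return FALSE;
    g_variant_unref(ret);
    return TRUE;
}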
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2015-01-30 19:52:53 +01:00
|
|
|
|
2016-07-01 12:11:01 +02:00
|
|
|
NMDevice *
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we ever introduced multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
nm_manager_get_device_by_path(NMManager *self, const char *path)
|
2011-01-10 23:39:12 -06:00
|
|
|
{
|
2018-03-23 21:51:07 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device;
|
2011-01-10 23:39:12 -06:00
|
|
|
|
2018-03-23 21:51:07 +01:00
|
|
|
g_return_val_if_fail(path, NULL);
|
2011-03-15 17:04:35 -05:00
|
|
|
|
2022-02-22 21:19:18 +01:00
|
|
|
device =
|
|
|
|
|
nm_dbus_manager_lookup_object_with_type(nm_dbus_object_get_manager(NM_DBUS_OBJECT(self)),
|
|
|
|
|
NM_TYPE_DEVICE,
|
|
|
|
|
path);
|
|
|
|
|
if (!device || c_list_is_empty(&device->devices_lst))
|
2018-03-23 21:49:41 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
nm_assert(c_list_contains(&priv->devices_lst_head, &device->devices_lst));
|
|
|
|
|
return device;
|
2011-01-10 23:39:12 -06:00
|
|
|
}
|
|
|
|
|
|
2013-05-03 13:55:51 -04:00
|
|
|
NMDevice *
|
2018-03-23 21:51:07 +01:00
|
|
|
nm_manager_get_device_by_ifindex(NMManager *self, int ifindex)
|
2013-05-03 13:55:51 -04:00
|
|
|
{
|
2018-03-23 21:51:07 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device;
|
2013-05-03 13:55:51 -04:00
|
|
|
|
2018-06-22 15:47:41 +02:00
|
|
|
if (ifindex > 0) {
|
|
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
|
|
|
|
if (nm_device_get_ifindex(device) == ifindex)
|
|
|
|
|
return device;
|
|
|
|
|
}
|
2013-05-03 13:55:51 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
|
}
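The linear scan works because each NMDevice embeds its list node (devices_lst) directly, which is the point of the CList commit message above. A generic sketch of such an embedded list using c-util's c-list API (illustrative struct, not NM's real layout):

#include <c-list.h>

typedef struct {
    CList devices_lst; /* node embedded in the object itself */
    int   ifindex;
} Device;

static Device *
find_by_ifindex(CList *head, int ifindex)
{
    Device *device;

    /* iterate over the entries containing the linked nodes */
    c_list_for_each_entry (device, head, devices_lst) {
        if (device->ifindex == ifindex)
            return device;
    }
    return NULL;
}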
|
|
|
|
|
|
2015-04-29 10:56:36 +02:00
|
|
|
static NMDevice *
|
2018-03-23 21:51:07 +01:00
|
|
|
find_device_by_permanent_hw_addr(NMManager *self, const char *hwaddr)
|
2015-04-29 10:56:36 +02:00
|
|
|
{
|
2018-03-23 21:51:07 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device;
|
|
|
|
|
const char *device_addr;
|
2021-03-03 20:57:01 +01:00
|
|
|
guint8 hwaddr_bin[_NM_UTILS_HWADDR_LEN_MAX];
|
2018-03-24 11:31:29 +01:00
|
|
|
gsize hwaddr_len;
|
2015-04-29 10:56:36 +02:00
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
g_return_val_if_fail(hwaddr != NULL, NULL);
|
2015-04-29 10:56:36 +02:00
|
|
|
|
2018-03-24 11:31:29 +01:00
|
|
|
if (!_nm_utils_hwaddr_aton(hwaddr, hwaddr_bin, sizeof(hwaddr_bin), &hwaddr_len))
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
then GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibliy consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
note, it would at most be as-good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as disavantage, is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
|
|
|
|
device_addr = nm_device_get_permanent_hw_address(device);
|
2018-03-24 11:31:29 +01:00
|
|
|
if (device_addr && nm_utils_hwaddr_matches(hwaddr_bin, hwaddr_len, device_addr, -1))
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
then GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibliy consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
note, it would at most be as-good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as disavantage, is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
return device;
|
2015-04-29 10:56:36 +02:00
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
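
/* Illustrative usage sketch (the MAC address below is a made-up example,
 * not taken from the file): a caller holding the manager can resolve a
 * MAC address string to a tracked device.
 *
 *     NMDevice *device;
 *
 *     device = find_device_by_permanent_hw_addr(self, "00:53:00:11:22:33");
 *     if (device)
 *         _LOGD(LOGD_DEVICE, "device '%s' has that permanent MAC",
 *               nm_device_get_iface(device));
 */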

static NMDevice *
find_device_by_ip_iface(NMManager *self, const char *iface)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
    NMDevice         *device;

    g_return_val_if_fail(iface, NULL);

    c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
        if (nm_device_is_real(device) && nm_streq0(nm_device_get_ip_iface(device), iface))
            return device;
    }
    return NULL;
}
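
/* Note: nm_device_get_ip_iface() can differ from nm_device_get_iface().
 * For a modem, for instance, the control interface (e.g. "ttyUSB0") is not
 * the interface that carries IP traffic (e.g. "wwan0"); this lookup
 * deliberately matches the IP interface and only considers realized
 * devices. */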

/**
 * find_device_by_iface:
 * @self: the #NMManager
 * @iface: the device interface to find
 * @connection: a connection to ensure the returned device is compatible with
 * @slave: a slave connection to ensure a master is compatible with
 *
 * Finds a device by interface name, preferring realized devices. If @slave
 * is given, this function will only return master devices and will ensure
 * @slave, when activated, can be a slave of the returned master device. If
 * @connection is given, this function will only consider devices that are
 * compatible with @connection.
 *
 * Returns: the matching #NMDevice, or %NULL if no device matches
 */
static NMDevice *
find_device_by_iface(NMManager    *self,
                     const char   *iface,
                     NMConnection *connection,
                     NMConnection *slave)
{
    NMManagerPrivate *priv     = NM_MANAGER_GET_PRIVATE(self);
    NMDevice         *fallback = NULL;
    NMDevice         *candidate;

    g_return_val_if_fail(iface != NULL, NULL);

    c_list_for_each_entry (candidate, &priv->devices_lst_head, devices_lst) {
        if (!nm_streq(nm_device_get_iface(candidate), iface))
            continue;
        if (connection && !nm_device_check_connection_compatible(candidate, connection, NULL))
            continue;
        if (slave) {
            if (!nm_device_is_master(candidate))
                continue;
            if (!nm_device_check_slave_connection_compatible(candidate, slave))
                continue;
        }

        if (nm_device_is_real(candidate))
            return candidate;
        else if (!fallback)
            fallback = candidate;
    }
    return fallback;
}
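
/* Illustrative usage sketch: since realized devices are preferred and
 * unrealized ones serve as fallback, the same lookup works both for
 * existing links and for software devices that are yet to be created.
 *
 *     NMDevice *device;
 *
 *     device = find_device_by_iface(self, "br0", connection, NULL);
 *     if (device && !nm_device_is_real(device)) {
 *         // an unrealized (software) device; it must be realized
 *         // before it can be activated
 *     }
 */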

static gboolean
manager_sleeping(NMManager *self)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);

    if (priv->sleeping || !priv->net_enabled)
        return TRUE;
    return FALSE;
}

static const char *
_nm_state_to_string(NMState state)
{
    switch (state) {
    case NM_STATE_ASLEEP:
        return "ASLEEP";
    case NM_STATE_DISCONNECTED:
        return "DISCONNECTED";
    case NM_STATE_DISCONNECTING:
        return "DISCONNECTING";
    case NM_STATE_CONNECTING:
        return "CONNECTING";
    case NM_STATE_CONNECTED_LOCAL:
        return "CONNECTED_LOCAL";
    case NM_STATE_CONNECTED_SITE:
        return "CONNECTED_SITE";
    case NM_STATE_CONNECTED_GLOBAL:
        return "CONNECTED_GLOBAL";
    case NM_STATE_UNKNOWN:
    default:
        return "UNKNOWN";
    }
}

static NMState
find_best_device_state(NMManager *manager)
{
    NMManagerPrivate   *priv       = NM_MANAGER_GET_PRIVATE(manager);
    NMState             best_state = NM_STATE_DISCONNECTED;
    NMActiveConnection *ac;

    c_list_for_each_entry (ac, &priv->active_connections_lst_head, active_connections_lst) {
        NMActiveConnectionState ac_state = nm_active_connection_get_state(ac);

        switch (ac_state) {
        case NM_ACTIVE_CONNECTION_STATE_ACTIVATED:
            if (nm_active_connection_get_default(ac, AF_UNSPEC)) {
                if (priv->connectivity_state == NM_CONNECTIVITY_FULL)
                    return NM_STATE_CONNECTED_GLOBAL;

                best_state = NM_STATE_CONNECTED_SITE;
            } else {
                if (best_state < NM_STATE_CONNECTING)
                    best_state = NM_STATE_CONNECTED_LOCAL;
            }
            break;
        case NM_ACTIVE_CONNECTION_STATE_ACTIVATING:
            if (!NM_IN_SET(nm_active_connection_get_activation_type(ac),
                           NM_ACTIVATION_TYPE_EXTERNAL,
                           NM_ACTIVATION_TYPE_ASSUME)) {
                if (best_state != NM_STATE_CONNECTED_GLOBAL)
                    best_state = NM_STATE_CONNECTING;
            }
            break;
        case NM_ACTIVE_CONNECTION_STATE_DEACTIVATING:
            if (!NM_IN_SET(nm_active_connection_get_activation_type(ac),
                           NM_ACTIVATION_TYPE_EXTERNAL,
                           NM_ACTIVATION_TYPE_ASSUME)) {
                if (best_state < NM_STATE_DISCONNECTING)
                    best_state = NM_STATE_DISCONNECTING;
            }
            break;
        default:
            break;
        }
    }

    return best_state;
}
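
/* Worked example of the aggregation above: one activated default connection
 * while priv->connectivity_state is NM_CONNECTIVITY_FULL short-circuits to
 * NM_STATE_CONNECTED_GLOBAL; the same connection without full connectivity
 * yields NM_STATE_CONNECTED_SITE; a non-default activated connection alone
 * yields NM_STATE_CONNECTED_LOCAL. Activations of type EXTERNAL or ASSUME
 * never push the aggregate state to CONNECTING or DISCONNECTING. */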

static void
nm_manager_update_metered(NMManager *self)
{
    NMManagerPrivate *priv;
    NMDevice         *device;
    NMMetered         value = NM_METERED_UNKNOWN;

    g_return_if_fail(NM_IS_MANAGER(self));
    priv = NM_MANAGER_GET_PRIVATE(self);

    if (priv->primary_connection) {
        device = nm_active_connection_get_device(priv->primary_connection);
        if (device)
            value = nm_device_get_metered(device);
    }

    if (value != priv->metered) {
        priv->metered = value;
        _LOGD(LOGD_CORE, "new metered value: %d", (int) priv->metered);
        _notify(self, PROP_METERED);
    }
}

NMMetered
nm_manager_get_metered(NMManager *self)
{
    g_return_val_if_fail(NM_IS_MANAGER(self), NM_METERED_UNKNOWN);

    return NM_MANAGER_GET_PRIVATE(self)->metered;
}
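
/* Illustrative usage sketch: a consumer that wants to avoid bulk traffic on
 * metered links can combine the getter with NM_IN_SET().
 *
 *     if (NM_IN_SET(nm_manager_get_metered(self),
 *                   NM_METERED_YES,
 *                   NM_METERED_GUESS_YES)) {
 *         // the primary connection is (probably) metered;
 *         // defer non-essential downloads
 *     }
 */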

static void
nm_manager_update_state(NMManager *self)
{
    NMManagerPrivate *priv;
    NMState           new_state = NM_STATE_DISCONNECTED;

    g_return_if_fail(NM_IS_MANAGER(self));

    priv = NM_MANAGER_GET_PRIVATE(self);

    if (manager_sleeping(self))
        new_state = NM_STATE_ASLEEP;
    else
        new_state = find_best_device_state(self);

    if (new_state >= NM_STATE_CONNECTED_LOCAL && priv->connectivity_state == NM_CONNECTIVITY_FULL) {
        new_state = NM_STATE_CONNECTED_GLOBAL;
    }

    if (priv->state == new_state)
        return;

    priv->state = new_state;

    _LOGI(LOGD_CORE, "NetworkManager state is now %s", _nm_state_to_string(new_state));

    _notify(self, PROP_STATE);
    nm_dbus_object_emit_signal(NM_DBUS_OBJECT(self),
                               &interface_info_manager,
                               &signal_info_state_changed,
                               "(u)",
                               (guint32) priv->state);
}
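
/* Note: the "(u)" signature corresponds to the single guint32 argument of
 * the StateChanged D-Bus signal on org.freedesktop.NetworkManager. Clients
 * can observe these transitions with, for example, "nmcli monitor", which
 * prints a line for every manager state change. */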

static void
manager_device_state_changed(NMDevice           *device,
                             NMDeviceState       new_state,
                             NMDeviceState       old_state,
                             NMDeviceStateReason reason,
                             gpointer            user_data)
{
    NMManager        *self = NM_MANAGER(user_data);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);

    if (old_state == NM_DEVICE_STATE_UNMANAGED && new_state > NM_DEVICE_STATE_UNMANAGED)
        retry_connections_for_parent_device(self, device);

    if (NM_IN_SET(new_state,
                  NM_DEVICE_STATE_UNMANAGED,
                  NM_DEVICE_STATE_UNAVAILABLE,
                  NM_DEVICE_STATE_DISCONNECTED,
                  NM_DEVICE_STATE_PREPARE,
                  NM_DEVICE_STATE_FAILED))
        _notify(self, PROP_ACTIVE_CONNECTIONS);

    if (NM_IN_SET(new_state,
                  NM_DEVICE_STATE_UNMANAGED,
                  NM_DEVICE_STATE_DISCONNECTED,
                  NM_DEVICE_STATE_ACTIVATED)) {
        nm_manager_write_device_state(self, device, NULL);

        G_STATIC_ASSERT_EXPR(DEVICE_STATE_PRUNE_RATELIMIT_MAX < G_MAXUINT8);
        if (priv->device_state_prune_ratelimit_count++ > DEVICE_STATE_PRUNE_RATELIMIT_MAX) {
            /* We write the device state to /run. The state files are named after the
             * ifindex (which is assumed to be unique and not repeat -- in practice
             * it may repeat). So from time to time, we prune device state files
             * for interfaces that no longer exist.
             *
             * Otherwise, the files might pile up if you create (and destroy) a large
             * number of software devices. */
            priv->device_state_prune_ratelimit_count = 0;
            nm_config_device_state_prune_stale(NULL, priv->platform);
        }
    }

    if (NM_IN_SET(new_state, NM_DEVICE_STATE_UNAVAILABLE, NM_DEVICE_STATE_DISCONNECTED))
        nm_settings_device_added(priv->settings, device);
}

static void
_dns_mgr_update_pending_cb(NMDnsManager *dns_manager, GParamSpec *pspec, NMManager *self)
{
    check_if_startup_complete(self);
}

static void
check_if_startup_complete(NMManager *self)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
    NMDevice         *device;
    const char       *reason;

    if (!priv->startup)
        return;

    if (!priv->devices_inited)
        return;

    if (nm_dns_manager_get_update_pending(nm_manager_get_dns_manager(self))) {
        if (priv->dns_mgr_update_pending_signal_id == 0) {
            priv->dns_mgr_update_pending_signal_id =
                g_signal_connect(nm_manager_get_dns_manager(self),
                                 "notify::" NM_DNS_MANAGER_UPDATE_PENDING,
                                 G_CALLBACK(_dns_mgr_update_pending_cb),
                                 self);
        }
        return;
    }

    nm_clear_g_signal_handler(nm_manager_get_dns_manager(self),
                              &priv->dns_mgr_update_pending_signal_id);

    c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
        reason = nm_device_has_pending_action_reason(device);
        if (reason) {
            _LOGD(LOGD_CORE,
                  "startup complete is waiting for device '%s' (%s)",
                  nm_device_get_iface(device),
                  reason);
            return;
        }
    }

    /* All NMDevice must be ready. But also NMSettings tracks profiles that wait for
     * ready devices via "connection.wait-device-timeout".
     *
     * Note that we only re-check nm_settings_get_startup_complete_blocked_reason() when
     * all of the devices become ready (again).
     *
     * For example, assume we have device "eth1" and "profile-eth2" which waits for "eth2".
     * If "eth1" is ready (no pending action), we only need to re-evaluate "profile-eth2"
     * if another device ("eth2") becomes non-ready (has pending actions) and then
     * becomes ready again. We don't need to check "profile-eth2" until "eth2" becomes
     * non-ready.
     * That is why nm_settings_get_startup_complete_blocked_reason() only has any significance
     * if all devices are ready too. It allows us to cut down the number of checks whether
     * NMSettings is ready. That's because we don't need to re-evaluate on minor changes of
     * a device, only when all devices become managed and ready. */

    g_signal_handlers_block_by_func(priv->settings, settings_startup_complete_changed, self);
    reason = nm_settings_get_startup_complete_blocked_reason(priv->settings, TRUE);
    g_signal_handlers_unblock_by_func(priv->settings, settings_startup_complete_changed, self);
    if (reason) {
        _LOGD(LOGD_CORE, "startup complete is waiting for connection (%s)", reason);
        return;
    }

    /* Most of our logging is not API/stable, but this line is kinda important and
     * what people will look for when debugging NetworkManager-wait-online.service.
     * Take care before rewording this message. */
    _LOGI(LOGD_CORE, "startup complete");

    priv->startup = FALSE;

    /* we no longer care about these signals. Startup-complete only
     * happens once. */
    g_signal_handlers_disconnect_by_func(priv->settings,
                                         G_CALLBACK(settings_startup_complete_changed),
                                         self);
    c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
        g_signal_handlers_disconnect_by_func(device,
                                             G_CALLBACK(device_has_pending_action_changed),
                                             self);
    }

    _notify(self, PROP_STARTUP);

    if (nm_config_get_configure_and_quit(priv->config))
        g_signal_emit(self, signals[CONFIGURE_QUIT], 0);
}
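
/* Note: this function is what NetworkManager-wait-online.service effectively
 * waits for. Completion is only declared once, after all of the following
 * hold:
 *
 *     // 1. devices are initialized:  priv->devices_inited
 *     // 2. no DNS update pending:    !nm_dns_manager_get_update_pending(...)
 *     // 3. every device is ready:    nm_device_has_pending_action_reason(...) == NULL
 *     // 4. settings no longer block: nm_settings_get_startup_complete_blocked_reason(...) == NULL
 *
 * After that, priv->startup flips to FALSE and PROP_STARTUP is notified
 * exactly once. */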

static void
device_has_pending_action_changed(NMDevice *device, GParamSpec *pspec, NMManager *self)
{
    check_if_startup_complete(self);
}

static void
settings_startup_complete_changed(NMSettings *settings, GParamSpec *pspec, NMManager *self)
{
    check_if_startup_complete(self);
}

static void
_parent_notify_changed(NMManager *self, NMDevice *device, gboolean device_removed)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
    NMDevice         *candidate;

    nm_assert(NM_IS_DEVICE(device));

again:
    c_list_for_each_entry (candidate, &priv->devices_lst_head, devices_lst) {
        if (nm_device_parent_notify_changed(candidate, device, device_removed)) {
            /* in the unlikely event that this changes anything, we start iterating
             * again, to be sure that the device list is up-to-date. */
            goto again;
        }
    }
}

static gboolean
device_is_wake_on_lan(NMPlatform *platform, NMDevice *device)
{
    int ifindex;

    ifindex = nm_device_get_ip_ifindex(device);
    if (ifindex <= 0)
        return FALSE;
    return nm_platform_link_get_wake_on_lan(platform, ifindex);
}
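
/* Illustrative sketch of how this helper is used: remove_device() below
 * relies on it to keep a wake-on-LAN configured interface untouched when
 * NetworkManager shuts down.
 *
 *     if (quitting && device_is_wake_on_lan(priv->platform, device))
 *         unmanage = FALSE;  // leave the link configured so WoL keeps working
 */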
|
|
|
|
|
|
2013-08-15 12:58:22 -04:00
|
|
|
static void
|
2016-03-02 11:38:26 +01:00
|
|
|
remove_device(NMManager *self, NMDevice *device, gboolean quitting)
|
2009-05-20 12:02:18 -04:00
|
|
|
{
|
2016-03-02 11:38:26 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2016-04-20 17:46:41 +02:00
|
|
|
gboolean unmanage = FALSE;
|
2022-03-21 10:19:37 +01:00
|
|
|
NMRfkillType rtype;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-05-17 19:22:19 +02:00
|
|
|
_LOG2D(LOGD_DEVICE,
|
|
|
|
|
device,
|
|
|
|
|
"removing device (managed %d, wol %d)",
|
|
|
|
|
nm_device_get_managed(device, FALSE),
|
2018-05-25 17:46:33 +02:00
|
|
|
device_is_wake_on_lan(priv->platform, device));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-05-17 19:22:19 +02:00
|
|
|
if (nm_device_get_managed(device, FALSE)) {
|
2018-05-25 17:46:33 +02:00
|
|
|
if (quitting) {
|
|
|
|
|
/* Leave configured if wo(w)lan and quitting */
|
|
|
|
|
if (device_is_wake_on_lan(priv->platform, device))
|
|
|
|
|
unmanage = FALSE;
|
|
|
|
|
else
|
|
|
|
|
unmanage = nm_device_unmanage_on_quit(device);
|
|
|
|
|
} else {
|
2016-04-01 11:04:34 +02:00
|
|
|
/* the device is already gone. Unmanage it. */
|
2016-06-21 11:04:38 +02:00
|
|
|
unmanage = TRUE;
|
2016-04-01 11:04:34 +02:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-05-23 18:25:05 -05:00
|
|
|
if (unmanage) {
|
|
|
|
|
if (quitting)
|
2016-01-13 12:03:47 +01:00
|
|
|
nm_device_set_unmanaged_by_quitting(device);
|
2017-03-13 15:34:14 +01:00
|
|
|
else {
|
|
|
|
|
nm_device_sys_iface_state_set(device, NM_DEVICE_SYS_IFACE_STATE_REMOVED);
|
device: remove default-unmanaged and refactor unmanaged flags
Get rid of NM_UNMANAGED_DEFAULT and refine the interaction between
unmanaged flags, device state and managed property.
Previously, the NM_UNMANAGED_DEFAULT was special in that a device was
still considered managed if it had solely the NM_UNMANAGED_DEFAULT flag
set and its state was managed. Thus, whether the device (state) was managed,
depended on the device state too.
Now, a device is considered managed (or unmanaged) based on the unmanaged
flags and realization state alone. At the same time, the device state
directly corresponds to the managed property of the device. Of course,
while changing the unmanaged flags, that invariant is shortly violated
until the state transistion is complete.
Introduce more unmanaged flags whereas some of them are non-authorative.
For example, the EXTERNAL_DOWN flag has only effect as long as the user
didn't explicitly manage the device (NM_UNMANAGED_USER_EXPLICIT). In other
words, certain flags can render other flags ineffective. Whether the device
is considered managed depends on the flags but also at the explicitly unset flags.
In a way, this is similar to previous where NM_UNMANAGED_DEFAULT was ignored
(if no other flags were present).
Also, previously a device that was NM_UNMANAGED_DEFAULT and in disconnected
state would transition back to unmanaged. No longer do that. Once a device is
managed, it stays managed as long as the flags indicate it should be managed.
However, the user can also modify the unmanaged flags via the D-Bus API.
Also get rid or nm_device_finish_init(). That was previously called
by NMManager after add_device(). As we now realize devices (possibly
multiple times) this should be handled during realization.
https://bugzilla.gnome.org/show_bug.cgi?id=746566
2015-09-15 15:35:16 +02:00
|
|
|
nm_device_set_unmanaged_by_flags(device,
|
|
|
|
|
NM_UNMANAGED_PLATFORM_INIT,
|
|
|
|
|
TRUE,
|
|
|
|
|
NM_DEVICE_STATE_REASON_REMOVED);
|
2017-03-13 15:34:14 +01:00
|
|
|
}
|
2009-08-03 17:15:03 -04:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
|
|
|
|
|
2016-03-02 11:38:26 +01:00
|
|
|
g_signal_handlers_disconnect_matched(device, G_SIGNAL_MATCH_DATA, 0, 0, NULL, NULL, self);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-11-14 11:33:12 -06:00
|
|
|
nm_settings_device_removed(priv->settings, device, quitting);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
then GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibliy consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
note, it would at most be as-good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as disavantage, is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
c_list_unlink(&device->devices_lst);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-12-26 11:12:39 +01:00
|
|
|
_parent_notify_changed(self, device, TRUE);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2022-03-21 10:19:37 +01:00
|
|
|
rtype = nm_device_get_rfkill_type(device);
|
|
|
|
|
if (rtype != NM_RFKILL_TYPE_UNKNOWN)
|
|
|
|
|
_rfkill_update(self, rtype);
|
|
|
|
|
|
2014-10-06 11:21:54 -05:00
|
|
|
if (nm_device_is_real(device)) {
|
2016-04-20 17:46:41 +02:00
|
|
|
gboolean unconfigure_ip_config = !quitting || unmanage;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-04-20 17:46:41 +02:00
|
|
|
/* When we don't unmanage the device on shutdown, we want to preserve the DNS
|
|
|
|
|
* configuration in resolv.conf. For that, we must leak the configuration
|
|
|
|
|
* in NMPolicy/NMDnsManager. We do that, by emitting the device-removed signal
|
|
|
|
|
* with device's ip-config object still uncleared. In that case, NMPolicy
|
|
|
|
|
* never learns to unconfigure the ip-config objects and does not remove them
|
|
|
|
|
* from DNS on shutdown (which is ugly, because we don't cleanup the memory
|
|
|
|
|
* properly).
|
|
|
|
|
*
|
|
|
|
|
* Control that by passing @unconfigure_ip_config. */
|
|
|
|
|
nm_device_removed(device, unconfigure_ip_config);
|
2016-04-04 16:58:33 +02:00
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more then 1300 lines of hand written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue us more under our control, we can address issus and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straight forward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry, either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in and idle handle, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we emit now more PropertiesChanged signals then before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
|
|
|
_emit_device_added_removed(self, device, FALSE);
|
2017-03-17 23:30:43 +01:00
|
|
|
} else {
|
|
|
|
|
/* unrealize() does not release a slave device from master and
|
|
|
|
|
* clear IP configurations, do it here */
|
|
|
|
|
nm_device_removed(device, TRUE);
|
2014-10-06 11:21:54 -05:00
|
|
|
}
|
2017-03-17 23:30:43 +01:00
|
|
|
|
2016-03-02 11:38:26 +01:00
|
|
|
g_signal_emit(self, signals[INTERNAL_DEVICE_REMOVED], 0, device);
|
2016-04-01 17:34:51 +02:00
|
|
|
_notify(self, PROP_ALL_DEVICES);
|
2014-04-09 12:31:08 -05:00
|
|
|
|
2019-03-21 11:32:32 +01:00
|
|
|
update_connectivity_value(self);
|
|
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more then 1300 lines of hand written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue us more under our control, we can address issus and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straight forward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry, either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in and idle handle, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we emit now more PropertiesChanged signals then before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
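For instance, a minimal sketch (assuming @obj is a GObject * with hypothetical "state" and "speed" properties):

    /* both "notify" emissions are queued while frozen and dispatched
     * together on thaw, so one PropertiesChanged can cover both */
    g_object_freeze_notify(obj);
    g_object_set(obj, "state", 1, "speed", 100, NULL);
    g_object_thaw_notify(obj);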
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
|
|
|
nm_dbus_object_clear_and_unexport(&device);
|
2009-05-20 12:02:18 -04:00
|
|
|
|
2016-03-02 11:38:26 +01:00
|
|
|
check_if_startup_complete(self);
|
2009-05-20 12:02:18 -04:00
|
|
|
}
|
|
|
|
|
|
2014-02-10 08:49:47 -06:00
|
|
|
static void
|
|
|
|
|
device_removed_cb(NMDevice *device, gpointer user_data)
|
|
|
|
|
{
|
2019-05-17 19:22:19 +02:00
|
|
|
remove_device(NM_MANAGER(user_data), device, FALSE);
|
2014-02-10 08:49:47 -06:00
|
|
|
}
|
|
|
|
|
|
2007-09-25 16:47:53 +00:00
|
|
|
NMState
|
|
|
|
|
nm_manager_get_state(NMManager *manager)
|
|
|
|
|
{
|
|
|
|
|
g_return_val_if_fail(NM_IS_MANAGER(manager), NM_STATE_UNKNOWN);
|
|
|
|
|
|
|
|
|
|
return NM_MANAGER_GET_PRIVATE(manager)->state;
|
|
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2009-06-11 00:39:12 -04:00
|
|
|
|
all: standardize on NMSettingWired:mac-address for all VLANs
Currently, ethernet-based VLANs can specify the hardware address of
the parent device (and, in theory, the cloned hardware address and MTU
of the VLAN device) by using an NMSettingWired in addition to the
NMSettingVlan.
The theory was that non-ethernet-based VLANs, when we eventually
supported them, would likewise use the setting type corresponding to
their parent device. However, this turns out to be both complicated
(the settings plugins and connection editor would have a
hard-to-impossible time figuring out which setting type to use in some
cases) and incorrect (for most L2 settings [eg, BSSID, bond mode,
etc], the VLAN can't have its own values separate from the parent
device).
What we should have done was just have :mac-address,
:cloned-mac-address, and :mtu properties on NMSettingVlan. However, at
this point, for backward-compatibility, we will just stick with using
a combination of NMSettingVlan and NMSettingWired, but we will use
NMSettingWired regardless of the underlying hardware type.
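For illustration, a client-side sketch of that convention (hypothetical values, using libnm-core API):

    NMConnection *conn = nm_simple_connection_new();
    NMSetting *s_vlan = nm_setting_vlan_new();
    NMSetting *s_wired = nm_setting_wired_new();

    g_object_set(s_vlan, NM_SETTING_VLAN_ID, (guint) 100, NULL);
    /* the parent's hardware address lives in NMSettingWired, even though
     * this is a VLAN profile */
    g_object_set(s_wired, NM_SETTING_WIRED_MAC_ADDRESS, "00:53:00:11:22:33", NULL);
    nm_connection_add_setting(conn, s_vlan);
    nm_connection_add_setting(conn, s_wired);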
2013-09-09 09:40:40 -04:00
|
|
|
static NMDevice *
|
2021-11-09 13:28:54 +01:00
|
|
|
find_parent_device_for_connection(NMManager *self,
|
|
|
|
|
NMConnection *connection,
|
2019-11-21 18:05:11 +01:00
|
|
|
NMDeviceFactory *cached_factory,
|
2021-11-09 13:28:54 +01:00
|
|
|
const char **out_parent_spec)
|
2012-02-12 14:48:44 -06:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
NMDeviceFactory *factory;
|
|
|
|
|
const char *parent_name = NULL;
|
2015-07-14 16:53:24 +02:00
|
|
|
NMSettingsConnection *parent_connection;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *parent, *first_compatible = NULL;
|
|
|
|
|
NMDevice *candidate;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-09-24 16:58:07 -05:00
|
|
|
g_return_val_if_fail(NM_IS_CONNECTION(connection), NULL);
|
2019-11-21 18:05:11 +01:00
|
|
|
NM_SET_OUT(out_parent_spec, NULL);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-02-17 15:18:37 +01:00
|
|
|
if (!cached_factory) {
|
|
|
|
|
factory = nm_device_factory_manager_find_factory_for_connection(connection);
|
|
|
|
|
if (!factory)
|
|
|
|
|
return NULL;
|
|
|
|
|
} else
|
|
|
|
|
factory = cached_factory;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
parent_name = nm_device_factory_get_connection_parent(factory, connection);
|
|
|
|
|
if (!parent_name)
|
|
|
|
|
return NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-11-21 18:05:11 +01:00
|
|
|
NM_SET_OUT(out_parent_spec, parent_name);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-10-15 21:17:45 -05:00
|
|
|
/* Try as an interface name of a parent device */
|
|
|
|
|
parent = find_device_by_iface(self, parent_name, NULL, NULL);
|
2014-09-18 17:50:47 -05:00
|
|
|
if (parent)
|
|
|
|
|
return parent;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
/* Maybe a hardware address */
|
2016-06-15 13:43:34 +02:00
|
|
|
parent = find_device_by_permanent_hw_addr(self, parent_name);
|
2014-09-18 17:50:47 -05:00
|
|
|
if (parent)
|
|
|
|
|
return parent;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
/* Maybe a connection UUID */
|
2015-07-14 16:53:24 +02:00
|
|
|
parent_connection = nm_settings_get_connection_by_uuid(priv->settings, parent_name);
|
2014-09-18 17:50:47 -05:00
|
|
|
if (!parent_connection)
|
|
|
|
|
return NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-09-15 07:20:54 -04:00
|
|
|
/* Check if the parent connection is currently activated or is compatible
|
2014-09-18 17:50:47 -05:00
|
|
|
* with some known device.
|
|
|
|
|
*/
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
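To make the pattern concrete, a standalone sketch against the c-list API (not the actual NMDevice code; names are illustrative):

    #include <stdlib.h>
    #include <string.h>
    #include "c-list.h"

    typedef struct {
        CList devices_lst; /* link node embedded in the tracked object */
        char *iface;
    } Device;

    static CList devices_lst_head = C_LIST_INIT(devices_lst_head);

    static Device *
    device_new(const char *iface)
    {
        Device *d = calloc(1, sizeof(*d));

        c_list_init(&d->devices_lst); /* starts out unlinked */
        d->iface = strdup(iface);
        return d;
    }

    static void
    track_device(Device *d)
    {
        c_list_link_tail(&devices_lst_head, &d->devices_lst);
    }

    static void
    untrack_device(Device *d)
    {
        /* O(1): no list head required, and c_list_is_linked() tells
         * whether the object is currently tracked */
        if (c_list_is_linked(&d->devices_lst))
            c_list_unlink(&d->devices_lst);
    }

Iteration then looks like the c_list_for_each_entry() loops below.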
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (candidate, &priv->devices_lst_head, devices_lst) {
|
2022-09-09 17:34:15 +02:00
|
|
|
/* For a realized device, check that it's managed; otherwise it's not
|
|
|
|
|
* compatible with any connection. If the device is unrealized then
|
|
|
|
|
* the managed state is meaningless.
|
|
|
|
|
*/
|
|
|
|
|
if (nm_device_is_real(candidate) && !nm_device_get_managed(candidate, FALSE))
|
2017-09-27 09:25:16 +02:00
|
|
|
continue;
|
|
|
|
|
|
2015-07-14 16:53:24 +02:00
|
|
|
if (nm_device_get_settings_connection(candidate) == parent_connection)
|
2014-09-18 17:50:47 -05:00
|
|
|
return candidate;
|
2013-06-10 17:32:23 -03:00
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
if (!first_compatible
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
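Schematically, the has-a relation described here (a sketch; struct and function names are illustrative, not the real declarations):

    struct SettConnSketch {
        NMDBusObject  parent;     /* still a D-Bus exported object */
        NMConnection *connection; /* delegate holding the actual profile */
    };

    static NMConnection *
    sett_conn_sketch_get_connection(struct SettConnSketch *self)
    {
        /* callers get the plain NMConnection and work on that, instead
         * of casting the settings object itself to NMConnection */
        return self->connection;
    }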
2018-08-11 11:08:17 +02:00
|
|
|
&& nm_device_check_connection_compatible(
|
|
|
|
|
candidate,
|
|
|
|
|
nm_settings_connection_get_connection(parent_connection),
|
|
|
|
|
NULL))
|
2014-09-18 17:50:47 -05:00
|
|
|
first_compatible = candidate;
|
2013-06-10 17:32:23 -03:00
|
|
|
}
|
|
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
return first_compatible;
|
2013-06-10 17:32:23 -03:00
|
|
|
}
|
|
|
|
|
|
2012-02-10 13:25:39 -06:00
|
|
|
/**
|
2016-02-16 15:07:49 +01:00
|
|
|
* nm_manager_get_connection_iface:
|
2012-02-10 13:25:39 -06:00
|
|
|
* @self: the #NMManager
|
2016-02-16 15:07:49 +01:00
|
|
|
* @connection: the #NMConnection to get the interface for
|
2012-02-22 23:59:50 -06:00
|
|
|
* @out_parent: on success, the parent device if any
|
2019-11-21 18:05:11 +01:00
|
|
|
* @out_parent_spec: on return, a string specifying the parent device
|
|
|
|
|
* in the connection. This can be a device name, a MAC address or a
|
|
|
|
|
* connection UUID.
|
2014-09-18 17:50:47 -05:00
|
|
|
* @error: an error if determining the virtual interface name failed
|
2012-02-10 13:25:39 -06:00
|
|
|
*
|
|
|
|
|
* Given @connection, returns the interface name that the connection
|
2016-02-16 15:07:49 +01:00
|
|
|
* would need to use when activated. %NULL is returned if the name
|
|
|
|
|
* is not specified in the connection or the name for a virtual device
|
|
|
|
|
* could not be generated.
|
2012-02-10 13:25:39 -06:00
|
|
|
*
|
|
|
|
|
* Returns: the expected interface name (caller takes ownership), or %NULL
|
|
|
|
|
*/
|
2016-02-16 15:07:49 +01:00
|
|
|
char *
|
2021-11-09 13:28:54 +01:00
|
|
|
nm_manager_get_connection_iface(NMManager *self,
|
2016-02-16 15:07:49 +01:00
|
|
|
NMConnection *connection,
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice **out_parent,
|
|
|
|
|
const char **out_parent_spec,
|
|
|
|
|
GError **error)
|
2012-02-10 13:25:39 -06:00
|
|
|
{
|
2014-09-18 17:50:47 -05:00
|
|
|
NMDeviceFactory *factory;
|
2021-11-09 13:28:54 +01:00
|
|
|
char *iface = NULL;
|
|
|
|
|
NMDevice *parent = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-11-21 18:05:11 +01:00
|
|
|
NM_SET_OUT(out_parent, NULL);
|
|
|
|
|
NM_SET_OUT(out_parent_spec, NULL);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
factory = nm_device_factory_manager_find_factory_for_connection(connection);
|
|
|
|
|
if (!factory) {
|
2017-04-26 19:05:54 +02:00
|
|
|
if (nm_streq0(nm_connection_get_connection_type(connection),
|
|
|
|
|
NM_SETTING_GENERIC_SETTING_NAME)) {
|
|
|
|
|
/* the generic type doesn't have a factory. */
|
|
|
|
|
goto return_ifname_from_connection;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
g_set_error(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_FAILED,
|
|
|
|
|
"NetworkManager plugin for '%s' unavailable",
|
|
|
|
|
nm_connection_get_connection_type(connection));
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-10-07 16:05:43 +02:00
|
|
|
if (!out_parent && !NM_DEVICE_FACTORY_GET_CLASS(factory)->get_connection_iface) {
|
2016-02-17 16:12:46 +01:00
|
|
|
/* Optimization: shortcut lookup of the parent device. */
|
2017-04-26 19:05:54 +02:00
|
|
|
goto return_ifname_from_connection;
|
2016-02-17 16:12:46 +01:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-11-21 18:05:11 +01:00
|
|
|
parent = find_parent_device_for_connection(self, connection, factory, out_parent_spec);
|
2016-02-17 15:11:02 +01:00
|
|
|
iface = nm_device_factory_get_connection_iface(factory,
|
|
|
|
|
connection,
|
|
|
|
|
parent ? nm_device_get_ip_iface(parent) : NULL,
|
|
|
|
|
error);
|
2016-01-21 17:36:08 +01:00
|
|
|
if (!iface)
|
2014-09-18 17:50:47 -05:00
|
|
|
return NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-09-18 17:50:47 -05:00
|
|
|
if (out_parent)
|
|
|
|
|
*out_parent = parent;
|
|
|
|
|
return iface;
|
2017-04-26 19:05:54 +02:00
|
|
|
|
|
|
|
|
return_ifname_from_connection:
|
|
|
|
|
iface = g_strdup(nm_connection_get_interface_name(connection));
|
|
|
|
|
if (!iface) {
|
|
|
|
|
g_set_error(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_FAILED,
|
|
|
|
|
"failed to determine interface name: error determine name for %s",
|
|
|
|
|
nm_connection_get_connection_type(connection));
|
|
|
|
|
}
|
|
|
|
|
return iface;
|
2011-10-18 13:48:44 +02:00
|
|
|
}
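A hypothetical caller of the function documented above (illustrative only):

    static void
    example_lookup_iface(NMManager *manager, NMConnection *connection)
    {
        gs_free char *iface = NULL;
        gs_free_error GError *error = NULL;
        NMDevice *parent = NULL;

        iface = nm_manager_get_connection_iface(manager, connection, &parent, NULL, &error);
        if (!iface) {
            /* neither an explicit interface name nor a generated one */
            return;
        }
        /* @iface is owned by the caller (released here via gs_free);
         * @parent, if set, is the device the connection would build on */
    }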
|
|
|
|
|
|
2016-11-28 12:32:03 +00:00
|
|
|
/**
|
|
|
|
|
* nm_manager_iface_for_uuid:
|
|
|
|
|
* @self: the #NMManager
|
|
|
|
|
* @uuid: the connection uuid
|
|
|
|
|
*
|
|
|
|
|
* Gets a link name for the given UUID. Useful for the settings plugins that
|
|
|
|
|
* wish to write configuration files compatible with tooling that can't
|
|
|
|
|
* interpret our UUIDs.
|
|
|
|
|
*
|
|
|
|
|
* Returns: An interface name; %NULL if none matches
|
|
|
|
|
*/
|
|
|
|
|
const char *
|
|
|
|
|
nm_manager_iface_for_uuid(NMManager *self, const char *uuid)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn;
|
2016-11-28 12:32:03 +00:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn = nm_settings_get_connection_by_uuid(priv->settings, uuid);
|
|
|
|
|
if (!sett_conn)
|
2016-11-28 12:32:03 +00:00
|
|
|
return NULL;
|
|
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
return nm_connection_get_interface_name(nm_settings_connection_get_connection(sett_conn));
|
2016-11-28 12:32:03 +00:00
|
|
|
}
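A hypothetical use from a settings plugin (illustrative; assumes access to the NMManager singleton via nm_manager_get()):

    const char *ifname = nm_manager_iface_for_uuid(nm_manager_get(), uuid);

    if (ifname) {
        /* write @ifname instead of the UUID into the legacy config file */
    }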
|
|
|
|
|
|
2017-10-10 15:00:59 +02:00
|
|
|
NMDevice *
|
|
|
|
|
nm_manager_get_device(NMManager *self, const char *ifname, NMDeviceType device_type)
|
2017-07-03 16:24:59 +02:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device;
|
2017-07-03 16:24:59 +02:00
|
|
|
|
2017-10-10 15:00:59 +02:00
|
|
|
g_return_val_if_fail(ifname, NULL);
|
|
|
|
|
g_return_val_if_fail(device_type != NM_DEVICE_TYPE_UNKNOWN, NULL);
|
|
|
|
|
|
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
|
|
|
|
if (nm_device_get_device_type(device) == device_type
|
|
|
|
|
&& nm_streq0(nm_device_get_iface(device), ifname))
|
|
|
|
|
return device;
|
2017-07-03 16:24:59 +02:00
|
|
|
}
|
|
|
|
|
|
2017-10-10 15:00:59 +02:00
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
gboolean
|
|
|
|
|
nm_manager_remove_device(NMManager *self, const char *ifname, NMDeviceType device_type)
|
|
|
|
|
{
|
|
|
|
|
NMDevice *d;
|
|
|
|
|
|
|
|
|
|
d = nm_manager_get_device(self, ifname, device_type);
|
|
|
|
|
if (!d)
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
2019-05-17 19:22:19 +02:00
|
|
|
remove_device(self, d, FALSE);
|
2017-10-10 15:00:59 +02:00
|
|
|
return TRUE;
|
2017-07-03 16:24:59 +02:00
|
|
|
}
|
|
|
|
|
|
2012-02-10 13:25:39 -06:00
|
|
|
/**
|
|
|
|
|
* system_create_virtual_device:
|
|
|
|
|
* @self: the #NMManager
|
|
|
|
|
* @connection: the connection which might require a virtual device
|
|
|
|
|
*
|
|
|
|
|
* If @connection requires a virtual device and one does not yet exist for it,
|
|
|
|
|
* creates that device.
|
2015-12-17 18:40:36 +01:00
|
|
|
*
|
|
|
|
|
* Returns: A #NMDevice that was just realized; %NULL if none
|
2012-02-10 13:25:39 -06:00
|
|
|
*/
|
2015-12-17 18:40:36 +01:00
|
|
|
static NMDevice *
|
2015-12-11 16:13:13 +01:00
|
|
|
system_create_virtual_device(NMManager *self, NMConnection *connection)
|
2012-02-10 13:25:39 -06:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
NMDeviceFactory *factory;
|
2021-06-15 00:41:45 +02:00
|
|
|
NMSettingsConnection *const *connections;
|
|
|
|
|
guint i;
|
2021-11-09 13:28:54 +01:00
|
|
|
gs_free char *iface = NULL;
|
|
|
|
|
const char *parent_spec;
|
|
|
|
|
NMDevice *device = NULL, *parent = NULL;
|
|
|
|
|
NMDevice *dev_candidate;
|
|
|
|
|
GError *error = NULL;
|
2021-06-15 00:41:45 +02:00
|
|
|
NMLogLevel log_level;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2015-12-18 12:09:38 +01:00
|
|
|
g_return_val_if_fail(NM_IS_MANAGER(self), NULL);
|
|
|
|
|
g_return_val_if_fail(NM_IS_CONNECTION(connection), NULL);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-11-21 18:05:11 +01:00
|
|
|
iface = nm_manager_get_connection_iface(self, connection, &parent, &parent_spec, &error);
|
2015-12-11 16:13:13 +01:00
|
|
|
if (!iface) {
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG3D(LOGD_DEVICE, connection, "can't get a name of a virtual device: %s", error->message);
|
2015-12-11 16:13:13 +01:00
|
|
|
g_error_free(error);
|
2015-12-17 18:40:36 +01:00
|
|
|
return NULL;
|
2015-12-11 16:13:13 +01:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-11-21 18:05:11 +01:00
|
|
|
if (parent_spec && !parent) {
|
|
|
|
|
/* parent is not ready, wait */
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2015-12-11 15:40:34 +01:00
|
|
|
/* See if there's a device that is already compatible with this connection */
|
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (dev_candidate, &priv->devices_lst_head, devices_lst) {
|
2018-06-27 17:00:55 +02:00
|
|
|
if (nm_device_check_connection_compatible(dev_candidate, connection, NULL)) {
|
2018-03-23 21:51:07 +01:00
|
|
|
if (nm_device_is_real(dev_candidate)) {
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG3D(LOGD_DEVICE, connection, "already created virtual interface name %s", iface);
|
2015-12-17 18:40:36 +01:00
|
|
|
return NULL;
|
2015-12-11 15:40:34 +01:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-03-23 21:51:07 +01:00
|
|
|
device = dev_candidate;
|
2015-12-11 15:40:34 +01:00
|
|
|
break;
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
2015-01-19 13:11:29 +01:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2015-12-11 15:40:34 +01:00
|
|
|
if (!device) {
|
|
|
|
|
/* No matching device found. Proceed to create a new one. */
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2015-12-11 15:40:34 +01:00
|
|
|
factory = nm_device_factory_manager_find_factory_for_connection(connection);
|
|
|
|
|
if (!factory) {
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG3E(LOGD_DEVICE,
|
|
|
|
|
connection,
|
|
|
|
|
"(%s) NetworkManager plugin for '%s' unavailable",
|
|
|
|
|
iface,
|
2016-03-02 11:38:26 +01:00
|
|
|
nm_connection_get_connection_type(connection));
|
2015-12-17 18:40:36 +01:00
|
|
|
return NULL;
|
2015-12-11 15:40:34 +01:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2015-12-11 16:13:13 +01:00
|
|
|
device = nm_device_factory_create_device(factory, iface, NULL, connection, NULL, &error);
|
|
|
|
|
if (!device) {
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG3W(LOGD_DEVICE, connection, "factory can't create the device: %s", error->message);
|
2015-12-11 16:13:13 +01:00
|
|
|
g_error_free(error);
|
2015-12-17 18:40:36 +01:00
|
|
|
return NULL;
|
2015-12-11 16:13:13 +01:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG3D(LOGD_DEVICE, connection, "create virtual device %s", nm_device_get_iface(device));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2015-12-11 16:13:13 +01:00
|
|
|
if (!add_device(self, device, &error)) {
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG3W(LOGD_DEVICE,
|
|
|
|
|
connection,
|
|
|
|
|
"can't register the device with manager: %s",
|
|
|
|
|
error->message);
|
2015-12-11 16:13:13 +01:00
|
|
|
g_error_free(error);
|
2015-12-11 15:40:34 +01:00
|
|
|
g_object_unref(device);
|
2015-12-17 18:40:36 +01:00
|
|
|
return NULL;
|
2015-12-11 15:40:34 +01:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2015-12-11 15:40:34 +01:00
|
|
|
/* add_device() takes a reference that NMManager still owns, so it's
|
|
|
|
|
* safe to unref here and still return @device.
|
|
|
|
|
*/
|
2015-12-08 14:51:56 +01:00
|
|
|
g_object_unref(device);
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-04-08 09:26:05 +02:00
|
|
|
if (!nm_device_check_unrealized_device_managed(device)) {
|
|
|
|
|
_LOG3D(LOGD_DEVICE,
|
|
|
|
|
connection,
|
|
|
|
|
"skip activation because virtual device '%s' is unmanaged",
|
|
|
|
|
nm_device_get_iface(device));
|
|
|
|
|
return device;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-01-27 18:38:00 +01:00
|
|
|
if (!find_master(self, connection, device, NULL, NULL, NULL, &error)) {
|
|
|
|
|
_LOG3D(LOGD_DEVICE, connection, "skip activation: %s", error->message);
|
|
|
|
|
g_error_free(error);
|
|
|
|
|
return device;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-09-24 16:58:07 -05:00
|
|
|
/* Create backing resources if the device has any autoconnect connections */
|
2021-06-15 00:41:45 +02:00
|
|
|
connections = nm_settings_get_connections_sorted_by_autoconnect_priority(priv->settings, NULL);
|
2017-02-03 14:15:16 +01:00
|
|
|
for (i = 0; connections[i]; i++) {
|
2021-11-09 13:28:54 +01:00
|
|
|
NMConnection *candidate = nm_settings_connection_get_connection(connections[i]);
|
2014-09-24 16:58:07 -05:00
|
|
|
NMSettingConnection *s_con;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-06-27 17:00:55 +02:00
|
|
|
if (!nm_device_check_connection_compatible(device, candidate, NULL))
|
2014-09-24 16:58:07 -05:00
|
|
|
continue;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-09-24 16:58:07 -05:00
|
|
|
s_con = nm_connection_get_setting_connection(candidate);
|
|
|
|
|
g_assert(s_con);
|
2019-12-13 18:05:38 +01:00
|
|
|
if (!nm_setting_connection_get_autoconnect(s_con)
|
|
|
|
|
|| nm_settings_connection_autoconnect_is_blocked(connections[i]))
|
2014-09-24 16:58:07 -05:00
|
|
|
continue;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-09-24 16:58:07 -05:00
|
|
|
/* Create any backing resources the device needs */
|
2015-12-11 16:13:13 +01:00
|
|
|
if (!nm_device_create_and_realize(device, connection, parent, &error)) {
|
2017-09-14 09:26:51 +02:00
|
|
|
log_level =
|
|
|
|
|
g_error_matches(error, NM_DEVICE_ERROR, NM_DEVICE_ERROR_MISSING_DEPENDENCIES)
|
|
|
|
|
? LOGL_DEBUG
|
|
|
|
|
: LOGL_ERR;
|
|
|
|
|
_NMLOG3(log_level,
|
|
|
|
|
LOGD_DEVICE,
|
|
|
|
|
connection,
|
|
|
|
|
"couldn't create the device: %s",
|
|
|
|
|
error->message);
|
2015-12-11 16:13:13 +01:00
|
|
|
g_error_free(error);
|
2015-12-17 18:40:36 +01:00
|
|
|
return NULL;
|
2014-09-24 16:58:07 -05:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-09-13 18:38:59 +02:00
|
|
|
retry_connections_for_parent_device(self, device);
|
2014-09-24 16:58:07 -05:00
|
|
|
break;
|
2011-10-18 13:48:44 +02:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2015-12-17 18:40:36 +01:00
|
|
|
return device;
|
2011-10-18 13:48:44 +02:00
|
|
|
}
|
|
|
|
|
|
2016-01-07 17:54:38 +01:00
|
|
|
static void
|
|
|
|
|
retry_connections_for_parent_device(NMManager *self, NMDevice *device)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-06-15 00:41:45 +02:00
|
|
|
NMSettingsConnection *const *connections;
|
|
|
|
|
guint i;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-01-07 17:54:38 +01:00
|
|
|
g_return_if_fail(device);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2021-06-15 00:41:45 +02:00
|
|
|
connections = nm_settings_get_connections_sorted_by_autoconnect_priority(priv->settings, NULL);
|
2017-02-03 14:15:16 +01:00
|
|
|
for (i = 0; connections[i]; i++) {
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn = connections[i];
|
2021-11-09 13:28:54 +01:00
|
|
|
NMConnection *connection = nm_settings_connection_get_connection(sett_conn);
|
2016-03-08 12:02:54 +01:00
|
|
|
gs_free_error GError *error = NULL;
|
2021-11-09 13:28:54 +01:00
|
|
|
gs_free char *ifname = NULL;
|
|
|
|
|
NMDevice *parent;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-11-21 18:05:11 +01:00
|
|
|
parent = find_parent_device_for_connection(self, connection, NULL, NULL);
|
2016-03-08 12:02:54 +01:00
|
|
|
if (parent == device) {
|
|
|
|
|
/* Only try to activate devices that don't already exist */
|
2019-11-21 18:05:11 +01:00
|
|
|
ifname = nm_manager_get_connection_iface(self, connection, &parent, NULL, &error);
|
2016-03-08 12:02:54 +01:00
|
|
|
if (ifname) {
|
|
|
|
|
if (!nm_platform_link_get_by_ifname(NM_PLATFORM_GET, ifname))
|
2018-08-11 11:08:17 +02:00
|
|
|
connection_changed(self, sett_conn);
|
2016-03-08 12:02:54 +01:00
|
|
|
}
|
|
|
|
|
}
|
2016-01-07 17:54:38 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2011-10-18 13:48:44 +02:00
|
|
|
static void
|
2016-04-13 16:03:06 +02:00
|
|
|
connection_changed(NMManager *self, NMSettingsConnection *sett_conn)
|
2008-04-29 Dan Williams <dcbw@redhat.com>
Handle HAL dropouts better; allow NM to start up even if HAL isn't up yet.
* marshallers/nm-marshal.list
- Add marshaller
* src/NetworkManager.c
- (main): let the NMManager handle the NMHalManager
* src/nm-hal-manager.c
src/nm-hal-manager.h
- convert to a GObject, and emit signals when stuff changes. Let the
NMManager handle the signals, instead of the NMHalManager calling
into the NMManager.
* src/nm-manager.c
src/nm-manager.h
- (remove_one_device): consolidate device removals here
- (dispose): use remove_one_device()
- (nm_manager_get_device_by_udi): make static
- (deferred_hal_manager_query_devices): idle handler to query the HAL
manager for devices at startup or wakeup time
- (nm_manager_new): create and monitor the HAL manager
- (hal_manager_udi_added_cb): new function; do what
nm_manager_add_device() used to do when signalled by the hal manager
- (hal_manager_udi_removed_cb): new function; do what
nm_manager_remove_device() used to do when signalled by the hal
manager
- (hal_manager_rfkill_changed_cb): handle rfkill changes from the
hal manager
- (hal_manager_hal_reappeared_cb): when HAL comes back, remove devices
in our device list that aren't known to HAL
- (impl_manager_sleep): on wakeup, re-add devices from an idle handler;
see comments on nm-hal-manager.c::nm_manager_state_changed() a few
commits ago
- (nm_manager_get_device_by_path, nm_manager_is_udi_managed,
nm_manager_activation_pending, nm_manager_wireless_enabled,
nm_manager_wireless_hardware_enabled,
nm_manager_set_wireless_hardware_enabled): remove, unused
git-svn-id: http://svn-archive.gnome.org/svn/NetworkManager/trunk@3619 4912f4e0-d625-0410-9fb7-b9a5a253dbdc
2008-04-29 23:03:00 +00:00
|
|
|
{
|
2019-06-20 09:07:45 +02:00
|
|
|
NMConnection *connection;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device;
|
2019-06-20 09:07:45 +02:00
|
|
|
|
core: add "external" flag for connections of external devices
When a device is not marked as unmanaged, but also not actively managed
by NetworkManager, then NetworkManager will generate an in-memory
profile to represent the active state, if the device is up and
configured (with an IP address).
Such profiles are commonly named like "eth0", and they are utterly
confusing to users, because they look as if NetworkManager actually
manages the device, when it really just shows that somebody else configures
the device.
We should express this better in the UI, hence add flags to indicate
that.
In practice, such profiles are UNSAVED, NM_GENERATED, and VOLATILE. But
add an explicit flag to represent that.
https://bugzilla.redhat.com/show_bug.cgi?id=1816202
2020-06-08 19:34:50 +02:00
|
|
|
if (NM_FLAGS_ANY(nm_settings_connection_get_flags(sett_conn),
|
|
|
|
|
NM_SETTINGS_CONNECTION_INT_FLAGS_VOLATILE
|
|
|
|
|
| NM_SETTINGS_CONNECTION_INT_FLAGS_EXTERNAL))
|
2019-06-20 09:07:45 +02:00
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
connection = nm_settings_connection_get_connection(sett_conn);
|
2015-12-17 18:40:36 +01:00
|
|
|
|
|
|
|
|
if (!nm_connection_is_virtual(connection))
|
|
|
|
|
return;
|
|
|
|
|
|
2016-04-13 16:03:06 +02:00
|
|
|
device = system_create_virtual_device(self, connection);
|
2015-12-17 18:40:36 +01:00
|
|
|
if (!device)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
/* Maybe the device that was created was needed by some other
|
|
|
|
|
* connection's device (parent of a VLAN). Let the connections
|
|
|
|
|
* that can use the newly created device as a parent know. */
|
2016-04-13 16:03:06 +02:00
|
|
|
retry_connections_for_parent_device(self, device);
|
2011-10-18 13:48:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2016-04-13 16:03:06 +02:00
|
|
|
connection_added_cb(NMSettings *settings, NMSettingsConnection *sett_conn, NMManager *self)
|
2011-10-18 13:48:44 +02:00
|
|
|
{
|
2020-02-18 19:09:18 +01:00
|
|
|
connection_changed(self, sett_conn);
|
2016-04-13 16:03:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
connection_updated_cb(NMSettings *settings,
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn,
|
settings: rework tracking settings connections and settings plugins
Completely rework how settings plugins handle connections and how
NMSettings tracks the list of connections.
Previously, settings plugins would return objects of (a subtype of) type
NMSettingsConnection. The NMSettingsConnection was tightly coupled with
the settings plugin. That has a lot of downsides.
Change that. When changing this basic relation how settings connections
are tracked, everything falls apart. That's why this is a huge change.
Also, since I have to largely rewrite the settings plugins, I also
added support for multiple keyfile directories, handle in-memory
connections only by keyfile plugin and (partly) use copy-on-write NMConnection
instances. I don't want to spend effort rewriting large parts while
preserving the old way, that anyway should change. E.g. while rewriting ifcfg-rh,
I don't want to let it handle in-memory connections because that's not right
long-term.
--
If the settings plugins themselves create subtypes of NMSettingsConnection
instances, then a lot of knowledge about tracking connections moves
to the plugins.
Just try to follow the code for what happens during nm_settings_add_connection().
Note how the logic is spread out:
- nm_settings_add_connection() calls plugin's add_connection()
- add_connection() creates a NMSettingsConnection subtype
- the plugin has to know that it's called during add-connection and
not emit the NM_SETTINGS_PLUGIN_CONNECTION_ADDED signal
- NMSettings calls claim_connection() which hooks up the new
NMSettingsConnection instance and configures the instance
(like calling nm_settings_connection_added()).
This summary does not sound like a lot, but try to follow that code. The logic
is all over the place.
Instead, settings plugins should have a very simple API for adding, modifying,
deleting, loading and reloading connections. All the plugin does is to return a
NMSettingsStorage handle. The storage instance is a handle to identify a profile
in storage (e.g. a particular file). The settings plugin is free to subtype
NMSettingsStorage, but it's not necessary.
There are no more events raised, and the settings plugin implements the small
API in a straightforward manner.
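As a rough sketch of that simplified plugin API (the vtable shape and all names are illustrative, not NM's actual interface):

    typedef struct {
        /* returns a handle that identifies where the profile is stored */
        NMSettingsStorage *(*add_connection)(NMSettingsPlugin *self,
                                             NMConnection *connection,
                                             GError **error);
        gboolean (*delete_connection)(NMSettingsPlugin *self,
                                      NMSettingsStorage *storage,
                                      GError **error);
        gboolean (*reload_connections)(NMSettingsPlugin *self,
                                       GError **error);
    } SettingsPluginOpsSketch;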
NMSettings now drives all of this. Even NMSettingsConnection has now
very little concern about how it's tracked and delegates only to NMSettings.
This should make settings plugins simpler. Currently settings plugins
are so cumbersome to implement, that we avoid having them. It should not be
like that and it should be easy, beneficial and lightweight to create a new
settings plugin.
Note also how the settings plugins no longer care about duplicate UUIDs.
Duplicated UUIDs are a fact of life and NMSettings must handle them. No
need to overly concern settings plugins with that.
--
NMSettingsConnection is exposed directly on D-Bus (being a subtype of
NMDBusObject) but it was also a GObject type provided by the settings
plugin. Hence, it was not possible to migrate a profile from one plugin to
another.
However that would be useful when one plugin does not support a
connection type (like ifcfg-rh not supporting VPN). Currently such
migration is not implemented except for migrating them to/from keyfile's
run directory. The problem is that migrating profiles in general is
complicated but in some cases it is important to do.
For example checkpoint rollback should recreate the profile in the right
settings plugin, not just add it to persistent storage. This is not yet
properly implemented.
--
Previously, both the keyfile and ifcfg-rh plugins implemented in-memory (unsaved)
profiles, while the ifupdown plugin cannot handle them. That meant duplication of code,
and an ifupdown profile could not be modified or made unsaved.
This is now unified and only keyfile plugin handles in-memory profiles (bgo #744711).
Also, NMSettings is aware of such profiles and treats them specially.
In particular, NMSettings drives the migration between persistent and non-persistent
storage.
Note that a settings plugin may create truly generated, in-memory profiles.
The settings plugin is free to generate and persist the profiles in any way it
wishes. But the concept of "unsaved" profiles is now something explicitly handled
by the keyfile plugin. Also, these "unsaved" keyfile profiles are persisted to the file system
too, to the /run directory. This is great for two reasons: first of all, all
profiles from keyfile storage in fact have a backing file -- even the
unsaved ones. It also means you can create "unsaved" profiles in /run
and load them with `nmcli connection load`, meaning there is a
file-based API for creating unsaved profiles.
The other advantage is that these profiles now survive restarting
NetworkManager. It's paramount that restarting the daemon is as
non-disruptive as possible. Persisting unsaved files to /run is a
significant improvement here.
--
In the past, NMSettingsConnection also implemented the NMConnection interface.
That was already changed a while ago; instead, users now call
nm_settings_connection_get_connection() to delegate to a
NMSimpleConnection. What still happened, however, was that the NMConnection
instance was never swapped; instead, the instance was modified with
nm_connection_replace_settings_from_connection(), clear-secrets, etc.
Change that and treat the NMConnection instance as immutable. Instead of modifying
it, reference/clone a new instance. Previously, when somebody wanted to keep a
reference to an NMConnection, the profile would be cloned.
Now, it is supposed to be safe to reference the instance directly and everybody
must ensure not to modify the instance. nmtst_connection_assert_unchanging()
should help with that.
The point is that the settings plugins may keep references to the
NMConnection instance, and so does the NMSettingsConnection. We want
to avoid cloning the instances as long as they are the same.
Likewise, the device's applied connection can now also be referenced
instead of cloning it. This is not yet done, and possibly there are
further improvements possible.
--
Also implement multiple keyfile directories /usr/lib, /etc, /run (rh #1674545,
bgo #772414).
It was always the case that multiple files could provide the same UUID
(both in case of keyfile and ifcfg-rh). For the keyfile plugin, if a profile in
read-only storage in /usr/lib gets modified, it actually gets stored in
/etc (or /run, if the profile is unsaved).
--
While at it, make /etc/network/interfaces profiles for the ifupdown plugin reloadable.
--
https://bugzilla.gnome.org/show_bug.cgi?id=772414
https://bugzilla.gnome.org/show_bug.cgi?id=744711
https://bugzilla.redhat.com/show_bug.cgi?id=1674545
2019-06-13 17:12:20 +02:00
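/*
 * A hypothetical sketch of the "small API" described above: the plugin only
 * hands back NMSettingsStorage handles and implements a handful of
 * operations, while NMSettings drives all the tracking. The typedef and
 * member names are illustrative, not NM's actual plugin vtable.
 */
#include <glib.h>

typedef struct _NMSettingsPlugin  NMSettingsPlugin;  /* opaque plugin instance */
typedef struct _NMSettingsStorage NMSettingsStorage; /* handle identifying one stored profile */
typedef struct _NMConnection      NMConnection;

typedef struct {
    gboolean (*add_connection)(NMSettingsPlugin   *plugin,
                               NMConnection       *connection,
                               NMSettingsStorage **out_storage,
                               GError            **error);
    gboolean (*update_connection)(NMSettingsPlugin  *plugin,
                                  NMSettingsStorage *storage,
                                  NMConnection      *connection,
                                  GError           **error);
    gboolean (*delete_connection)(NMSettingsPlugin  *plugin,
                                  NMSettingsStorage *storage,
                                  GError           **error);
    void (*reload_connections)(NMSettingsPlugin *plugin);
} ExampleSettingsPluginOps;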
|
|
|
guint update_reason_u,
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self)
|
2016-04-13 16:03:06 +02:00
|
|
|
{
|
2020-02-18 19:09:18 +01:00
|
|
|
connection_changed(self, sett_conn);
|
2008-05-22 14:22:31 +00:00
|
|
|
}
|
|
|
|
|
|
2022-07-11 16:07:09 +02:00
|
|
|
static void
|
|
|
|
|
connections_changed(NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
NMSettingsConnection *const *connections;
|
|
|
|
|
guint i;
|
|
|
|
|
|
|
|
|
|
connections = nm_settings_get_connections_sorted_by_autoconnect_priority(priv->settings, NULL);
|
|
|
|
|
for (i = 0; connections[i]; i++)
|
|
|
|
|
connection_changed(self, connections[i]);
|
|
|
|
|
}
|
|
|
|
|
|
2017-12-05 13:55:25 +01:00
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
_delete_volatile_connection_all(NMManager *self, gboolean do_delete)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
NMCListElem *elem;
|
2017-12-05 13:55:25 +01:00
|
|
|
|
2019-06-20 17:12:48 +02:00
|
|
|
while (
|
|
|
|
|
(elem = c_list_first_entry(&priv->delete_volatile_connection_lst_head, NMCListElem, lst))) {
|
2017-12-05 13:55:25 +01:00
|
|
|
gs_unref_object NMSettingsConnection *connection = NULL;
|
|
|
|
|
|
2019-07-02 16:25:33 +02:00
|
|
|
connection = nm_c_list_elem_free_steal(elem);
|
2017-12-05 13:55:25 +01:00
|
|
|
if (do_delete)
|
|
|
|
|
_delete_volatile_connection_do(self, connection);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
_delete_volatile_connection_cb(gpointer user_data)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = user_data;
|
2017-12-05 13:55:25 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
|
|
|
|
|
priv->delete_volatile_connection_idle_id = 0;
|
|
|
|
|
_delete_volatile_connection_all(self, TRUE);
|
|
|
|
|
return G_SOURCE_REMOVE;
|
|
|
|
|
}
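/*
 * A minimal self-contained sketch of the pattern used above: queue items on
 * a list and coalesce the actual deletion into one one-shot idle callback,
 * so deletion never runs re-entrantly from the code that requested it.
 * Names are illustrative.
 */
#include <glib.h>

typedef struct {
    GSList *pending; /* items queued for deferred deletion */
    guint   idle_id; /* nonzero while an idle callback is scheduled */
} ExampleDeleter;

static gboolean
example_deleter_idle_cb(gpointer user_data)
{
    ExampleDeleter *d       = user_data;
    GSList         *pending = d->pending;

    d->idle_id = 0; /* clear first, so newly queued items re-schedule */
    d->pending = NULL;
    g_slist_free_full(pending, g_free);
    return G_SOURCE_REMOVE; /* one-shot source */
}

static void
example_deleter_queue(ExampleDeleter *d, gpointer item)
{
    d->pending = g_slist_prepend(d->pending, item);
    if (d->idle_id == 0)
        d->idle_id = g_idle_add(example_deleter_idle_cb, d);
}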
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
connection_flags_changed(NMSettings *settings, NMSettingsConnection *connection, gpointer user_data)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = user_data;
|
2017-12-05 13:55:25 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core: add "external" flag for connections of external devices
When a device is not marked as unmanaged, but also not actively managed
by NetworkManager, then NetworkManager will generate an in-memory
profile to represent the active state, if the device is up and
configured (with an IP address).
Such profiles are commonly named like "eth0", and they are utterly
confusing to users, because they look as if NetworkManager actually
manages the device, when it really just shows that somebody else configures
the device.
We should express this better in the UI, hence add flags to indicate
that.
In practice, such profiles are UNSAVED, NM_GENERATED, and VOLATILE. But
add an explicit flag to represent that.
https://bugzilla.redhat.com/show_bug.cgi?id=1816202
2020-06-08 19:34:50 +02:00
|
|
|
if (!NM_FLAGS_ANY(nm_settings_connection_get_flags(connection),
|
|
|
|
|
NM_SETTINGS_CONNECTION_INT_FLAGS_VOLATILE
|
|
|
|
|
| NM_SETTINGS_CONNECTION_INT_FLAGS_EXTERNAL))
|
2017-12-05 13:55:25 +01:00
|
|
|
return;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-19 15:27:54 +02:00
|
|
|
if (active_connection_find(self,
|
|
|
|
|
connection,
|
|
|
|
|
NULL,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_DEACTIVATED,
|
2021-05-13 10:49:39 +02:00
|
|
|
FALSE,
|
2018-04-19 15:27:54 +02:00
|
|
|
NULL)) {
|
2018-04-19 15:42:27 +02:00
|
|
|
/* the connection still has an active-connection. It will be purged
|
2017-12-05 13:55:25 +01:00
|
|
|
* when the active connection(s) get(s) removed. */
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2019-06-20 17:12:48 +02:00
|
|
|
c_list_link_tail(&priv->delete_volatile_connection_lst_head,
|
|
|
|
|
&nm_c_list_elem_new_stale(g_object_ref(connection))->lst);
|
2017-12-05 13:55:25 +01:00
|
|
|
if (!priv->delete_volatile_connection_idle_id)
|
|
|
|
|
priv->delete_volatile_connection_idle_id = g_idle_add(_delete_volatile_connection_cb, self);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2008-04-07 Dan Williams <dcbw@redhat.com>
* include/NetworkManager.h
- Remove the DOWN and CANCELLED device states
- Add UNMANAGED and UNAVAILABLE device states
- Document the device states
* introspection/nm-device.xml
src/nm-device-interface.c
src/nm-device-interface.h
- Add the 'managed' property
* test/nm-tool.c
- (detail_device): print out device state
* src/NetworkManagerSystem.h
src/backends/NetworkManagerArch.c
src/backends/NetworkManagerDebian.c
src/backends/NetworkManagerFrugalware.c
src/backends/NetworkManagerGentoo.c
src/backends/NetworkManagerMandriva.c
src/backends/NetworkManagerPaldo.c
src/backends/NetworkManagerRedHat.c
src/backends/NetworkManagerSlackware.c
src/backends/NetworkManagerSuSE.c
- (nm_system_device_get_system_config, nm_system_device_get_disabled
nm_system_device_free_system_config): remove; they were unused and
their functionality should be re-implemented in each distro's
system settings service plugin
* src/nm-gsm-device.c
src/nm-gsm-device.h
src/nm-cdma-device.c
src/nm-cdma-device.h
- (*_new): take the 'managed' argument
* src/nm-device.c
- (nm_device_set_address): remove, fold into nm_device_bring_up()
- (nm_device_init): start in unmanaged state, not disconnected
- (constructor): don't start device until the system settings service
has had a chance to figure out if the device is managed or not
- (nm_device_deactivate, nm_device_bring_up, nm_device_bring_down):
don't set device state here, let callers handle that as appropriate
- (nm_device_dispose): don't touch the device if it's not managed
- (set_property, get_property, nm_device_class_init): implement the
'managed' property
- (nm_device_state_changed): bring the device up if it's now managed,
and deactivate it if it used to be active
- (nm_device_get_managed, nm_device_set_managed): do the right thing
with the managed state
* src/nm-hal-manager.c
- (wired_device_creator, wireless_device_creator, modem_device_creator):
take initial managed state and pass it along to device constructors
- (create_device_and_add_to_list): get managed state and pass to
type creators
* src/nm-device-802-11-wireless.c
- (real_can_activate): fold in most of
nm_device_802_11_wireless_can_activate()
- (can_scan): can't scan in UNAVAILABLE or UNMANAGED
- (link_timeout_cb): instead of deactivating, change device state and
let the device state handler do it
- (real_update_hw_address): clean up
- (state_changed_cb): when entering UNAVAILABLE state, schedule an idle
handler to transition to DISCONNECTED if the device isn't rfkilled
* src/nm-device-802-3-ethernet.c
- (set_carrier): move above callers and get rid of prototype
- (device_state_changed): when entering UNAVAILABLE state, schedule an
idle handler to transition to DISCONNECTED if the device has a
carrier
- (real_update_hw_address): clean up
- (link_timeout_cb, ppp_state_changed): change state instead of calling
deactivation directly as deactivation doesn't change state anymore
* src/NetworkManagerPolicy.c
- (schedule_activate_check): yay, remove wireless_enabled hack since
the NMManager and wireless devices work that out themselves now
- (device_state_changed): change to a switch and update for new device
states
- (device_carrier_changed): remove; device handles this now through
state changes
- (device_added): don't care about carrier any more; the initial
activation check will happen when the device transitions to
DISCONNECTED
* src/nm-manager.c
- (dispose): clear unmanaged devices
- (handle_unmanaged_devices): update unmanaged device list and toggle
the managed property on each device when needed
- (system_settings_properties_changed_cb): handle signals from the
system settings service
- (system_settings_get_unmanaged_devices_cb): handle callback from
getting the unmanaged device list method call
- (query_unmanaged_devices): ask the system settings service for its
list of unmanaged devices
- (nm_manager_name_owner_changed, initial_get_connections): get unmanaged
devices
- (manager_set_wireless_enabled): push rfkill state down to wireless
devices directly and let them handle the necessary state transitions
- (manager_device_state_changed): update for new device states
- (nm_manager_add_device): set initial rfkill state on wireless devices
- (nm_manager_remove_device): don't touch the device if it's unmanaged
- (nm_manager_activate_connection): return error if the device is
unmanaged
- (nm_manager_sleep): handle new device states correctly; don't change
the state of unavailable/unmanaged devices
* libnm-glib/nm-device-802-11-wireless.c
- (state_changed_cb): update for new device states
git-svn-id: http://svn-archive.gnome.org/svn/NetworkManager/trunk@3540 4912f4e0-d625-0410-9fb7-b9a5a253dbdc
2008-04-08 02:58:02 +00:00
|
|
|
static void
|
2010-10-27 20:05:23 -05:00
|
|
|
system_unmanaged_devices_changed_cb(NMSettings *settings, GParamSpec *pspec, gpointer user_data)
|
2008-04-08 02:58:02 +00:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = NM_MANAGER(user_data);
|
2010-10-27 20:05:23 -05:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device;
|
2008-04-08 02:58:02 +00:00
|
|
|
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
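/*
 * A minimal sketch of the embedded-list idea from the commit message above,
 * using the same c-list API that NetworkManager vendors: the link node lives
 * inside the tracked struct, so linking needs no extra allocation, and
 * membership tests and unlinking are O(1). The struct is illustrative.
 */
#include <assert.h>
#include <c-list.h>

typedef struct {
    int   ifindex;
    CList devices_lst; /* embedded link node */
} ExampleDevice;

int
main(void)
{
    CList          head = C_LIST_INIT(head);
    ExampleDevice  dev  = {.ifindex = 1};
    ExampleDevice *iter;

    c_list_link_tail(&head, &dev.devices_lst);
    assert(c_list_is_linked(&dev.devices_lst)); /* O(1) membership test */

    c_list_for_each_entry (iter, &head, devices_lst)
        assert(iter->ifindex == 1);

    c_list_unlink(&dev.devices_lst); /* O(1), no list head needed */
    assert(!c_list_is_linked(&dev.devices_lst));
    return 0;
}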
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst)
|
2022-02-02 09:39:23 +01:00
|
|
|
nm_device_set_unmanaged_by_user_settings(device, TRUE);
|
2008-09-18 Dan Williams <dcbw@redhat.com>
Implement support for honoring configured and automatic hostnames, and for
setting the configured hostname.
* introspection/nm-ip4-config.xml
src/nm-ip4-config.c
src/nm-ip4-config.h
src/dhcp-manager/nm-dhcp-manager.c
- Remove useless hostname property; it's not really part of the IPv4
config
* introspection/nm-settings-system.xml
libnm-glib/nm-dbus-settings-system.c
libnm-glib/nm-dbus-settings-system.h
- Add SetHostname() call to system settings D-Bus interface
- Add Hostname property to system settings D-Bus interface
- (nm_dbus_settings_system_save_hostname,
nm_dbus_settings_system_get_hostname): implement
* src/nm-device.c
src/nm-device.h
- (nm_device_get_dhcp4_config): implement
* src/nm-manager.c
src/nm-manager.h
- Fetch and track system settings service hostname changes, and proxy
the changes via a GObject property of the manager
* system-settings/src/nm-system-config-interface.c
system-settings/src/nm-system-config-interface.h
- Replace nm_system_config_interface_supports_add() with a capabilities
bitfield
* system-settings/src/nm-system-config-error.c
system-settings/src/nm-system-config-error.h
- Add additional errors
* system-settings/src/dbus-settings.c
system-settings/src/dbus-settings.h
- (get_property, nm_sysconfig_settings_class_init): add hostname
property; first plugin returning a hostname wins
- (impl_settings_add_connection): use plugin capabilities instead of
nm_system_config_interface_supports_add()
- (impl_settings_save_hostname): implement hostname saving
* src/NetworkManagerPolicy.c
- (lookup_thread_run_cb, lookup_thread_worker, lookup_thread_new,
lookup_thread_die): implement an asynchronous hostname lookup thread
which given an IPv4 address tries to look up the hostname for that
address with reverse DNS
- (get_best_device): split out best device code from
update_routing_and_dns()
- (update_etc_hosts): update /etc/hosts with the machine's new hostname
to preserve the 127.0.0.1 reverse mapping that so many things require
- (set_system_hostname): set a given hostname
- (update_system_hostname): implement hostname policy; a configured
hostname (from the system settings service) is used if available,
otherwise an automatically determined hostname from DHCP, VPN, etc.
If there was no automatically determined hostname, reverse DNS of
the best device's IP address will be used, and as a last resort the
hostname 'localhost.localdomain' is set.
- (update_routing_and_dns): use get_best_device(); update the system
hostname when the network config changes
- (hostname_changed): update system hostname if the system settings
service signals a hostname change
- (nm_policy_new): listen for system settings service hostname changes
- (nm_policy_destroy): ensure that an in-progress hostname lookup thread
gets told to die
* system-settings/plugins/keyfile/plugin.c
system-settings/plugins/ifcfg-suse/plugin.c
- (get_property, sc_plugin_ifcfg_class_init): implement hostname and
capabilities properties
* system-settings/plugins/ifcfg-fedora/shvar.c
- (svOpenFile): re-enable R/W access of ifcfg files since the plugin
writes out /etc/sysconfig/network now
* system-settings/plugins/ifcfg-fedora/plugin.c
- (plugin_get_hostname): get hostname from /etc/sysconfig/network
- (plugin_set_hostname): save hostname to /etc/sysconfig/network
- (sc_network_changed_cb): handle changes to /etc/sysconfig/network
- (sc_plugin_ifcfg_init): monitor /etc/sysconfig/network for changes
- (get_property, set_property, sc_plugin_ifcfg_class_init): implement
hostname get/set and capabilities get
git-svn-id: http://svn-archive.gnome.org/svn/NetworkManager/trunk@4077 4912f4e0-d625-0410-9fb7-b9a5a253dbdc
2008-09-18 15:16:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2022-01-04 18:28:46 +01:00
|
|
|
_static_hostname_changed_cb(NMHostnameManager *hostname_manager, GParamSpec *pspec, NMManager *self)
|
2008-09-18 15:16:44 +00:00
|
|
|
{
|
2017-04-23 14:20:37 +02:00
|
|
|
nm_dispatcher_call_hostname(NULL, NULL, NULL);
|
2008-04-08 02:58:02 +00:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2022-02-01 22:08:52 +01:00
|
|
|
static const RfkillTypeDesc _rfkill_type_desc[NM_RFKILL_TYPE_MAX] = {
|
|
|
|
|
[NM_RFKILL_TYPE_WLAN] =
|
|
|
|
|
{
|
|
|
|
|
.prop_id = PROP_WIRELESS_ENABLED,
|
|
|
|
|
.hw_prop_id = PROP_WIRELESS_HARDWARE_ENABLED,
|
|
|
|
|
.key = NM_CONFIG_STATE_PROPERTY_WIFI_ENABLED,
|
|
|
|
|
},
|
|
|
|
|
[NM_RFKILL_TYPE_WWAN] =
|
|
|
|
|
{
|
|
|
|
|
.prop_id = PROP_WWAN_ENABLED,
|
|
|
|
|
.hw_prop_id = PROP_WWAN_HARDWARE_ENABLED,
|
|
|
|
|
.key = NM_CONFIG_STATE_PROPERTY_WWAN_ENABLED,
|
|
|
|
|
},
|
|
|
|
|
};
|
|
|
|
|
|
2010-09-01 17:08:10 -05:00
|
|
|
static gboolean
|
2022-02-01 22:19:36 +01:00
|
|
|
_rfkill_radio_state_get_enabled(const RfkillRadioState *rstate, gboolean check_changeable)
|
2010-09-01 17:08:10 -05:00
|
|
|
{
|
2011-04-13 21:58:25 -05:00
|
|
|
gboolean enabled;
|
|
|
|
|
|
2022-02-16 15:54:44 +02:00
|
|
|
/* If the device is not owned by the os, hw_enabled will be FALSE, hence
|
|
|
|
|
* we don't need to consider os_owner here.
|
|
|
|
|
*/
|
2011-04-22 14:56:31 -05:00
|
|
|
enabled = rstate->user_enabled && rstate->hw_enabled;
|
2013-05-31 14:28:16 -05:00
|
|
|
if (check_changeable)
|
2011-04-22 14:56:31 -05:00
|
|
|
enabled &= rstate->sw_enabled;
|
2011-04-13 21:58:25 -05:00
|
|
|
return enabled;
|
2010-09-01 17:08:10 -05:00
|
|
|
}
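/*
 * A self-contained worked example of the two modes above, with a stand-in
 * struct whose field names mirror RfkillRadioState: a radio that is user-
 * and hardware-enabled, but soft-blocked in the kernel.
 */
#include <glib.h>

typedef struct {
    gboolean user_enabled, sw_enabled, hw_enabled;
} ExampleRadioState;

static gboolean
example_get_enabled(const ExampleRadioState *rs, gboolean check_changeable)
{
    gboolean enabled = rs->user_enabled && rs->hw_enabled;

    if (check_changeable)
        enabled &= rs->sw_enabled;
    return enabled;
}

static void
example(void)
{
    ExampleRadioState rs = {.user_enabled = TRUE, .hw_enabled = TRUE, .sw_enabled = FALSE};

    g_assert(!example_get_enabled(&rs, TRUE));  /* the reported state honors the soft block */
    g_assert(example_get_enabled(&rs, FALSE));  /* a user toggle ignores the changeable soft
                                                 * block, which NM then lifts itself */
}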
|
|
|
|
|
|
2022-02-01 22:19:36 +01:00
|
|
|
static void
|
2022-03-21 10:19:37 +01:00
|
|
|
_rfkill_radio_state_set_from_manager(NMManager *self, NMRfkillType rtype, RfkillRadioState *rstate)
|
2022-02-01 22:19:36 +01:00
|
|
|
{
|
2022-03-21 10:19:37 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
NMDevice *device;
|
|
|
|
|
|
|
|
|
|
switch (nm_rfkill_manager_get_rfkill_state(priv->rfkill_mgr, rtype)) {
|
2022-03-28 11:15:24 +02:00
|
|
|
case NM_RFKILL_STATE_UNAVAILABLE:
|
|
|
|
|
rstate->sw_enabled = TRUE;
|
|
|
|
|
rstate->hw_enabled = TRUE;
|
|
|
|
|
rstate->os_owner = TRUE;
|
2022-03-21 10:19:37 +01:00
|
|
|
|
|
|
|
|
/* A rfkill-type is available when there is a compatible
|
|
|
|
|
* killswitch or a compatible device. */
|
|
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
|
|
|
|
if (nm_device_get_rfkill_type(device) == rtype) {
|
|
|
|
|
rstate->available = TRUE;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
rstate->available = FALSE;
|
2022-03-28 11:15:24 +02:00
|
|
|
return;
|
2022-02-01 22:19:36 +01:00
|
|
|
case NM_RFKILL_STATE_UNBLOCKED:
|
2022-03-21 10:19:37 +01:00
|
|
|
rstate->available = TRUE;
|
2022-02-01 22:19:36 +01:00
|
|
|
rstate->sw_enabled = TRUE;
|
|
|
|
|
rstate->hw_enabled = TRUE;
|
2022-02-16 15:54:44 +02:00
|
|
|
rstate->os_owner = TRUE;
|
2022-02-01 22:19:36 +01:00
|
|
|
return;
|
|
|
|
|
case NM_RFKILL_STATE_SOFT_BLOCKED:
|
2022-03-21 10:19:37 +01:00
|
|
|
rstate->available = TRUE;
|
2022-02-01 22:19:36 +01:00
|
|
|
rstate->sw_enabled = FALSE;
|
|
|
|
|
rstate->hw_enabled = TRUE;
|
2022-02-16 15:54:44 +02:00
|
|
|
rstate->os_owner = TRUE;
|
2022-02-01 22:19:36 +01:00
|
|
|
return;
|
|
|
|
|
case NM_RFKILL_STATE_HARD_BLOCKED:
|
2022-03-21 10:19:37 +01:00
|
|
|
rstate->available = TRUE;
|
2022-02-01 22:19:36 +01:00
|
|
|
rstate->sw_enabled = FALSE;
|
|
|
|
|
rstate->hw_enabled = FALSE;
|
2022-02-16 15:54:44 +02:00
|
|
|
/* In case the OS doesn't own the NIC, we would be in NM_RFKILL_STATE_HARD_BLOCKED_OS_NOT_OWNER */
|
|
|
|
|
rstate->os_owner = TRUE;
|
2022-02-01 22:19:36 +01:00
|
|
|
return;
|
2022-02-16 15:42:33 +02:00
|
|
|
case NM_RFKILL_STATE_HARD_BLOCKED_OS_NOT_OWNER:
|
2022-03-21 10:19:37 +01:00
|
|
|
rstate->available = TRUE;
|
2022-02-16 15:42:33 +02:00
|
|
|
rstate->sw_enabled = FALSE;
|
|
|
|
|
rstate->hw_enabled = FALSE;
|
2022-02-16 15:54:44 +02:00
|
|
|
rstate->os_owner = FALSE;
|
2022-02-16 15:42:33 +02:00
|
|
|
return;
|
2022-02-01 22:19:36 +01:00
|
|
|
}
|
|
|
|
|
nm_assert_not_reached();
|
|
|
|
|
}
|
|
|
|
|
|
2010-09-01 17:08:10 -05:00
|
|
|
static gboolean
|
2022-02-01 22:19:36 +01:00
|
|
|
_rfkill_radio_state_get(NMManager *self, NMRfkillType rtype)
|
2010-09-01 17:08:10 -05:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
|
2022-02-01 20:07:18 +01:00
|
|
|
nm_assert(_NM_INT_NOT_NEGATIVE(rtype) && rtype < G_N_ELEMENTS(priv->radio_states));
|
|
|
|
|
|
2022-02-01 22:19:36 +01:00
|
|
|
return _rfkill_radio_state_get_enabled(&priv->radio_states[rtype], TRUE);
|
2010-09-01 17:08:10 -05:00
|
|
|
}
|
|
|
|
|
|
2009-06-11 00:39:12 -04:00
|
|
|
static void
|
2022-02-01 22:19:36 +01:00
|
|
|
_rfkill_update_devices(NMManager *self, NMRfkillType rtype, gboolean enabled)
|
2008-02-06 16:50:43 +00:00
|
|
|
{
|
2010-09-01 17:08:10 -05:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device;
|
2008-02-06 16:50:43 +00:00
|
|
|
|
2022-02-01 22:08:52 +01:00
|
|
|
_notify(self, _rfkill_type_desc[rtype].prop_id);
|
2009-11-02 17:29:53 -08:00
|
|
|
|
2009-06-11 00:39:12 -04:00
|
|
|
/* Don't touch devices if asleep/networking disabled */
|
2010-09-01 17:08:10 -05:00
|
|
|
if (manager_sleeping(self))
|
2009-06-11 00:39:12 -04:00
|
|
|
return;
|
2007-09-03 01:12:23 +00:00
|
|
|
|
2009-06-11 00:39:12 -04:00
|
|
|
/* enable/disable wireless devices as required */
|
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
2022-02-01 22:08:52 +01:00
|
|
|
if (nm_device_get_rfkill_type(device) == rtype) {
|
2017-06-07 13:13:55 +02:00
|
|
|
_LOG2D(LOGD_RFKILL,
|
|
|
|
|
device,
|
|
|
|
|
"rfkill: setting radio %s",
|
|
|
|
|
enabled ? "enabled" : "disabled");
|
2011-11-18 12:02:58 -06:00
|
|
|
nm_device_set_enabled(device, enabled);
|
2010-04-07 14:55:43 -07:00
|
|
|
}
|
2008-02-06 16:50:43 +00:00
|
|
|
}
|
2007-09-03 01:12:23 +00:00
|
|
|
}
|
|
|
|
|
|
2010-09-01 17:08:10 -05:00
|
|
|
static void
|
2022-02-01 22:19:36 +01:00
|
|
|
_rfkill_update_one_type(NMManager *self, NMRfkillType rtype)
|
2009-11-24 10:43:43 -08:00
|
|
|
{
|
2022-02-01 22:08:52 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2022-02-01 22:19:36 +01:00
|
|
|
RfkillRadioState *rstate = &priv->radio_states[rtype];
|
2022-02-01 20:07:18 +01:00
|
|
|
gboolean old_enabled;
|
|
|
|
|
gboolean new_enabled;
|
|
|
|
|
gboolean old_rfkilled;
|
|
|
|
|
gboolean new_rfkilled;
|
|
|
|
|
gboolean old_hwe;
|
2022-03-21 10:19:37 +01:00
|
|
|
guint old_radio_flags;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2022-02-01 22:08:52 +01:00
|
|
|
nm_assert(_NM_INT_NOT_NEGATIVE(rtype) && rtype < G_N_ELEMENTS(priv->radio_states));
|
|
|
|
|
|
2022-03-21 10:19:37 +01:00
|
|
|
old_enabled = _rfkill_radio_state_get_enabled(rstate, TRUE);
|
|
|
|
|
old_rfkilled = rstate->hw_enabled && rstate->sw_enabled;
|
|
|
|
|
old_hwe = rstate->hw_enabled;
|
|
|
|
|
old_radio_flags = priv->radio_flags;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-04-15 16:25:39 -05:00
|
|
|
/* recheck kernel rfkill state */
|
2022-03-21 10:19:37 +01:00
|
|
|
_rfkill_radio_state_set_from_manager(self, rtype, rstate);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2011-04-13 21:58:25 -05:00
|
|
|
/* Print out all states affecting device enablement */
|
2022-02-01 22:08:52 +01:00
|
|
|
_LOGD(LOGD_RFKILL,
|
2022-03-21 10:19:37 +01:00
|
|
|
"rfkill: %s available %d hw-enabled %d sw-enabled %d os-owner %d",
|
2022-02-01 22:32:46 +01:00
|
|
|
nm_rfkill_type_to_string(rtype),
|
2022-03-21 10:19:37 +01:00
|
|
|
rstate->available,
|
2022-02-01 22:08:52 +01:00
|
|
|
rstate->hw_enabled,
|
2022-02-16 15:54:44 +02:00
|
|
|
rstate->sw_enabled,
|
|
|
|
|
rstate->os_owner);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2010-09-01 17:08:10 -05:00
|
|
|
/* Log new killswitch state */
|
|
|
|
|
new_rfkilled = rstate->hw_enabled && rstate->sw_enabled;
|
|
|
|
|
if (old_rfkilled != new_rfkilled) {
|
2017-06-07 13:13:55 +02:00
|
|
|
_LOGI(LOGD_RFKILL,
|
|
|
|
|
"rfkill: %s now %s by radio killswitch",
|
2022-02-01 22:32:46 +01:00
|
|
|
nm_rfkill_type_to_string(rtype),
|
2016-03-02 11:38:26 +01:00
|
|
|
new_rfkilled ? "enabled" : "disabled");
|
2010-09-01 17:08:10 -05:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2022-03-21 10:19:37 +01:00
|
|
|
priv->radio_flags = NM_FLAGS_ASSIGN(priv->radio_flags,
|
|
|
|
|
(guint) nm_rfkill_type_to_radio_available_flag(rtype),
|
|
|
|
|
rstate->available);
|
|
|
|
|
|
|
|
|
|
/* Send out property changed signal for HW available and enabled */
|
|
|
|
|
nm_gobject_notify_together(self,
|
|
|
|
|
rstate->hw_enabled != old_hwe ? _rfkill_type_desc[rtype].hw_prop_id
|
|
|
|
|
: PROP_0,
|
|
|
|
|
priv->radio_flags != old_radio_flags ? PROP_RADIO_FLAGS : PROP_0);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2011-04-13 21:58:25 -05:00
|
|
|
/* And finally update the actual device radio state itself; respect the
|
|
|
|
|
* daemon state here because this is never called from user-triggered
|
|
|
|
|
* radio changes and we only want to ignore the daemon enabled state when
|
|
|
|
|
* handling user radio change requests.
|
|
|
|
|
*/
|
2022-02-01 22:19:36 +01:00
|
|
|
new_enabled = _rfkill_radio_state_get_enabled(rstate, TRUE);
|
2010-09-01 17:08:10 -05:00
|
|
|
if (new_enabled != old_enabled)
|
2022-02-01 22:19:36 +01:00
|
|
|
_rfkill_update_devices(self, rtype, new_enabled);
|
2009-12-23 00:03:45 -08:00
|
|
|
}
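/*
 * A plain-GObject sketch of the batched notification used above: freeze,
 * queue the changed properties, thaw, so listeners wake up once with a
 * consistent change set. The property names are assumed for illustration.
 */
#include <glib-object.h>

static void
example_notify_together(GObject *obj, gboolean hw_changed, gboolean flags_changed)
{
    g_object_freeze_notify(obj);
    if (hw_changed)
        g_object_notify(obj, "wireless-hardware-enabled");
    if (flags_changed)
        g_object_notify(obj, "radio-flags");
    g_object_thaw_notify(obj);
}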
|
|
|
|
|
|
|
|
|
|
static void
|
2022-02-01 22:19:36 +01:00
|
|
|
_rfkill_update(NMManager *self, NMRfkillType rtype)
|
2009-12-23 00:03:45 -08:00
|
|
|
{
|
2022-02-01 22:08:52 +01:00
|
|
|
guint i;
|
2009-12-23 00:03:45 -08:00
|
|
|
|
2022-02-01 20:21:34 +01:00
|
|
|
if (rtype != NM_RFKILL_TYPE_UNKNOWN)
|
2022-02-01 22:19:36 +01:00
|
|
|
_rfkill_update_one_type(self, rtype);
|
2011-04-13 21:58:25 -05:00
|
|
|
else {
|
2020-07-01 17:20:40 -04:00
|
|
|
/* Otherwise, sync all radio types */
|
2022-02-01 20:21:34 +01:00
|
|
|
for (i = 0; i < NM_RFKILL_TYPE_MAX; i++)
|
2022-02-01 22:19:36 +01:00
|
|
|
_rfkill_update_one_type(self, i);
|
2009-11-24 10:43:43 -08:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2022-02-01 22:19:36 +01:00
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
|
|
|
|
#define KERN_RFKILL_OP_CHANGE_ALL 3
|
|
|
|
|
#define KERN_RFKILL_TYPE_WLAN 1
|
|
|
|
|
#define KERN_RFKILL_TYPE_WWAN 5
|
|
|
|
|
|
|
|
|
|
struct rfkill_event {
|
|
|
|
|
uint32_t idx;
|
|
|
|
|
uint8_t type;
|
|
|
|
|
uint8_t op;
|
|
|
|
|
uint8_t soft;
|
|
|
|
|
uint8_t hard;
|
|
|
|
|
} _nm_packed;
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
_rfkill_update_system(NMManager *self, NMRfkillType rtype, gboolean enabled)
|
|
|
|
|
{
|
2022-02-01 22:24:45 +01:00
|
|
|
nm_auto_close int fd = -1;
|
2022-02-01 22:19:36 +01:00
|
|
|
struct rfkill_event event;
|
|
|
|
|
ssize_t len;
|
|
|
|
|
int errsv;
|
|
|
|
|
|
|
|
|
|
nm_assert(NM_IN_SET(rtype, NM_RFKILL_TYPE_WLAN, NM_RFKILL_TYPE_WWAN));
|
|
|
|
|
|
|
|
|
|
fd = open("/dev/rfkill", O_RDWR | O_CLOEXEC);
|
|
|
|
|
if (fd < 0) {
|
|
|
|
|
if (errno == EACCES)
|
|
|
|
|
_LOGW(LOGD_RFKILL,
|
|
|
|
|
"rfkill: (%s): failed to open killswitch device",
|
2022-02-01 22:32:46 +01:00
|
|
|
nm_rfkill_type_to_string(rtype));
|
2022-02-01 22:19:36 +01:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
|
|
|
|
|
_LOGW(LOGD_RFKILL,
|
|
|
|
|
"rfkill: (%s): failed to set killswitch device for "
|
|
|
|
|
"non-blocking operation",
|
2022-02-01 22:32:46 +01:00
|
|
|
nm_rfkill_type_to_string(rtype));
|
2022-02-01 22:19:36 +01:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
memset(&event, 0, sizeof(event));
|
|
|
|
|
event.op = KERN_RFKILL_OP_CHANGE_ALL;
|
|
|
|
|
switch (rtype) {
|
|
|
|
|
case NM_RFKILL_TYPE_WLAN:
|
|
|
|
|
event.type = KERN_RFKILL_TYPE_WLAN;
|
|
|
|
|
break;
|
|
|
|
|
case NM_RFKILL_TYPE_WWAN:
|
|
|
|
|
event.type = KERN_RFKILL_TYPE_WWAN;
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
nm_assert_not_reached();
|
|
|
|
|
}
|
|
|
|
|
event.soft = enabled ? 0 : 1;
|
|
|
|
|
|
|
|
|
|
len = write(fd, &event, sizeof(event));
|
|
|
|
|
if (len < 0) {
|
|
|
|
|
errsv = errno;
|
|
|
|
|
_LOGW(LOGD_RFKILL,
|
|
|
|
|
"rfkill: (%s): failed to change Wi-Fi killswitch state: (%d) %s",
|
2022-02-01 22:32:46 +01:00
|
|
|
nm_rfkill_type_to_string(rtype),
|
2022-02-01 22:19:36 +01:00
|
|
|
errsv,
|
|
|
|
|
nm_strerror_native(errsv));
|
|
|
|
|
} else if (len == sizeof(event)) {
|
|
|
|
|
_LOGI(LOGD_RFKILL,
|
|
|
|
|
"rfkill: %s hardware radio set %s",
|
2022-02-01 22:32:46 +01:00
|
|
|
nm_rfkill_type_to_string(rtype),
|
2022-02-01 22:19:36 +01:00
|
|
|
enabled ? "enabled" : "disabled");
|
|
|
|
|
} else {
|
|
|
|
|
/* Failed to write full structure */
|
|
|
|
|
_LOGW(LOGD_RFKILL,
|
|
|
|
|
"rfkill: (%s): failed to change Wi-Fi killswitch state",
|
2022-02-01 22:32:46 +01:00
|
|
|
nm_rfkill_type_to_string(rtype));
|
2022-02-01 22:19:36 +01:00
|
|
|
}
|
|
|
|
|
}
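/*
 * A standalone sketch of the same kernel interface: soft-unblock all WLAN
 * killswitches by writing a single rfkill_event to /dev/rfkill. The numeric
 * constants mirror the KERN_* defines above (and linux/rfkill.h); this
 * needs sufficient privileges to open the device read-write.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
    struct {
        uint32_t idx;
        uint8_t  type, op, soft, hard;
    } __attribute__((packed)) ev;
    int fd;

    fd = open("/dev/rfkill", O_RDWR | O_CLOEXEC);
    if (fd < 0) {
        perror("open /dev/rfkill");
        return 1;
    }

    memset(&ev, 0, sizeof(ev));
    ev.op   = 3; /* KERN_RFKILL_OP_CHANGE_ALL */
    ev.type = 1; /* KERN_RFKILL_TYPE_WLAN */
    ev.soft = 0; /* 0 = lift the soft block (radio enabled) */

    if (write(fd, &ev, sizeof(ev)) != sizeof(ev))
        perror("write rfkill event");
    close(fd);
    return 0;
}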
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
_rfkill_update_from_user(NMManager *self, NMRfkillType rtype, gboolean enabled)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
RfkillRadioState *rstate = &priv->radio_states[rtype];
|
|
|
|
|
gboolean old_enabled, new_enabled;
|
|
|
|
|
|
|
|
|
|
/* Don't touch devices if asleep/networking disabled */
|
|
|
|
|
if (manager_sleeping(self))
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
_LOGD(LOGD_RFKILL,
|
|
|
|
|
"rfkill: (%s): setting radio %s by user",
|
2022-02-01 22:32:46 +01:00
|
|
|
nm_rfkill_type_to_string(rtype),
|
2022-02-01 22:19:36 +01:00
|
|
|
enabled ? "enabled" : "disabled");
|
|
|
|
|
|
|
|
|
|
/* Update enabled key in state file */
|
|
|
|
|
nm_config_state_set(priv->config, TRUE, FALSE, _rfkill_type_desc[rtype].key, enabled);
|
|
|
|
|
|
|
|
|
|
/* When the user toggles the radio, their request should override any
|
|
|
|
|
* daemon (like ModemManager) enabled state that can be changed. For WWAN
|
|
|
|
|
* for example, we want the WwanEnabled property to reflect the daemon state
|
|
|
|
|
* too so that users can toggle the modem powered, but we don't want that
|
|
|
|
|
* daemon state to affect whether or not the user *can* turn it on, which is
|
|
|
|
|
* what the kernel rfkill state does. So we ignore daemon enabled state
|
|
|
|
|
* when determining what the new state should be since it shouldn't block
|
|
|
|
|
* the user's request.
|
|
|
|
|
*/
|
|
|
|
|
old_enabled = _rfkill_radio_state_get_enabled(rstate, TRUE);
|
|
|
|
|
rstate->user_enabled = enabled;
|
|
|
|
|
new_enabled = _rfkill_radio_state_get_enabled(rstate, FALSE);
|
|
|
|
|
if (new_enabled != old_enabled) {
|
|
|
|
|
/* Try to change the kernel rfkill state */
|
|
|
|
|
_rfkill_update_system(self, rtype, new_enabled);
|
|
|
|
|
_rfkill_update_devices(self, rtype, new_enabled);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
2012-06-01 15:27:39 -05:00
|
|
|
static void
|
|
|
|
|
device_auth_done_cb(NMAuthChain *chain, GDBusMethodInvocation *context, gpointer user_data)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = NM_MANAGER(user_data);
|
|
|
|
|
gs_free_error GError *error = NULL;
|
2012-06-01 15:27:39 -05:00
|
|
|
NMAuthCallResult result;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device;
|
|
|
|
|
GCancellable *cancellable;
|
|
|
|
|
const char *permission;
|
device: implement "auth-request" as async operation nm_manager_device_auth_request()
GObject signals only complicate the code and are less efficient.
Also, NM_DEVICE_AUTH_REQUEST signal really invoked an asynchronous
request. Of course, fundamentally emitting a signal *is* the same as
calling a method. However, implementing this as signal is really not
nice nor best practice. For one, there is a (negligible) overhead emitting
a GObject signal. But what is worse, GObject signals are not as strongly
typed and make it harder to understand what happens.
The signal had the appearance of providing some special decoupling of
NMDevice and NMManager. Of course, in practice, they were not more
decoupled (both forms are the same in nature), but it was harder to
understand how they work together.
Add and call a method nm_manager_device_auth_request() instead. This
has the notion of invoking an asynchronous method. Also, never invoke
the callback synchronously and provide a cancellable. Like every asynchronous
operation, it *must* be cancellable, and callers should make sure to
provide a mechanism to abort.
2020-04-26 13:59:13 +02:00
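/*
 * A minimal sketch of the completion pattern this commit introduces: the
 * done-callback checks its GCancellable first, so a cancelled request is
 * reported as such rather than delivering a stale result. Names are
 * illustrative; the GError stays owned by this function, matching how
 * device_auth_done_cb() below passes a borrowed error to its callback.
 */
#include <gio/gio.h>

typedef void (*ExampleDoneFunc)(GError *error, gpointer user_data);

static void
example_auth_done(GCancellable   *cancellable,
                  gboolean        authorized,
                  ExampleDoneFunc callback,
                  gpointer        user_data)
{
    g_autoptr(GError) error = NULL;

    if (g_cancellable_set_error_if_cancelled(cancellable, &error)) {
        /* pass: deliver the cancellation error */
    } else if (!authorized) {
        error = g_error_new_literal(G_IO_ERROR,
                                    G_IO_ERROR_PERMISSION_DENIED,
                                    "request failed: not authorized");
    }
    callback(error, user_data); /* callback must not keep the error */
}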
|
|
|
NMManagerDeviceAuthRequestFunc callback;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMAuthSubject *subject;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-05-02 10:08:09 +02:00
|
|
|
nm_assert(G_IS_DBUS_METHOD_INVOCATION(context));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-05-26 18:49:55 +02:00
|
|
|
c_list_unlink(nm_auth_chain_parent_lst_list(chain));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-05-06 09:53:26 +02:00
|
|
|
permission = nm_auth_chain_get_data(chain, "perm");
|
2019-05-02 10:08:09 +02:00
|
|
|
nm_assert(permission);
|
2012-06-01 15:27:39 -05:00
|
|
|
callback = nm_auth_chain_get_data(chain, "callback");
|
2019-05-02 10:08:09 +02:00
|
|
|
nm_assert(callback);
|
2012-06-01 15:27:39 -05:00
|
|
|
device = nm_auth_chain_get_data(chain, "device");
|
2019-05-02 10:08:09 +02:00
|
|
|
nm_assert(NM_IS_DEVICE(device));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-04-26 13:59:13 +02:00
|
|
|
cancellable = nm_auth_chain_get_cancellable(chain);
|
|
|
|
|
nm_assert(!cancellable || G_IS_CANCELLABLE(cancellable));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2012-10-08 12:52:15 -05:00
|
|
|
result = nm_auth_chain_get_result(chain, permission);
|
2015-07-14 10:19:19 +02:00
|
|
|
subject = nm_auth_chain_get_subject(chain);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-04-26 13:59:13 +02:00
|
|
|
if (cancellable && g_cancellable_set_error_if_cancelled(cancellable, &error)) {
|
|
|
|
|
/* pass. */
|
|
|
|
|
} else {
|
|
|
|
|
if (result != NM_AUTH_CALL_RESULT_YES) {
|
|
|
|
|
_LOGD(LOGD_CORE, "%s request failed: not authorized", permission);
|
|
|
|
|
error = g_error_new(NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"%s request failed: not authorized",
|
|
|
|
|
permission);
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
device: implement "auth-request" as async operation nm_manager_device_auth_request()
GObject signals only complicate the code and are less efficient.
Also, NM_DEVICE_AUTH_REQUEST signal really invoked an asynchronous
request. Of course, fundamentally emitting a signal *is* the same as
calling a method. However, implementing this as signal is really not
nice nor best practice. For one, there is a (negligible) overhead emitting
a GObject signal. But what is worse, GObject signals are not as strongly
typed and make it harder to understand what happens.
The signal had the appearance of providing some special decoupling of
NMDevice and NMManager. Of course, in practice, they were not more
decoupled (both forms are the same in nature), but it was harder to
understand how they work together.
Add and call a method nm_manager_device_auth_request() instead. This
has the notion of invoking an asynchronous method. Also, never invoke
the callback synchronously and provide a cancellable. Like every asynchronous
operation, it *must* be cancellable, and callers should make sure to
provide a mechanism to abort.
2020-04-26 13:59:13 +02:00
|
|
|
nm_assert(error || (result == NM_AUTH_CALL_RESULT_YES));
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2012-06-01 15:27:39 -05:00
|
|
|
callback(device, context, subject, error, nm_auth_chain_get_data(chain, "user-data"));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
device: implement "auth-request" as async operation nm_manager_device_auth_request()
GObject signals only complicate the code and are less efficient.
Also, NM_DEVICE_AUTH_REQUEST signal really invoked an asynchronous
request. Of course, fundamentally emitting a signal *is* the same as
calling a method. However, implementing this as signal is really not
nice nor best practice. For one, there is a (negligible) overhead emitting
a GObject signal. But what is worse, GObject signals are not as strongly
typed and make it harder to understand what happens.
The signal had the appearance of providing some special decoupling of
NMDevice and NMManager. Of course, in practice, they were not more
decoupled (both forms are the same in nature), but it was harder to
understand how they work together.
Add and call a method nm_manager_device_auth_request() instead. This
has the notion of invoking an asynchronous method. Also, never invoke
the callback synchronously and provide a cancellable. Like every asynchronous
operation, it *must* be cancellable, and callers should make sure to
provide a mechanism to abort.
2020-04-26 13:59:13 +02:00
|
|
|
_device_auth_done_fail_on_idle(gpointer user_data, GCancellable *cancellable)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
gs_unref_object NMManager *self = NULL;
|
|
|
|
|
gs_unref_object NMDevice *device = NULL;
|
|
|
|
|
gs_unref_object GDBusMethodInvocation *context = NULL;
|
|
|
|
|
gs_unref_object NMAuthSubject *subject = NULL;
|
|
|
|
|
gs_free_error GError *error_original = NULL;
|
|
|
|
|
gs_free_error GError *error_cancelled = NULL;
|
|
|
|
|
NMManagerDeviceAuthRequestFunc callback;
|
|
|
|
|
gpointer callback_user_data;
|
device: implement "auth-request" as async operation nm_manager_device_auth_request()
GObject signals only complicate the code and are less efficient.
Also, NM_DEVICE_AUTH_REQUEST signal really invoked an asynchronous
request. Of course, fundamentally emitting a signal *is* the same as
calling a method. However, implementing this as signal is really not
nice nor best practice. For one, there is a (negligible) overhead emitting
a GObject signal. But what is worse, GObject signals are not as strongly
typed and make it harder to understand what happens.
The signal had the appearance of providing some special decoupling of
NMDevice and NMManager. Of course, in practice, they were not more
decoupled (both forms are the same in nature), but it was harder to
understand how they work together.
Add and call a method nm_manager_device_auth_request() instead. This
has the notion of invoking an asynchronous method. Also, never invoke
the callback synchronously and provide a cancellable. Like every asynchronous
operation, it *must* be cancellable, and callers should make sure to
provide a mechanism to abort.
2020-04-26 13:59:13 +02:00
|
|
|
|
2020-10-22 14:40:15 +02:00
|
|
|
nm_utils_user_data_unpack(user_data,
|
|
|
|
|
&self,
|
device: implement "auth-request" as async operation nm_manager_device_auth_request()
GObject signals only complicate the code and are less efficient.
Also, NM_DEVICE_AUTH_REQUEST signal really invoked an asynchronous
request. Of course, fundamentally emitting a signal *is* the same as
calling a method. However, implementing this as signal is really not
nice nor best practice. For one, there is a (negligible) overhead emitting
a GObject signal. But what is worse, GObject signals are not as strongly
typed and make it harder to understand what happens.
The signal had the appearance of providing some special decoupling of
NMDevice and NMManager. Of course, in practice, they were not more
decoupled (both forms are the same in nature), but it was harder to
understand how they work together.
Add and call a method nm_manager_device_auth_request() instead. This
has the notion of invoking an asynchronous method. Also, never invoke
the callback synchronously and provide a cancellable. Like every asynchronous
operation, it *must* be cancellable, and callers should make sure to
provide a mechanism to abort.
2020-04-26 13:59:13 +02:00
|
|
|
&device,
|
|
|
|
|
&context,
|
|
|
|
|
&subject,
|
|
|
|
|
&error_original,
|
|
|
|
|
&callback,
|
|
|
|
|
&callback_user_data);
|
|
|
|
|
|
|
|
|
|
g_cancellable_set_error_if_cancelled(cancellable, &error_cancelled);
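    /* Prefer a fresh cancellation error over the original one: if the
     * operation was cancelled meanwhile, the callback reports
     * G_IO_ERROR_CANCELLED. Below, "a ?: b" is the GNU extension that
     * evaluates to @a if non-NULL, otherwise to @b. */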
|
|
|
|
|
|
|
|
|
|
callback(device, context, subject, error_cancelled ?: error_original, callback_user_data);
|
|
|
|
|
}
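A minimal sketch of the nm_utils_user_data_pack()/nm_utils_user_data_unpack()
idiom used above, with hypothetical names my_idle_cb() and my_schedule():
the pointers are packed into one heap-allocated blob, and unpacking (in the
same order) also releases that blob.

static gboolean
my_idle_cb(gpointer user_data)
{
    gs_unref_object NMManager *self   = NULL;
    gs_unref_object NMDevice  *device = NULL;

    /* unpack in the same order as packed */
    nm_utils_user_data_unpack(user_data, &self, &device);
    /* ... use @self and @device; the refs taken in my_schedule()
     * are dropped by gs_unref_object ... */
    return G_SOURCE_REMOVE;
}

static void
my_schedule(NMManager *self, NMDevice *device)
{
    g_idle_add(my_idle_cb,
               nm_utils_user_data_pack(g_object_ref(self),
                                       g_object_ref(device)));
}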
|
|
|
|
|
|
|
|
|
|
void
|
2021-11-09 13:28:54 +01:00
|
|
|
nm_manager_device_auth_request(NMManager *self,
|
|
|
|
|
NMDevice *device,
|
|
|
|
|
GDBusMethodInvocation *context,
|
|
|
|
|
NMConnection *connection,
|
|
|
|
|
const char *permission,
|
device: implement "auth-request" as async operation nm_manager_device_auth_request()
GObject signals only complicate the code and are less efficient.
Also, NM_DEVICE_AUTH_REQUEST signal really invoked an asynchronous
request. Of course, fundamentally emitting a signal *is* the same as
calling a method. However, implementing this as signal is really not
nice nor best practice. For one, there is a (negligible) overhead emitting
a GObject signal. But what is worse, GObject signals are not as strongly
typed and make it harder to understand what happens.
The signal had the appearance of providing some special decoupling of
NMDevice and NMManager. Of course, in practice, they were not more
decoupled (both forms are the same in nature), but it was harder to
understand how they work together.
Add and call a method nm_manager_device_auth_request() instead. This
has the notion of invoking an asynchronous method. Also, never invoke
the callback synchronously and provide a cancellable. Like every asynchronous
operation, it *must* be cancellable, and callers should make sure to
provide a mechanism to abort.
2020-04-26 13:59:13 +02:00
|
|
|
gboolean allow_interaction,
|
2021-11-09 13:28:54 +01:00
|
|
|
GCancellable *cancellable,
|
device: implement "auth-request" as async operation nm_manager_device_auth_request()
GObject signals only complicate the code and are less efficient.
Also, NM_DEVICE_AUTH_REQUEST signal really invoked an asynchronous
request. Of course, fundamentally emitting a signal *is* the same as
calling a method. However, implementing this as signal is really not
nice nor best practice. For one, there is a (negligible) overhead emitting
a GObject signal. But what is worse, GObject signals are not as strongly
typed and make it harder to understand what happens.
The signal had the appearance of providing some special decoupling of
NMDevice and NMManager. Of course, in practice, they were not more
decoupled (both forms are the same in nature), but it was harder to
understand how they work together.
Add and call a method nm_manager_device_auth_request() instead. This
has the notion of invoking an asynchronous method. Also, never invoke
the callback synchronously and provide a cancellable. Like every asynchronous
operation, it *must* be cancellable, and callers should make sure to
provide a mechanism to abort.
2020-04-26 13:59:13 +02:00
|
|
|
NMManagerDeviceAuthRequestFunc callback,
|
|
|
|
|
gpointer user_data)
|
2012-06-01 15:27:39 -05:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
gs_free_error GError *error = NULL;
|
device: implement "auth-request" as async operation nm_manager_device_auth_request()
GObject signals only complicate the code and are less efficient.
Also, NM_DEVICE_AUTH_REQUEST signal really invoked an asynchronous
request. Of course, fundamentally emitting a signal *is* the same as
calling a method. However, implementing this as signal is really not
nice nor best practice. For one, there is a (negligible) overhead emitting
a GObject signal. But what is worse, GObject signals are not as strongly
typed and make it harder to understand what happens.
The signal had the appearance of providing some special decoupling of
NMDevice and NMManager. Of course, in practice, they were not more
decoupled (both forms are the same in nature), but it was harder to
understand how they work together.
Add and call a method nm_manager_device_auth_request() instead. This
has the notion of invoking an asynchronous method. Also, never invoke
the callback synchronously and provide a cancellable. Like every asynchronous
operation, it *must* be cancellable, and callers should make sure to
provide a mechanism to abort.
2020-04-26 13:59:13 +02:00
|
|
|
gs_unref_object NMAuthSubject *subject = NULL;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMAuthChain *chain;
|
|
|
|
|
char *permission_dup;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-01-17 11:18:23 -06:00
|
|
|
/* Validate the caller */
|
2019-12-19 11:30:38 +01:00
|
|
|
subject = nm_dbus_manager_new_auth_subject_from_context(context);
|
2014-01-17 11:18:23 -06:00
|
|
|
if (!subject) {
|
device: implement "auth-request" as async operation nm_manager_device_auth_request()
GObject signals only complicate the code and are less efficient.
Also, NM_DEVICE_AUTH_REQUEST signal really invoked an asynchronous
request. Of course, fundamentally emitting a signal *is* the same as
calling a method. However, implementing this as signal is really not
nice nor best practice. For one, there is a (negligible) overhead emitting
a GObject signal. But what is worse, GObject signals are not as strongly
typed and make it harder to understand what happens.
The signal had the appearance of providing some special decoupling of
NMDevice and NMManager. Of course, in practice, they were not more
decoupled (both forms are the same in nature), but it was harder to
understand how they work together.
Add and call a method nm_manager_device_auth_request() instead. This
has the notion of invoking an asynchronous method. Also, never invoke
the callback synchronously and provide a cancellable. Like every asynchronous
operation, it *must* be cancellable, and callers should make sure to
provide a mechanism to abort.
2020-04-26 13:59:13 +02:00
|
|
|
g_set_error_literal(&error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
NM_UTILS_ERROR_MSG_REQ_UID_UKNOWN);
|
|
|
|
|
goto fail_on_idle;
|
2014-01-17 11:18:23 -06:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-01-17 11:18:23 -06:00
|
|
|
/* Ensure the subject has permissions for this connection */
|
2018-04-12 09:48:16 +02:00
|
|
|
if (connection
|
|
|
|
|
&& !nm_auth_is_subject_in_acl_set_error(connection,
|
|
|
|
|
subject,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
&error))
|
device: implement "auth-request" as async operation nm_manager_device_auth_request()
GObject signals only complicate the code and are less efficient.
Also, NM_DEVICE_AUTH_REQUEST signal really invoked an asynchronous
request. Of course, fundamentally emitting a signal *is* the same as
calling a method. However, implementing this as signal is really not
nice nor best practice. For one, there is a (negligible) overhead emitting
a GObject signal. But what is worse, GObject signals are not as strongly
typed and make it harder to understand what happens.
The signal had the appearance of providing some special decoupling of
NMDevice and NMManager. Of course, in practice, they were not more
decoupled (both forms are the same in nature), but it was harder to
understand how they work together.
Add and call a method nm_manager_device_auth_request() instead. This
has the notion of invoking an asynchronous method. Also, never invoke
the callback synchronously and provide a cancellable. Like every asynchronous
operation, it *must* be cancellable, and callers should make sure to
provide a mechanism to abort.
2020-04-26 13:59:13 +02:00
|
|
|
goto fail_on_idle;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-01-17 11:18:23 -06:00
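    /* Start the PolicyKit authorization: once the "perm" call completes,
     * the chain invokes device_auth_done_cb(). */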
|
|
|
chain = nm_auth_chain_new_subject(subject, context, device_auth_done_cb, self);
|
device: implement "auth-request" as async operation nm_manager_device_auth_request()
GObject signals only complicate the code and are less efficient.
Also, NM_DEVICE_AUTH_REQUEST signal really invoked an asynchronous
request. Of course, fundamentally emitting a signal *is* the same as
calling a method. However, implementing this as signal is really not
nice nor best practice. For one, there is a (negligible) overhead emitting
a GObject signal. But what is worse, GObject signals are not as strongly
typed and make it harder to understand what happens.
The signal had the appearance of providing some special decoupling of
NMDevice and NMManager. Of course, in practice, they were not more
decoupled (both forms are the same in nature), but it was harder to
understand how they work together.
Add and call a method nm_manager_device_auth_request() instead. This
has the notion of invoking an asynchronous method. Also, never invoke
the callback synchronously and provide a cancellable. Like every asynchronous
operation, it *must* be cancellable, and callers should make sure to
provide a mechanism to abort.
2020-04-26 13:59:13 +02:00
|
|
|
if (cancellable)
|
|
|
|
|
nm_auth_chain_set_cancellable(chain, cancellable);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-05-04 10:31:18 +02:00
|
|
|
permission_dup = g_strdup(permission);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-05-26 18:49:55 +02:00
|
|
|
c_list_link_tail(&priv->auth_lst_head, nm_auth_chain_parent_lst_list(chain));
|
2013-07-29 11:53:23 -05:00
|
|
|
nm_auth_chain_set_data(chain, "device", g_object_ref(device), g_object_unref);
|
|
|
|
|
nm_auth_chain_set_data(chain, "callback", callback, NULL);
|
|
|
|
|
nm_auth_chain_set_data(chain, "user-data", user_data, NULL);
|
2019-05-06 09:53:26 +02:00
|
|
|
nm_auth_chain_set_data(chain, "perm", permission_dup /* transfer ownership */, g_free);
|
2019-05-04 10:31:18 +02:00
|
|
|
nm_auth_chain_add_call_unsafe(chain, permission_dup, allow_interaction);
|
device: implement "auth-request" as async operation nm_manager_device_auth_request()
GObject signals only complicate the code and are less efficient.
Also, NM_DEVICE_AUTH_REQUEST signal really invoked an asynchronous
request. Of course, fundamentally emitting a signal *is* the same as
calling a method. However, implementing this as signal is really not
nice nor best practice. For one, there is a (negligible) overhead emitting
a GObject signal. But what is worse, GObject signals are not as strongly
typed and make it harder to understand what happens.
The signal had the appearance of providing some special decoupling of
NMDevice and NMManager. Of course, in practice, they were not more
decoupled (both forms are the same in nature), but it was harder to
understand how they work together.
Add and call a method nm_manager_device_auth_request() instead. This
has the notion of invoking an asynchronous method. Also, never invoke
the callback synchronously and provide a cancellable. Like every asynchronous
operation, it *must* be cancellable, and callers should make sure to
provide a mechanism to abort.
2020-04-26 13:59:13 +02:00
|
|
|
return;
|
2014-01-17 11:18:23 -06:00
|
|
|
|
device: implement "auth-request" as async operation nm_manager_device_auth_request()
GObject signals only complicate the code and are less efficient.
Also, NM_DEVICE_AUTH_REQUEST signal really invoked an asynchronous
request. Of course, fundamentally emitting a signal *is* the same as
calling a method. However, implementing this as signal is really not
nice nor best practice. For one, there is a (negligible) overhead emitting
a GObject signal. But what is worse, GObject signals are not as strongly
typed and make it harder to understand what happens.
The signal had the appearance of providing some special decoupling of
NMDevice and NMManager. Of course, in practice, they were not more
decoupled (both forms are the same in nature), but it was harder to
understand how they work together.
Add and call a method nm_manager_device_auth_request() instead. This
has the notion of invoking an asynchronous method. Also, never invoke
the callback synchronously and provide a cancellable. Like every asynchronous
operation, it *must* be cancellable, and callers should make sure to
provide a mechanism to abort.
2020-04-26 13:59:13 +02:00
|
|
|
fail_on_idle:
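    /* Per the async contract above, never invoke the callback
     * synchronously: even for early failures, dispatch from an idle
     * handler while still honoring @cancellable. */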
|
|
|
|
|
nm_utils_invoke_on_idle(cancellable,
|
|
|
|
|
_device_auth_done_fail_on_idle,
|
|
|
|
|
nm_utils_user_data_pack(g_object_ref(self),
|
|
|
|
|
g_object_ref(device),
|
|
|
|
|
g_object_ref(context),
|
|
|
|
|
g_steal_pointer(&subject),
|
|
|
|
|
g_steal_pointer(&error),
|
|
|
|
|
callback,
|
|
|
|
|
user_data));
|
2012-06-01 15:27:39 -05:00
|
|
|
}
|
|
|
|
|
|
2018-08-08 11:33:31 +02:00
|
|
|
static gboolean
|
|
|
|
|
new_activation_allowed_for_connection(NMManager *self, NMSettingsConnection *connection)
|
|
|
|
|
{
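    /* A profile that allows multiple activations may always be activated
     * again; otherwise a new activation is only allowed while no matching
     * active connection exists (see active_connection_find() with
     * NM_ACTIVE_CONNECTION_STATE_ACTIVATED below). */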
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
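A schematic sketch of the delegation described above. Only
nm_settings_connection_get_connection() is the real accessor used in this
file; the private-struct layout and the GET_PRIVATE macro are illustrative
assumptions.

typedef struct {
    /* the settings-connection *owns* a plain profile instance
     * (a NMSimpleConnection) instead of being one itself ... */
    NMConnection *connection;
    /* ... plus D-Bus export state, settings tracking, etc. */
} NMSettingsConnectionPrivate;

NMConnection *
nm_settings_connection_get_connection(NMSettingsConnection *self)
{
    /* delegation: hand out the trivial NMConnection, so callers need
     * not know (or care) about the full NMSettingsConnection type */
    return NM_SETTINGS_CONNECTION_GET_PRIVATE(self)->connection;
}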
2018-08-11 11:08:17 +02:00
|
|
|
if (NM_IN_SET(
|
|
|
|
|
_nm_connection_get_multi_connect(nm_settings_connection_get_connection(connection)),
|
2018-08-08 11:33:31 +02:00
|
|
|
NM_CONNECTION_MULTI_CONNECT_MANUAL_MULTIPLE,
|
|
|
|
|
NM_CONNECTION_MULTI_CONNECT_MULTIPLE))
|
|
|
|
|
return TRUE;
|
|
|
|
|
|
|
|
|
|
return !active_connection_find(self,
|
|
|
|
|
connection,
|
|
|
|
|
NULL,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_ACTIVATED,
|
2021-05-13 10:49:39 +02:00
|
|
|
FALSE,
|
2018-08-08 11:33:31 +02:00
|
|
|
NULL);
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-27 14:39:13 +02:00
|
|
|
/**
|
2013-11-02 10:40:58 -05:00
|
|
|
* get_existing_connection:
|
2013-06-27 14:39:13 +02:00
|
|
|
* @manager: #NMManager instance
|
|
|
|
|
* @device: #NMDevice instance
|
2014-06-20 20:13:14 +02:00
|
|
|
* @out_generated: (allow-none): returns %TRUE if the connection was generated.
|
2013-06-27 14:39:13 +02:00
|
|
|
*
|
2013-11-02 10:40:58 -05:00
|
|
|
* Returns: a #NMSettingsConnection to be assumed by the device, or %NULL if
|
|
|
|
|
* the device does not support assuming existing connections.
|
2013-06-27 14:39:13 +02:00
|
|
|
*/
|
2015-07-14 16:53:24 +02:00
|
|
|
static NMSettingsConnection *
|
core: only assume connections that were managed in a previous run of NetworkManager
Before, we had the concept of assumed connections, which is used
for (1) externally configured devices that NetworkManager should not
touch and (2) connections that NetworkManager should gracefully take
over after a restart (seamlessly, non-destructively).
The behavior was unclear and mixed: it wasn't clear whether the device
is in no-touch mode (1) or graceful take-over mode (2).
Previous commits already introduced separate activation types EXTERNAL (1)
and ASSUME (2).
Also, previously we would, for both (1) and (2), try to find a matching
connection and use it. That doesn't make sense for either.
In the external case (1), we should not pretend that an existing connection
is active. Let's always create a new in-memory connection for these
cases. Note that this means external devices will now always generate
a connection, instead of pretending an existing one is active.
For the assume case (2), we shall not use nm_utils_match_connection() to
guess which connection might be active. It can only be the one that was
active on a previous run of NetworkManager. So, use the information from
the state file and try to activate it. If that fails, it is not an
assume activation type. Note that this means we now mostly don't do
ASSUME anymore; most of the time we do EXTERNAL activation. That is
because the state information is only available after a restart
of NetworkManager.
2017-03-08 08:45:11 +01:00
|
|
|
get_existing_connection(NMManager *self, NMDevice *device, gboolean *out_generated)
|
2013-06-27 14:39:13 +02:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
Revert "core: merge branch 'bg/restart-assume-rh1551958'"
This reverts commit cc1920d71470042c4e0837848da9183526b663d0, reversing
changes made to eb8257dea5802a004af9cccacb30af98440e2172.
This breaks restart, at least for Wi-Fi devices:
#0 0x00007ffff5ee8771 in _g_log_abort (breakpoint=breakpoint@entry=1) at gmessages.c:554
#1 0x00007ffff5ee9a5b in g_logv (log_domain=0x7ffff671a738 "GLib-GIO", log_level=G_LOG_LEVEL_CRITICAL, format=<optimized out>, args=args@entry=0x7fffffffd720) at gmessages.c:1362
#2 0x00007ffff5ee9baf in g_log (log_domain=log_domain@entry=0x7ffff671a738 "GLib-GIO", log_level=log_level@entry=G_LOG_LEVEL_CRITICAL, format=format@entry=0x7ffff5f347ea "%s: assertion '%s' failed") at gmessages.c:1403
#3 0x00007ffff5eea0f9 in g_return_if_fail_warning (log_domain=log_domain@entry=0x7ffff671a738 "GLib-GIO", pretty_function=pretty_function@entry=0x7ffff673fc10 <__func__.25628> "g_dbus_proxy_call_internal", expression=expression@entry=0x7ffff673fb1c "G_IS_DBUS_PROXY (proxy)") at gmessages.c:2702
#4 0x00007ffff66cdc5f in g_dbus_proxy_call_internal (proxy=0x0, method_name=method_name@entry=0x555555810510 "Scan", parameters=0x555555c7a530, flags=flags@entry=G_DBUS_CALL_FLAGS_NONE, timeout_msec=timeout_msec@entry=-1, fd_list=fd_list@entry=0x0, cancellable=0x0, callback=0x55555574cb96 <scan_request_cb>, user_data=0x555555ac2220) at gdbusproxy.c:2664
#5 0x00007ffff66cf686 in g_dbus_proxy_call (proxy=<optimized out>, method_name=method_name@entry=0x555555810510 "Scan", parameters=<optimized out>, flags=flags@entry=G_DBUS_CALL_FLAGS_NONE, timeout_msec=timeout_msec@entry=-1, cancellable=cancellable@entry=0x0, callback=0x55555574cb96 <scan_request_cb>, user_data=0x555555ac2220) at gdbusproxy.c:2970
#6 0x000055555574e026 in nm_supplicant_interface_request_scan (self=0x555555ac2220 [NMSupplicantInterface], ssids=ssids@entry=0x0) at src/supplicant/nm-supplicant-interface.c:1821
#7 0x00007fffe1038276 in request_wireless_scan (self=self@entry=0x555555c6ee60 [NMDeviceWifi], periodic=periodic@entry=0, force_if_scanning=force_if_scanning@entry=0, ssids=<optimized out>, ssids@entry=0x0) at src/devices/wifi/nm-device-wifi.c:1347
#8 0x00007fffe1039011 in device_state_changed (device=0x555555c6ee60 [NMDeviceWifi], new_state=NM_DEVICE_STATE_DISCONNECTED, old_state=<optimized out>, reason=<optimized out>)
at src/devices/wifi/nm-device-wifi.c:2998
#9 0x00007ffff432ed1e in ffi_call_unix64 () at ../src/x86/unix64.S:76
#10 0x00007ffff432e68f in ffi_call (cif=cif@entry=0x7fffffffdc70, fn=fn@entry=0x7fffe1038e1e <device_state_changed>, rvalue=<optimized out>, avalue=avalue@entry=0x7fffffffdb60)
at ../src/x86/ffi64.c:525
#15 0x00007ffff63db66f in <emit signal ??? on instance 0x555555c6ee60 [NMDeviceWifi]> (instance=instance@entry=0x555555c6ee60, signal_id=<optimized out>, detail=detail@entry=0)
at gsignal.c:3447
#11 0x00007ffff63bff39 in g_cclosure_marshal_generic (closure=0x555555c22ea0, return_gvalue=0x0, n_param_values=<optimized out>, param_values=<optimized out>, invocation_hint=<optimized out>, marshal_data=<optimized out>) at gclosure.c:1490
#12 0x00007ffff63bf73d in g_closure_invoke (closure=0x555555c22ea0, return_value=0x0, n_param_values=4, param_values=0x7fffffffdea0, invocation_hint=0x7fffffffde20) at gclosure.c:804
#13 0x00007ffff63d1f30 in signal_emit_unlocked_R (node=node@entry=0x555555c22750, detail=detail@entry=0, instance=instance@entry=0x555555c6ee60, emission_return=emission_return@entry=0x0, instance_and_params=instance_and_params@entry=0x7fffffffdea0) at gsignal.c:3673
#14 0x00007ffff63dad05 in g_signal_emit_valist (instance=0x555555c6ee60, signal_id=<optimized out>, detail=0, var_args=var_args@entry=0x7fffffffe0b0) at gsignal.c:3391
#16 0x00005555556f0f18 in _set_state_full (self=self@entry=0x555555c6ee60 [NMDeviceWifi], state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED, quitting=quitting@entry=0) at src/devices/nm-device.c:13268
#17 0x00005555556f1774 in nm_device_state_changed (self=self@entry=0x555555c6ee60 [NMDeviceWifi], state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED) at src/devices/nm-device.c:13435
#18 0x00005555555bcf95 in recheck_assume_connection (self=self@entry=0x555555b09140 [NMManager], device=device@entry=0x555555c6ee60 [NMDeviceWifi]) at src/nm-manager.c:2297
#19 0x00005555555bd53e in _device_realize_finish (self=self@entry=0x555555b09140 [NMManager], device=device@entry=0x555555c6ee60 [NMDeviceWifi], plink=plink@entry=0x555555ae43d8)
at src/nm-manager.c:2473
#20 0x00005555555c01d0 in platform_link_added (self=self@entry=0x555555b09140 [NMManager], ifindex=<optimized out>, plink=plink@entry=0x555555ae43d8, guess_assume=<optimized out>, dev_state=<optimized out>) at src/nm-manager.c:2789
#21 0x00005555555c0cec in platform_query_devices (self=self@entry=0x555555b09140 [NMManager]) at src/nm-manager.c:2901
#22 0x00005555555c439e in nm_manager_start (self=0x555555b09140 [NMManager], error=<optimized out>) at src/nm-manager.c:5632
#23 0x000055555558498e in main (argc=<optimized out>, argv=<optimized out>) at src/main.c:413
2018-04-04 14:48:52 +02:00
|
|
|
gs_unref_object NMConnection *connection = NULL;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMSettingsConnection *added;
|
|
|
|
|
GError *error = NULL;
|
|
|
|
|
gs_free_error GError *gen_error = NULL;
|
|
|
|
|
NMDevice *master = NULL;
|
|
|
|
|
int ifindex = nm_device_get_ifindex(device);
|
|
|
|
|
NMSettingsConnection *matched = NULL;
|
|
|
|
|
NMSettingsConnection *connection_checked = NULL;
|
|
|
|
|
gboolean assume_state_guess_assume = FALSE;
|
|
|
|
|
const char *assume_state_connection_uuid = NULL;
|
|
|
|
|
gboolean maybe_later, only_by_uuid = FALSE;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-06-20 20:13:14 +02:00
|
|
|
if (out_generated)
|
|
|
|
|
*out_generated = FALSE;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2021-06-07 16:31:18 +02:00
|
|
|
if (!nm_device_can_assume_connections(device)) {
|
|
|
|
|
nm_device_assume_state_reset(device);
|
|
|
|
|
_LOG2D(LOGD_DEVICE, device, "assume: device cannot assume connection");
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2014-06-18 20:17:57 +02:00
|
|
|
if (ifindex) {
|
2017-09-29 15:11:33 +02:00
|
|
|
int master_ifindex = nm_platform_link_get_master(priv->platform, ifindex);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-02-26 10:07:36 +01:00
|
|
|
/* Check that the master is activating before assuming a
|
2022-03-30 16:06:38 +02:00
|
|
|
* slave connection. However, ignore an ovs-system/ovs-netdev master, as
|
2019-02-26 10:07:36 +01:00
|
|
|
* we never manage it.
|
|
|
|
|
*/
|
|
|
|
|
if (master_ifindex
|
|
|
|
|
&& nm_platform_link_get_type(priv->platform, master_ifindex)
|
|
|
|
|
!= NM_LINK_TYPE_OPENVSWITCH) {
|
2016-03-02 11:38:26 +01:00
|
|
|
master = nm_manager_get_device_by_ifindex(self, master_ifindex);
|
2014-06-18 20:17:57 +02:00
|
|
|
if (!master) {
|
2017-06-07 16:41:06 +02:00
|
|
|
_LOG2D(LOGD_DEVICE,
|
|
|
|
|
device,
|
|
|
|
|
"assume: don't assume because "
|
|
|
|
|
"cannot generate connection for slave before its master (%s/%d)",
|
2017-09-29 15:11:33 +02:00
|
|
|
nm_platform_link_get_name(priv->platform, master_ifindex),
|
|
|
|
|
master_ifindex);
|
2014-06-18 20:17:57 +02:00
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
if (!nm_device_get_act_request(master)) {
|
2017-06-07 16:41:06 +02:00
|
|
|
_LOG2D(LOGD_DEVICE,
|
|
|
|
|
device,
|
|
|
|
|
"assume: don't assume because "
|
|
|
|
|
"cannot generate connection for slave before master %s activates",
|
|
|
|
|
nm_device_get_iface(master));
|
2014-06-18 20:17:57 +02:00
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
|
|
|
|
|
2021-05-07 13:57:04 +02:00
|
|
|
if (nm_config_data_get_device_config_boolean(NM_CONFIG_GET_DATA,
|
|
|
|
|
NM_CONFIG_KEYFILE_KEY_DEVICE_KEEP_CONFIGURATION,
|
|
|
|
|
device,
|
|
|
|
|
TRUE,
|
|
|
|
|
TRUE)) {
|
|
|
|
|
/* The core of the API is the nm_device_generate_connection() function, based on
|
|
|
|
|
* the update_connection() virtual method and the @connection_type_supported
|
|
|
|
|
* class attribute. Devices that support assuming existing connections must
|
|
|
|
|
* have update_connection() implemented, otherwise
|
|
|
|
|
* nm_device_generate_connection() returns NULL. */
|
|
|
|
|
connection = nm_device_generate_connection(device, master, &maybe_later, &gen_error);
|
|
|
|
|
if (!connection) {
|
|
|
|
|
if (maybe_later) {
|
|
|
|
|
/* The device can potentially assume connections, but at this
|
|
|
|
|
* time we can't generate a connection because no address is
|
|
|
|
|
* configured. Allow the device to assume a connection indicated
|
|
|
|
|
* in the state file by UUID. */
|
|
|
|
|
only_by_uuid = TRUE;
|
|
|
|
|
} else {
|
|
|
|
|
nm_device_assume_state_reset(device);
|
|
|
|
|
_LOG2D(LOGD_DEVICE,
|
|
|
|
|
device,
|
|
|
|
|
"assume: cannot generate connection: %s",
|
|
|
|
|
gen_error->message);
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
Revert "core: merge branch 'bg/restart-assume-rh1551958'"
This reverts commit cc1920d71470042c4e0837848da9183526b663d0, reversing
changes made to eb8257dea5802a004af9cccacb30af98440e2172.
This breaks restart, at least for Wi-Fi devices:
#0 0x00007ffff5ee8771 in _g_log_abort (breakpoint=breakpoint@entry=1) at gmessages.c:554
#1 0x00007ffff5ee9a5b in g_logv (log_domain=0x7ffff671a738 "GLib-GIO", log_level=G_LOG_LEVEL_CRITICAL, format=<optimized out>, args=args@entry=0x7fffffffd720) at gmessages.c:1362
#2 0x00007ffff5ee9baf in g_log (log_domain=log_domain@entry=0x7ffff671a738 "GLib-GIO", log_level=log_level@entry=G_LOG_LEVEL_CRITICAL, format=format@entry=0x7ffff5f347ea "%s: assertion '%s' failed") at gmessages.c:1403
#3 0x00007ffff5eea0f9 in g_return_if_fail_warning (log_domain=log_domain@entry=0x7ffff671a738 "GLib-GIO", pretty_function=pretty_function@entry=0x7ffff673fc10 <__func__.25628> "g_dbus_proxy_call_internal", expression=expression@entry=0x7ffff673fb1c "G_IS_DBUS_PROXY (proxy)") at gmessages.c:2702
#4 0x00007ffff66cdc5f in g_dbus_proxy_call_internal (proxy=0x0, method_name=method_name@entry=0x555555810510 "Scan", parameters=0x555555c7a530, flags=flags@entry=G_DBUS_CALL_FLAGS_NONE, timeout_msec=timeout_msec@entry=-1, fd_list=fd_list@entry=0x0, cancellable=0x0, callback=0x55555574cb96 <scan_request_cb>, user_data=0x555555ac2220) at gdbusproxy.c:2664
#5 0x00007ffff66cf686 in g_dbus_proxy_call (proxy=<optimized out>, method_name=method_name@entry=0x555555810510 "Scan", parameters=<optimized out>, flags=flags@entry=G_DBUS_CALL_FLAGS_NONE, timeout_msec=timeout_msec@entry=-1, cancellable=cancellable@entry=0x0, callback=0x55555574cb96 <scan_request_cb>, user_data=0x555555ac2220) at gdbusproxy.c:2970
#6 0x000055555574e026 in nm_supplicant_interface_request_scan (self=0x555555ac2220 [NMSupplicantInterface], ssids=ssids@entry=0x0) at src/supplicant/nm-supplicant-interface.c:1821
#7 0x00007fffe1038276 in request_wireless_scan (self=self@entry=0x555555c6ee60 [NMDeviceWifi], periodic=periodic@entry=0, force_if_scanning=force_if_scanning@entry=0, ssids=<optimized out>, ssids@entry=0x0) at src/devices/wifi/nm-device-wifi.c:1347
#8 0x00007fffe1039011 in device_state_changed (device=0x555555c6ee60 [NMDeviceWifi], new_state=NM_DEVICE_STATE_DISCONNECTED, old_state=<optimized out>, reason=<optimized out>)
at src/devices/wifi/nm-device-wifi.c:2998
#9 0x00007ffff432ed1e in ffi_call_unix64 () at ../src/x86/unix64.S:76
#10 0x00007ffff432e68f in ffi_call (cif=cif@entry=0x7fffffffdc70, fn=fn@entry=0x7fffe1038e1e <device_state_changed>, rvalue=<optimized out>, avalue=avalue@entry=0x7fffffffdb60)
at ../src/x86/ffi64.c:525
#15 0x00007ffff63db66f in <emit signal ??? on instance 0x555555c6ee60 [NMDeviceWifi]> (instance=instance@entry=0x555555c6ee60, signal_id=<optimized out>, detail=detail@entry=0)
at gsignal.c:3447
#11 0x00007ffff63bff39 in g_cclosure_marshal_generic (closure=0x555555c22ea0, return_gvalue=0x0, n_param_values=<optimized out>, param_values=<optimized out>, invocation_hint=<optimized out>, marshal_data=<optimized out>) at gclosure.c:1490
#12 0x00007ffff63bf73d in g_closure_invoke (closure=0x555555c22ea0, return_value=0x0, n_param_values=4, param_values=0x7fffffffdea0, invocation_hint=0x7fffffffde20) at gclosure.c:804
#13 0x00007ffff63d1f30 in signal_emit_unlocked_R (node=node@entry=0x555555c22750, detail=detail@entry=0, instance=instance@entry=0x555555c6ee60, emission_return=emission_return@entry=0x0, instance_and_params=instance_and_params@entry=0x7fffffffdea0) at gsignal.c:3673
#14 0x00007ffff63dad05 in g_signal_emit_valist (instance=0x555555c6ee60, signal_id=<optimized out>, detail=0, var_args=var_args@entry=0x7fffffffe0b0) at gsignal.c:3391
#16 0x00005555556f0f18 in _set_state_full (self=self@entry=0x555555c6ee60 [NMDeviceWifi], state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED, quitting=quitting@entry=0) at src/devices/nm-device.c:13268
#17 0x00005555556f1774 in nm_device_state_changed (self=self@entry=0x555555c6ee60 [NMDeviceWifi], state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED) at src/devices/nm-device.c:13435
#18 0x00005555555bcf95 in recheck_assume_connection (self=self@entry=0x555555b09140 [NMManager], device=device@entry=0x555555c6ee60 [NMDeviceWifi]) at src/nm-manager.c:2297
#19 0x00005555555bd53e in _device_realize_finish (self=self@entry=0x555555b09140 [NMManager], device=device@entry=0x555555c6ee60 [NMDeviceWifi], plink=plink@entry=0x555555ae43d8)
at src/nm-manager.c:2473
#20 0x00005555555c01d0 in platform_link_added (self=self@entry=0x555555b09140 [NMManager], ifindex=<optimized out>, plink=plink@entry=0x555555ae43d8, guess_assume=<optimized out>, dev_state=<optimized out>) at src/nm-manager.c:2789
#21 0x00005555555c0cec in platform_query_devices (self=self@entry=0x555555b09140 [NMManager]) at src/nm-manager.c:2901
#22 0x00005555555c439e in nm_manager_start (self=0x555555b09140 [NMManager], error=<optimized out>) at src/nm-manager.c:5632
#23 0x000055555558498e in main (argc=<optimized out>, argv=<optimized out>) at src/main.c:413
2018-04-04 14:48:52 +02:00
|
|
|
}
|
2021-05-07 13:57:04 +02:00
|
|
|
} else {
|
|
|
|
|
connection = NULL;
|
|
|
|
|
only_by_uuid = TRUE;
|
|
|
|
|
g_set_error(&gen_error,
|
|
|
|
|
NM_DEVICE_ERROR,
|
|
|
|
|
NM_DEVICE_ERROR_FAILED,
|
|
|
|
|
"device %s has 'keep-configuration=no'",
|
|
|
|
|
nm_device_get_iface(device));
|
2020-09-28 16:03:33 +02:00
|
|
|
}
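For context, a hedged example of how this branch is driven from
NetworkManager.conf: the "keep-configuration" device option (the section
name and device match below are arbitrary) can disable assuming the
existing interface configuration.

  [device-example]
  match-device=interface-name:eth0
  keep-configuration=no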
|
|
|
|
|
|
manager: fix preserving assume state during activation
Originally 850c977 "device: track system interface state in NMDevice",
intended that a connection can only be assumed initially when seeing
a device for the first time. Assuming a connection later was to be
prevented by setting device's sys-iface-state to MANAGED.
That changed the behavior too much, because we used to assume external
connections also when they were activated later on. So this was attempted
to be fixed by:
- acf1067 nm-manager: try assuming connections on managed devices
- b6b7d90 manager: avoid generating in memory connections during startup for managed devices
It's probably just wrong to prevent assuming connections based on the
sys-iface-state. So drop the check for sys-iface-state from
recheck_assume_connection(). Now, we can assume anytime on managed,
disconnected interfaces, like previously.
Btw, note that priv->startup is totally wrong to check there, because
priv->startup has the sole purpose of tracking the startup-complete property.
Startup, as far as NMManager is concerned, is platform_query_devices().
However, the problem is that we only assume connections (contrary to
doing external activation) when we have a connection-uuid from the state
file or with guess-assume during startup.
When assuming a master device, it can fail with
(nm-bond): ignoring generated connection (IPv6LL-only and not in master-slave relationship)
thus, for internal reasons the device cannot be assumed yet.
Fix that by attaching the assume-state to the device, so that on multiple
recheck_assume_connection() calls we still try to assume. Whenever we try
to assume the connection and it fails due to external reasons (like, the connection
no longer matching), we clear the assume state, so that we only try as
long as there are internal reasons why assuming fails.
https://bugzilla.redhat.com/show_bug.cgi?id=1452062
2017-06-07 17:34:47 +02:00
|
|
|
nm_device_assume_state_get(device, &assume_state_guess_assume, &assume_state_connection_uuid);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2021-05-07 10:13:26 +02:00
|
|
|
/* If the device state file indicates a connection that was active before NM
|
|
|
|
|
* restarted, perform basic sanity checks on it. */
|
2017-06-07 17:34:47 +02:00
|
|
|
if (assume_state_connection_uuid
|
|
|
|
|
&& (connection_checked =
|
|
|
|
|
nm_settings_get_connection_by_uuid(priv->settings, assume_state_connection_uuid))
|
2018-08-08 11:33:31 +02:00
|
|
|
&& new_activation_allowed_for_connection(self, connection_checked)
|
2018-08-11 11:08:17 +02:00
|
|
|
&& nm_device_check_connection_compatible(
|
|
|
|
|
device,
|
|
|
|
|
nm_settings_connection_get_connection(connection_checked),
|
|
|
|
|
NULL)) {
|
2018-04-05 11:19:54 +02:00
|
|
|
if (connection) {
|
2018-08-11 11:08:17 +02:00
|
|
|
NMConnection *con = nm_settings_connection_get_connection(connection_checked);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
if (nm_utils_match_connection((NMConnection *[]){con, NULL},
|
|
|
|
|
connection,
|
|
|
|
|
TRUE,
|
|
|
|
|
nm_device_has_carrier(device),
|
|
|
|
|
nm_device_get_route_metric(device, AF_INET),
|
|
|
|
|
nm_device_get_route_metric(device, AF_INET6),
|
|
|
|
|
NULL,
|
|
|
|
|
NULL))
|
|
|
|
|
matched = connection_checked;
|
2018-04-05 11:19:54 +02:00
|
|
|
} else
|
|
|
|
|
matched = connection_checked;
|
2018-08-11 11:08:17 +02:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-05 11:19:54 +02:00
|
|
|
if (!matched && only_by_uuid) {
|
|
|
|
|
_LOG2D(LOGD_DEVICE, device, "assume: cannot generate connection: %s", gen_error->message);
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
manager: fix preserving assume state during activation
Originally 850c977 "device: track system interface state in NMDevice",
intended that a connection can only be assumed initially when seeing
a device for the first time. Assuming a connection later was to be
prevented by setting device's sys-iface-state to MANAGED.
That changed too much in behavior, because we used to assume external
connections also when they are activated later on. So this was attempted
to get fixed by
- acf1067 nm-manager: try assuming connections on managed devices
- b6b7d90 manager: avoid generating in memory connections during startup for managed devices
It's probably just wrong to prevent assuming connections based on the
sys-iface-state. So drop the check for sys-iface-state from
recheck_assume_connection(). Now, we can assume anytime on managed,
disconnected interfaces, like previously.
Btw, note that priv->startup is entirely the wrong thing to check there, because
priv->startup has the sole purpose of tracking the startup-complete property.
Startup, as far as NMManager is concerned, is platform_query_devices().
However, the problem is that we only assume connections (contrary to
doing external activation) when we have a connection-uuid from the state
file or with guess-assume during startup.
When assuming a master device, it can fail with
(nm-bond): ignoring generated connection (IPv6LL-only and not in master-slave relationship)
thus, for internal reasons the device cannot be assumed yet.
Fix that by attaching the assume-state to the device, so that on multiple
recheck_assume_connection() calls we still try to assume. Whenever we try
to assume the connection and it fails due to external reasons (like the connection
no longer matching), we clear the assume state, so that we keep retrying
only as long as assuming fails for internal reasons.
https://bugzilla.redhat.com/show_bug.cgi?id=1452062
2017-06-07 17:34:47 +02:00
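A minimal sketch of the retry rule described above; assume_failed_externally
is a made-up flag for illustration, while nm_device_assume_state_reset() is
the real helper used later in this function:

/* Sketch only: keep the device's assume state across rechecks unless
 * assuming failed for an external reason. */
static void
example_after_assume_attempt(NMDevice *device, gboolean assume_failed_externally)
{
    if (assume_failed_externally) {
        /* e.g. the profile no longer matches the device: stop trying. */
        nm_device_assume_state_reset(device);
        return;
    }
    /* Internal failure (e.g. master not ready yet): keep the assume state
     * so a later recheck_assume_connection() call can retry. */
}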
|
|
|
if (!matched && assume_state_guess_assume) {
|
2018-08-11 11:08:17 +02:00
|
|
|
gs_free NMSettingsConnection **sett_conns = NULL;
|
device: assume matching connections during first start
Since commit 2d1b85f (th/assume-vs-unmanaged-bgo746440), we clearly
distinguish between two modes when encountering devices with external
IP configuration:
a) external devices. For those devices we generate a volatile in-memory
connection and pretend it's active. However, the device must not be
touched by NetworkManager in any way.
b) assume, seamless take over. Mostly for restart of NetworkManager,
we activate a connection gracefully without going through a down-up
cycle. After the device reaches the activated state, the device is
considered fully managed. For this, only an existing, non-volatile
connection can be used.
Before 'th/assume-vs-unmanaged-bgo746440', the behaviors were not
clearly separated.
Since then, we only choose to assume a connection (b) when the state
file indicates a matching connection. Now, extend this to also assume
connections when:
- during first-start (not after a restart) when there is no
state file yet.
- and, if we have an existing, non-volatile connection which
matches the device's configuration.
This patch lets NetworkManager assume connections also on first start.
That is for example useful when handing over network configuration from
initrd.
This only applies to existing, permanent, matching(!) connections, so it is a
good guess that the user wants NM to take over this interface. This brings us
closer to the previous behavior before 'th/assume-vs-unmanaged-bgo746440'.
https://bugzilla.redhat.com/show_bug.cgi?id=1439220
(cherry picked from commit 27b2477cb7dad2410c88c7dfca51f3aad208b881)
2017-04-19 16:16:12 +02:00
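The two modes above map onto how a matched profile is treated later in this
function: externally-activated profiles are merely taken over for display,
while persistent matches are assumed. A compact sketch of the decision,
mirroring the flag check made further down:

/* Sketch of the decision described above, assuming the daemon's
 * NM_FLAGS_HAS() macro and settings-connection flag accessors. */
static gboolean
example_is_external_takeover(NMSettingsConnection *matched)
{
    return NM_FLAGS_HAS(nm_settings_connection_get_flags(matched),
                        NM_SETTINGS_CONNECTION_INT_FLAGS_EXTERNAL);
}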
|
|
|
guint len, i, j;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2021-05-07 10:13:26 +02:00
|
|
|
/* @assume_state_guess_assume=TRUE means this is the first start of NM
|
|
|
|
|
* and the state file contains no UUID. Search persistent connections
|
|
|
|
|
* for a matching candidate. */
|
2018-08-11 11:08:17 +02:00
|
|
|
sett_conns = nm_manager_get_activatable_connections(self, FALSE, FALSE, &len);
|
2017-04-19 16:16:12 +02:00
|
|
|
if (len > 0) {
|
|
|
|
|
for (i = 0, j = 0; i < len; i++) {
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn = sett_conns[i];
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
if (sett_conn != connection_checked
|
|
|
|
|
&& nm_device_check_connection_compatible(
|
|
|
|
|
device,
|
|
|
|
|
nm_settings_connection_get_connection(sett_conn),
|
|
|
|
|
NULL))
|
|
|
|
|
sett_conns[j++] = sett_conn;
|
2017-04-19 16:16:12 +02:00
|
|
|
}
|
2018-08-11 11:08:17 +02:00
|
|
|
sett_conns[j] = NULL;
|
2017-04-19 16:16:12 +02:00
|
|
|
len = j;
|
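The loop above compacts the compatible profiles in place with the common
two-index filter idiom, NULL-terminates the array, and shrinks len. A
self-contained illustration with integers (keeping even values):

#include <glib.h>

int
main(void)
{
    int   arr[] = {1, 2, 3, 4, 6};
    guint len   = G_N_ELEMENTS(arr);
    guint i, j;

    /* Keep only the elements that pass the predicate, compacting in place. */
    for (i = 0, j = 0; i < len; i++) {
        if (arr[i] % 2 == 0)
            arr[j++] = arr[i];
    }
    len = j;

    for (i = 0; i < len; i++)
        g_print("%d\n", arr[i]); /* prints 2, 4, 6 */
    return 0;
}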
2018-08-11 11:08:17 +02:00
|
|
|
if (len > 0) {
|
|
|
|
|
gs_free NMConnection **conns = NULL;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMConnection *con;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
g_qsort_with_data(sett_conns,
|
|
|
|
|
len,
|
|
|
|
|
sizeof(sett_conns[0]),
|
|
|
|
|
nm_settings_connection_cmp_timestamp_p_with_data,
|
|
|
|
|
NULL);
|
2020-09-28 16:03:33 +02:00
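For readers unfamiliar with g_qsort_with_data() used above: it is GLib's
qsort variant whose comparator receives an extra user-data pointer. A
self-contained example with a made-up comparator sorting integers in
descending order:

#include <glib.h>

static gint
cmp_int_desc(gconstpointer pa, gconstpointer pb, gpointer user_data)
{
    const int *a = pa;
    const int *b = pb;

    /* Descending: larger values sort first. */
    return (*b > *a) - (*b < *a);
}

int
main(void)
{
    int values[] = {3, 1, 2};

    g_qsort_with_data(values, G_N_ELEMENTS(values), sizeof(values[0]), cmp_int_desc, NULL);
    g_print("%d %d %d\n", values[0], values[1], values[2]); /* 3 2 1 */
    return 0;
}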
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
conns = nm_settings_connections_array_to_connections(sett_conns, len);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
con = nm_utils_match_connection(conns,
|
|
|
|
|
connection,
|
|
|
|
|
FALSE,
|
|
|
|
|
nm_device_has_carrier(device),
|
|
|
|
|
nm_device_get_route_metric(device, AF_INET),
|
|
|
|
|
nm_device_get_route_metric(device, AF_INET6),
|
|
|
|
|
NULL,
|
|
|
|
|
NULL);
|
|
|
|
|
if (con) {
|
|
|
|
|
for (i = 0; i < len; i++) {
|
|
|
|
|
if (conns[i] == con) {
|
|
|
|
|
matched = sett_conns[i];
|
|
|
|
|
break;
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
2018-08-11 11:08:17 +02:00
|
|
|
}
|
|
|
|
|
nm_assert(matched);
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2018-08-11 11:08:17 +02:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-04-19 16:16:12 +02:00
|
|
|
if (matched) {
|
2020-06-09 09:10:53 +02:00
|
|
|
if (NM_FLAGS_HAS(nm_settings_connection_get_flags(matched),
|
|
|
|
|
NM_SETTINGS_CONNECTION_INT_FLAGS_EXTERNAL)) {
|
|
|
|
|
_LOG2D(LOGD_DEVICE,
|
|
|
|
|
device,
|
|
|
|
|
"assume: take over previous connection '%s' (%s)",
|
|
|
|
|
nm_settings_connection_get_id(matched),
|
|
|
|
|
nm_settings_connection_get_uuid(matched));
|
|
|
|
|
NM_SET_OUT(out_generated, TRUE);
|
|
|
|
|
} else {
|
|
|
|
|
_LOG2I(LOGD_DEVICE,
|
|
|
|
|
device,
|
|
|
|
|
"assume: will attempt to assume matching connection '%s' (%s)%s",
|
|
|
|
|
nm_settings_connection_get_id(matched),
|
|
|
|
|
nm_settings_connection_get_uuid(matched),
|
|
|
|
|
assume_state_connection_uuid
|
|
|
|
|
&& nm_streq(assume_state_connection_uuid,
|
|
|
|
|
nm_settings_connection_get_uuid(matched))
|
|
|
|
|
? " (indicated)"
|
|
|
|
|
: " (guessed)");
|
|
|
|
|
}
|
2017-06-07 17:34:47 +02:00
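NM_SET_OUT() above assigns an optional out parameter only when the caller
passed a non-NULL pointer. A simplified stand-in definition to illustrate
the idiom (the real macro lives in the NM utility headers):

#include <glib.h>

/* Simplified stand-in for NM_SET_OUT(); illustration only. */
#define EXAMPLE_SET_OUT(out, value) \
    G_STMT_START                    \
    {                               \
        if ((out))                  \
            *(out) = (value);       \
    }                               \
    G_STMT_END

static void
example_lookup(gboolean *out_generated)
{
    /* Safe whether or not the caller wants the flag. */
    EXAMPLE_SET_OUT(out_generated, TRUE);
}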
|
|
|
nm_device_assume_state_reset(device);
|
device: assume matching connections during first start
Since commit 2d1b85f (th/assume-vs-unmanaged-bgo746440), we clearly
distinguish between two modes when encountering devices with external
IP configuration:
a) external devices. For those devices we generate a volatile in-memory
connection and pretend it's active. However, the device must not be
touched by NetworkManager in any way.
b) assume, seamless take over. Mostly for restart of NetworkManager,
we activate a connection gracefully without going through an down-up
cycle. After the device reaches activated state, the device is
considered fully managed. For this only an existing, non volatile
connection can be used.
Before 'th/assume-vs-unmanaged-bgo746440', the behaviors were not
clearly separated.
Since then, we only choose to assume a connection (b) when the state
file indicates a matching connection. Now, extend this to also assume
connections when:
- during first-start (not after a restart) when there is no
state file yet.
- and, if we have an existing, non volatile, connection which
matches the device's configuration.
This patch lets NetworkManager assume connection also on first start.
That is for example useful when handing over network configuration from
initrd.
This only applies to existing, permanent, matching(!) connections, so it is a
good guess that the user wants NM to take over this interface. This brings us
closer to the previous behavior before 'th/assume-vs-unmanaged-bgo746440'.
https://bugzilla.redhat.com/show_bug.cgi?id=1439220
(cherry picked from commit 27b2477cb7dad2410c88c7dfca51f3aad208b881)
2017-04-19 16:16:12 +02:00
|
|
|
return matched;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2021-05-07 10:13:26 +02:00
|
|
|
/* When no configured connection matches the generated connection, we keep
|
|
|
|
|
* the generated connection instead. */
|
2017-06-07 16:41:06 +02:00
|
|
|
_LOG2D(LOGD_DEVICE,
|
|
|
|
|
device,
|
|
|
|
|
"assume: generated connection '%s' (%s)",
|
|
|
|
|
nm_connection_get_id(connection),
|
|
|
|
|
nm_connection_get_uuid(connection));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-06-07 17:34:47 +02:00
|
|
|
nm_device_assume_state_reset(device);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: rework tracking settings connections and settings plugins
Completely rework how settings plugin handle connections and how
NMSettings tracks the list of connections.
Previously, settings plugins would return objects of (a subtype of) type
NMSettingsConnection. The NMSettingsConnection was tightly coupled with
the settings plugin. That has a lot of downsides.
Change that. When changing this basic relation of how settings connections
are tracked, everything falls apart. That's why this is a huge change.
Also, since I have to largely rewrite the settings plugins anyway, I also
added support for multiple keyfile directories, let only the keyfile plugin
handle in-memory connections, and (partly) use copy-on-write NMConnection
instances. I don't want to spend effort rewriting large parts while
preserving the old way, which should change anyway. E.g. while rewriting ifcfg-rh,
I don't want to let it handle in-memory connections because that's not right
long-term.
--
If the settings plugins themselves create subtypes of NMSettingsConnection
instances, then a lot of knowledge about tracking connections moves
to the plugins.
Just try to follow in the code what happens during nm_settings_add_connection().
Note how the logic is spread out:
- nm_settings_add_connection() calls plugin's add_connection()
- add_connection() creates a NMSettingsConnection subtype
- the plugin has to know that it's called during add-connection and
must not emit the NM_SETTINGS_PLUGIN_CONNECTION_ADDED signal
- NMSettings calls claim_connection(), which hooks up the new
NMSettingsConnection instance and configures the instance
(like calling nm_settings_connection_added()).
This summary does not sound like a lot, but try to follow that code. The logic
is all over the place.
Instead, settings plugins should have a very simple API for adding, modifying,
deleting, loading and reloading connections. All the plugin does is to return a
NMSettingsStorage handle. The storage instance is a handle to identify a profile
in storage (e.g. a particular file). The settings plugin is free to subtype
NMSettingsStorage, but it's not necessary.
There are no more events raised, and the settings plugin implements the small
API in a straightforward manner.
NMSettings now drives all of this. Even NMSettingsConnection has now
very little concern about how it's tracked and delegates only to NMSettings.
This should make settings plugins simpler. Currently settings plugins
are so cumbersome to implement that we avoid having them. It should not be
like that and it should be easy, beneficial and lightweight to create a new
settings plugin.
Note also how the settings plugins no longer care about duplicate UUIDs.
Duplicated UUIDs are a fact of life and NMSettings must handle them. No
need to overly concern settings plugins with that.
--
NMSettingsConnection is exposed directly on D-Bus (being a subtype of
NMDBusObject) but it was also a GObject type provided by the settings
plugin. Hence, it was not possible to migrate a profile from one plugin to
another.
However that would be useful when one profile does not support a
connection type (like ifcfg-rh not supporting VPN). Currently such
migration is not implemented except for migrating them to/from keyfile's
run directory. The problem is that migrating profiles in general is
complicated but in some cases it is important to do.
For example checkpoint rollback should recreate the profile in the right
settings plugin, not just add it to persistent storage. This is not yet
properly implemented.
--
Previously, both the keyfile and ifcfg-rh plugins implemented in-memory (unsaved)
profiles, while the ifupdown plugin could not handle them. That meant duplication of code,
and an ifupdown profile could not be modified or made unsaved.
This is now unified and only the keyfile plugin handles in-memory profiles (bgo #744711).
Also, NMSettings is aware of such profiles and treats them specially.
In particular, NMSettings drives the migration between persistent and non-persistent
storage.
Note that a settings plugin may create truly generated, in-memory profiles.
The settings plugin is free to generate and persist the profiles in any way it
wishes. But the concept of "unsaved" profiles is now something explicitly handled
by the keyfile plugin. Also, these "unsaved" keyfile profiles are persisted to the file system
too, in the /run directory. This is great for two reasons: first of all, all
profiles from keyfile storage in fact have a backing file -- even the
unsaved ones. It also means you can create "unsaved" profiles in /run
and load them with `nmcli connection load`, meaning there is a file
based API for creating unsaved profiles.
The other advantage is that these profiles now survive restarting
NetworkManager. It's paramount that restarting the daemon is as
non-disruptive as possible. Persisting unsaved files to /run improves
here significantly.
--
In the past, NMSettingsConnection also implemented NMConnection interface.
That was already changed a while ago and instead users call now
nm_settings_connection_get_connection() to delegate to a
NMSimpleConnection. What however still happened was that the NMConnection
instance never got swapped; instead, the instance was modified with
nm_connection_replace_settings_from_connection(), clear-secrets, etc.
Change that and treat the NMConnection instance as immutable. Instead of modifying
it, reference/clone a new instance. Previously, when somebody wanted
to keep a reference to an NMConnection, the profile would be cloned.
Now, it is supposed to be safe to reference the instance directly, and everybody
must ensure not to modify the instance. nmtst_connection_assert_unchanging()
should help with that.
The point is that the settings plugins may keep references to the
NMConnection instance, and so does the NMSettingsConnection. We want
to avoid cloning the instances as long as they are the same.
Likewise, the device's applied connection can now also be referenced
instead of cloning it. This is not yet done, and possibly there are
further improvements possible.
--
Also implement multiple keyfile directories /usr/lib, /etc, /run (rh #1674545,
bgo #772414).
It was always the case that multiple files could provide the same UUID
(both in the case of keyfile and ifcfg-rh). For the keyfile plugin, if a profile in
read-only storage in /usr/lib gets modified, then it actually gets stored in
/etc (or /run, if the profile is unsaved).
--
While at it, make /etc/network/interfaces profiles for ifupdown plugin reloadable.
--
https://bugzilla.gnome.org/show_bug.cgi?id=772414
https://bugzilla.gnome.org/show_bug.cgi?id=744711
https://bugzilla.redhat.com/show_bug.cgi?id=1674545
2019-06-13 17:12:20 +02:00
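A hedged sketch of the simplified plugin API shape that the rework above
describes; the struct and vfunc names here are illustrative assumptions, not
the exact NMSettingsPlugin interface:

#include <glib.h>

/* Sketch: plugins return opaque storage handles identifying a profile in
 * storage (e.g. a particular file) and implement plain operations, while
 * NMSettings drives all tracking. Names are assumptions for illustration;
 * NMConnection is assumed from the daemon headers. */
typedef struct _ExampleStorage ExampleStorage; /* stands in for NMSettingsStorage */

typedef struct {
    ExampleStorage *(*add_connection)(gpointer plugin, NMConnection *connection, GError **error);
    gboolean (*update_connection)(gpointer         plugin,
                                  ExampleStorage  *storage,
                                  NMConnection    *connection,
                                  GError         **error);
    gboolean (*delete_connection)(gpointer plugin, ExampleStorage *storage, GError **error);
    void (*reload_connections)(gpointer plugin);
} ExamplePluginOps;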
|
|
|
if (!nm_settings_add_connection(priv->settings,
|
2022-03-14 10:20:11 +01:00
|
|
|
NULL,
|
settings: rework tracking settings connections and settings plugins
Completely rework how settings plugin handle connections and how
NMSettings tracks the list of connections.
Previously, settings plugins would return objects of (a subtype of) type
NMSettingsConnection. The NMSettingsConnection was tightly coupled with
the settings plugin. That has a lot of downsides.
Change that. When changing this basic relation how settings connections
are tracked, everything falls appart. That's why this is a huge change.
Also, since I have to largely rewrite the settings plugins, I also
added support for multiple keyfile directories, handle in-memory
connections only by keyfile plugin and (partly) use copy-on-write NMConnection
instances. I don't want to spend effort rewriting large parts while
preserving the old way, that anyway should change. E.g. while rewriting ifcfg-rh,
I don't want to let it handle in-memory connections because that's not right
long-term.
--
If the settings plugins themself create subtypes of NMSettingsConnection
instances, then a lot of knowledge about tracking connections moves
to the plugins.
Just try to follow the code what happend during nm_settings_add_connection().
Note how the logic is spread out:
- nm_settings_add_connection() calls plugin's add_connection()
- add_connection() creates a NMSettingsConnection subtype
- the plugin has to know that it's called during add-connection and
not emit NM_SETTINGS_PLUGIN_CONNECTION_ADDED signal
- NMSettings calls claim_connection() which hocks up the new
NMSettingsConnection instance and configures the instance
(like calling nm_settings_connection_added()).
This summary does not sound like a lot, but try to follow that code. The logic
is all over the place.
Instead, settings plugins should have a very simple API for adding, modifying,
deleting, loading and reloading connections. All the plugin does is to return a
NMSettingsStorage handle. The storage instance is a handle to identify a profile
in storage (e.g. a particular file). The settings plugin is free to subtype
NMSettingsStorage, but it's not necessary.
There are no more events raised, and the settings plugin implements the small
API in a straightforward manner.
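For a sense of what that small API amounts to, here is a minimal sketch of such
a plugin vtable; the names and signatures below are illustrative assumptions,
not the exact declarations from nm-settings-plugin.h:

    /* Hypothetical sketch: every mutating operation hands back an
     * NMSettingsStorage handle that identifies the profile in storage. */
    typedef struct {
        GObjectClass parent;
        NMSettingsStorage *(*add_connection)(NMSettingsPlugin *self,
                                             NMConnection *connection,
                                             GError **error);
        NMSettingsStorage *(*update_connection)(NMSettingsPlugin *self,
                                                NMSettingsStorage *storage,
                                                NMConnection *connection,
                                                GError **error);
        gboolean (*delete_connection)(NMSettingsPlugin *self,
                                      NMSettingsStorage *storage,
                                      GError **error);
        gboolean (*reload_connections)(NMSettingsPlugin *self, GError **error);
    } NMSettingsPluginClass;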
NMSettings now drives all of this. Even NMSettingsConnection has now
very little concern about how it's tracked and delegates only to NMSettings.
This should make settings plugins simpler. Currently settings plugins
are so cumbersome to implement that we avoid having them. It should not be
like that: it should be easy, beneficial and lightweight to create a new
settings plugin.
Note also how the settings plugins no longer care about duplicate UUIDs.
Duplicated UUIDs are a fact of life and NMSettings must handle them. No
need to overly concern settings plugins with that.
--
NMSettingsConnection is exposed directly on D-Bus (being a subtype of
NMDBusObject) but it was also a GObject type provided by the settings
plugin. Hence, it was not possible to migrate a profile from one plugin to
another.
However, that would be useful when one plugin does not support a
connection type (like ifcfg-rh not supporting VPN). Currently such
migration is not implemented except for migrating them to/from keyfile's
run directory. The problem is that migrating profiles in general is
complicated but in some cases it is important to do.
For example checkpoint rollback should recreate the profile in the right
settings plugin, not just add it to persistent storage. This is not yet
properly implemented.
--
Previously, both the keyfile and ifcfg-rh plugins implemented in-memory (unsaved)
profiles, while the ifupdown plugin could not handle them. That meant duplication
of code, and an ifupdown profile could not be modified or made unsaved.
This is now unified and only keyfile plugin handles in-memory profiles (bgo #744711).
Also, NMSettings is aware of such profiles and treats them specially.
In particular, NMSettings drives the migration between persistent and non-persistent
storage.
Note that a settings plugin may create truly generated, in-memory profiles.
The settings plugin is free to generate and persist the profiles in any way it
wishes. But the concept of "unsaved" profiles is now something explicitly handled
by the keyfile plugin. Also, these "unsaved" keyfile profiles are persisted to the
file system too, to the /run directory. This is great for two reasons: first of
all, all profiles from keyfile storage in fact have a backing file -- even the
unsaved ones. It also means you can create "unsaved" profiles in /run
and load them with `nmcli connection load`, meaning there is a file-based
API for creating unsaved profiles.
The other advantage is that these profiles now survive restarting
NetworkManager. It's paramount that restarting the daemon is as
non-disruptive as possible. Persisting unsaved files to /run improves
here significantly.
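As a concrete illustration of that file-based API (the path, id and uuid below
are just example values): dropping a keyfile such as

    [connection]
    id=example
    uuid=2f09f9d9-4d4e-4a4b-9c3a-111111111111
    type=ethernet

under /run/NetworkManager/system-connections/example.nmconnection and running
`nmcli connection load /run/NetworkManager/system-connections/example.nmconnection`
yields an "unsaved" profile that lives only in /run.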
--
In the past, NMSettingsConnection also implemented NMConnection interface.
That was already changed a while ago and instead users call now
nm_settings_connection_get_connection() to delegate to a
NMSimpleConnection. What however still happened was that the NMConnection
instance gets never swapped but instead the instance was modified with
nm_connection_replace_settings_from_connection(), clear-secrets, etc.
Change that and treat the NMConnection instance as immutable. Instead of modifying
it, reference/clone a new instance. Previously, when somebody wanted to keep a
reference to an NMConnection, the profile had to be cloned. Now it is supposed
to be safe to reference the instance directly, and everybody
must ensure not to modify the instance. nmtst_connection_assert_unchanging()
should help with that.
The point is that the settings plugins may keep references to the
NMConnection instance, and so does the NMSettingsConnection. We want
to avoid cloning the instances as long as they are the same.
Likewise, the device's applied connection can now also be referenced
instead of cloning it. This is not yet done, and possibly there are
further improvements possible.
--
Also implement multiple keyfile directories /usr/lib, /etc, /run (rh #1674545,
bgo #772414).
It was always the case that multiple files could provide the same UUID
(both in case of keyfile and ifcfg-rh). For keyfile plugin, if a profile in
read-only storage in /usr/lib gets modified, then it gets actually stored in
/etc (or /run, if the profile is unsaved).
--
While at it, make /etc/network/interfaces profiles for ifupdown plugin reloadable.
--
https://bugzilla.gnome.org/show_bug.cgi?id=772414
https://bugzilla.gnome.org/show_bug.cgi?id=744711
https://bugzilla.redhat.com/show_bug.cgi?id=1674545
2019-06-13 17:12:20 +02:00
|
|
|
connection,
|
|
|
|
|
NM_SETTINGS_CONNECTION_PERSIST_MODE_IN_MEMORY_ONLY,
|
core,libnm: add AddConnection2() D-Bus API to block autoconnect from the start
It should be possible to add a profile with autoconnect blocked from the
start. Update2() has a %NM_SETTINGS_UPDATE2_FLAG_BLOCK_AUTOCONNECT flag to
block autoconnect, and so we need something similar when adding a connection.
As the existing AddConnection() and AddConnectionUnsaved() API is not
extensible, add AddConnection2() that has flags and room for additional
arguments.
Then add and implement the new flag %NM_SETTINGS_ADD_CONNECTION2_FLAG_BLOCK_AUTOCONNECT
for AddConnection2().
Note that libnm's nm_client_add_connection2() API can completely replace
the existing nm_client_add_connection_async() call. In particular, it
will automatically prefer to call the D-Bus methods AddConnection() and
AddConnectionUnsaved(), in order to work with server versions older than
1.20. The purpose of this is that when upgrading the package, the
running NetworkManager might still be older than the installed libnm.
Since nm_client_add_connection2_finish() also has a result
output, the caller needs to decide whether it cares about that result.
Hence there is an ignore_out_result argument, which allows falling back to
the old API. One might argue that a caller who doesn't care about the
output result, while still wanting to be backward compatible, should
itself choose between nm_client_add_connection_async() and
nm_client_add_connection2(). But it's more convenient if the
new function can fully replace the old one, so that the caller does not
need to switch which start/finish method to call.
https://bugzilla.redhat.com/show_bug.cgi?id=1677068
2019-07-09 15:22:01 +02:00
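As a usage sketch of the new call (illustrative client code against the public
libnm API, not part of this file; check the libnm headers for the exact
signatures and flag values):

    #include <NetworkManager.h>

    static void
    add_done(GObject *source, GAsyncResult *result, gpointer user_data)
    {
        GError             *error = NULL;
        NMRemoteConnection *remote;

        /* Passing NULL for out_result: we don't care about the extra result
         * dictionary, matching ignore_out_result=TRUE below. */
        remote = nm_client_add_connection2_finish(NM_CLIENT(source), result, NULL, &error);
        if (!remote) {
            g_printerr("add failed: %s\n", error->message);
            g_error_free(error);
            return;
        }
        g_object_unref(remote);
    }

    static void
    add_profile_autoconnect_blocked(NMClient *client, NMConnection *connection)
    {
        nm_client_add_connection2(client,
                                  nm_connection_to_dbus(connection, NM_CONNECTION_SERIALIZE_ALL),
                                  NM_SETTINGS_ADD_CONNECTION2_FLAG_TO_DISK
                                      | NM_SETTINGS_ADD_CONNECTION2_FLAG_BLOCK_AUTOCONNECT,
                                  NULL /* args */,
                                  TRUE /* ignore_out_result: allow fallback to AddConnection() */,
                                  NULL /* cancellable */,
                                  add_done,
                                  NULL);
    }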
|
|
|
NM_SETTINGS_CONNECTION_ADD_REASON_NONE,
|
core: add "external" flag for connections of external devices
When a device is not marked as unmanaged, but also not actively managed
by NetworkManager, then NetworkManager will generate an in-memory
profile to represent the active state, if the device is up and
configured (with an IP address).
Such profiles are commonly named like "eth0", and they are utterly
confusing to users, because they look as if NetworkManager actually
manages the device, when it really just shows that somebody else configures
the device.
We should express this better in the UI, hence add flags to indicate
that.
In practice, such profiles are UNSAVED, NM_GENERATED, and VOLATILE. But
add an explicit flag to represent that.
https://bugzilla.redhat.com/show_bug.cgi?id=1816202
2020-06-08 19:34:50 +02:00
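On the client side, the flag can be checked roughly like this (an illustrative
sketch; it assumes libnm >= 1.26, where NM_SETTINGS_CONNECTION_FLAG_EXTERNAL is
part of the public flags enum):

    /* Sketch: tell apart profiles that merely mirror external configuration. */
    static gboolean
    connection_is_external(NMRemoteConnection *remote)
    {
        return (nm_remote_connection_get_flags(remote)
                & NM_SETTINGS_CONNECTION_FLAG_EXTERNAL) != 0;
    }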
|
|
|
NM_SETTINGS_CONNECTION_INT_FLAGS_NM_GENERATED
|
|
|
|
|
| NM_SETTINGS_CONNECTION_INT_FLAGS_VOLATILE
|
|
|
|
|
| NM_SETTINGS_CONNECTION_INT_FLAGS_EXTERNAL,
|
settings: rework tracking settings connections and settings plugins
2019-06-13 17:12:20 +02:00
|
|
|
&added,
|
|
|
|
|
&error)) {
|
2017-06-07 16:41:06 +02:00
|
|
|
_LOG2W(LOGD_SETTINGS,
|
|
|
|
|
device,
|
|
|
|
|
"assume: failure to save generated connection '%s': %s",
|
2016-03-02 11:38:26 +01:00
|
|
|
nm_connection_get_id(connection),
|
2016-02-28 16:25:36 +01:00
|
|
|
error->message);
|
2017-06-07 16:41:06 +02:00
|
|
|
g_error_free(error);
|
|
|
|
|
return NULL;
|
2013-08-23 15:45:17 +02:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-06-07 16:41:06 +02:00
|
|
|
NM_SET_OUT(out_generated, TRUE);
|
|
|
|
|
return added;
|
2013-06-27 14:39:13 +02:00
|
|
|
}
|
|
|
|
|
|
2019-12-03 19:00:42 +03:00
|
|
|
static gboolean
|
|
|
|
|
copy_lease(const char *src, const char *dst)
|
|
|
|
|
{
|
2019-12-10 14:58:29 +01:00
|
|
|
nm_auto_close int src_fd = -1;
|
|
|
|
|
int dst_fd;
|
2019-12-11 18:55:07 +03:00
|
|
|
ssize_t res, size = SSIZE_MAX;
|
2019-12-03 19:00:42 +03:00
|
|
|
|
|
|
|
|
src_fd = open(src, O_RDONLY | O_CLOEXEC);
|
|
|
|
|
if (src_fd < 0)
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
|
|
dst_fd = open(dst, O_CREAT | O_EXCL | O_CLOEXEC | O_WRONLY, 0644);
|
2019-12-10 14:58:29 +01:00
|
|
|
if (dst_fd < 0)
|
2019-12-03 19:00:42 +03:00
|
|
|
return FALSE;
|
2019-12-10 14:58:29 +01:00
|
|
|
|
2019-12-11 18:55:07 +03:00
|
|
|
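/* Each sendfile() call copies up to the remaining byte budget; the loop
 * ends when it returns 0 (EOF, copy complete) or -1 (error, checked below). */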
while ((res = sendfile(dst_fd, src_fd, NULL, size)) > 0)
|
|
|
|
|
size -= res;
|
2019-12-03 19:00:42 +03:00
|
|
|
|
2019-12-10 14:58:29 +01:00
|
|
|
nm_close(dst_fd);
|
2019-12-03 19:00:42 +03:00
|
|
|
|
2019-12-10 14:58:29 +01:00
|
|
|
if (res != 0) {
|
|
|
|
|
unlink(dst);
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
2019-12-03 19:00:42 +03:00
|
|
|
|
2019-12-10 14:58:29 +01:00
|
|
|
return TRUE;
|
2019-12-03 19:00:42 +03:00
|
|
|
}
|
|
|
|
|
|
2014-06-20 20:13:14 +02:00
|
|
|
static gboolean
|
core: only assume connections that were managed in a previous run of NetworkManager
Before, we would have the concept of assumed connections, which is used
for (1) externally configured device that NetworkManager should not
touch and (2) connections that NetworkManager should gracefully take
over after a restart (seamlessly, non-destructively).
The behavior was unclear and mixed. It wasn't clear whether the device
is in no-touch mode (1) or gracefully take-over (2).
Previous commits already introduce separate activation types EXTERNAL (1)
and ASSUME (2).
Also, previously, we would for both (1) and (2) try to find a matching
connection and use it. That doesn't make sense for either.
In the external case (1), we should not pretend that an existing connection
is active. Let's always create a new in-memory connection for these
cases. Note that this means, external devices now will always generate
a connection, instead of pretending an existing one is active.
For the assume case (2), we shall not use nm_utils_match_connection() to
guess which connection might be active. It can only be the one that was
active on a previous run of NetworkManager. So, use the information from
the state file and try to activate it. If that fails, it is not an
assume activation type. Note that this means we now mostly do EXTERNAL
activation instead of ASSUME, because the state file information is only
available after a restart of NetworkManager.
2017-03-08 08:45:11 +01:00
|
|
|
recheck_assume_connection(NMManager *self, NMDevice *device)
|
2014-05-28 10:18:34 -04:00
|
|
|
{
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates for keeping the actual profile.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we could not treat NMConnection as
immutable and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
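In practice, the copy-on-write pattern this split enables looks like the
following sketch (using helpers that also appear later in this file):

    /* Borrow the immutable NMConnection behind the settings connection... */
    NMConnection *con = nm_settings_connection_get_connection(sett_conn);
    /* ...and clone it first whenever a modification is needed. */
    gs_unref_object NMConnection *clone = nm_simple_connection_new_clone(con);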
|
|
|
NMSettingsConnection *sett_conn;
|
2017-03-09 16:58:37 +01:00
|
|
|
gboolean was_unmanaged = FALSE;
|
|
|
|
|
gboolean generated = FALSE;
|
2014-08-01 22:46:49 +02:00
|
|
|
NMDeviceState state;
|
2019-07-31 16:12:22 +02:00
|
|
|
gboolean activation_type_assume;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2015-12-04 17:17:47 +01:00
|
|
|
g_return_val_if_fail(NM_IS_MANAGER(self), FALSE);
|
|
|
|
|
g_return_val_if_fail(NM_IS_DEVICE(device), FALSE);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
manager: fix preserving assume state during activation
Originally, commit 850c977 "device: track system interface state in NMDevice"
intended that a connection can only be assumed initially when seeing
a device for the first time. Assuming a connection later was to be
prevented by setting device's sys-iface-state to MANAGED.
That changed too much in behavior, because we used to assume external
connections also when they were activated later on. So attempts were made
to fix this with
- acf1067 nm-manager: try assuming connections on managed devices
- b6b7d90 manager: avoid generating in memory connections during startup for managed devices
It's probably just wrong to prevent assuming connections based on the
sys-iface-state. So drop the check for sys-iface-state from
recheck_assume_connection(). Now, we can assume anytime on managed,
disconnected interfaces, like previously.
Btw, note that priv->startup is totally wrong to check there, because
priv->startup has the sole purpose of tracking startup-complete property.
Startup, as far as NMManager is concerned, is platform_query_devices().
However, the problem is that we only assume connections (contrary to
doing external activation) when we have a connection-uuid from the state
file or with guess-assume during startup.
When assuming a master device, it can fail with
(nm-bond): ignoring generated connection (IPv6LL-only and not in master-slave relationship)
thus, for internal reasons the device cannot be assumed yet.
Fix that by attaching the assume-state to the device, so that on multiple
recheck_assume_connection() calls we still try to assume. Whenever we try
to assume the connection and it fails due to external reasons (like the connection
no longer matching), we clear the assume state, so that we only keep trying as
long as there are internal reasons why assuming fails.
https://bugzilla.redhat.com/show_bug.cgi?id=1452062
2017-06-07 17:34:47 +02:00
|
|
|
if (!nm_device_get_managed(device, FALSE)) {
|
2022-10-12 19:26:27 +02:00
|
|
|
/* If the device is unmanaged by NM_UNMANAGED_PLATFORM_INIT,
|
2021-05-11 14:44:20 +02:00
|
|
|
* don't reset the state now but wait until it becomes managed. */
|
2022-10-12 19:26:27 +02:00
|
|
|
if (nm_device_get_unmanaged_flags(device, NM_UNMANAGED_ALL) & ~NM_UNMANAGED_PLATFORM_INIT)
|
2021-05-11 14:44:20 +02:00
|
|
|
nm_device_assume_state_reset(device);
|
2022-08-30 13:41:36 +02:00
|
|
|
_LOG2D(LOGD_DEVICE, device, "assume: don't assume because device is not managed");
|
2014-06-20 20:13:14 +02:00
|
|
|
return FALSE;
|
manager: fix preserving assume state during activation
2017-06-07 17:34:47 +02:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-12-03 14:24:18 -06:00
|
|
|
state = nm_device_get_state(device);
|
manager: fix preserving assume state during activation
2017-06-07 17:34:47 +02:00
|
|
|
if (state > NM_DEVICE_STATE_DISCONNECTED) {
|
2017-06-07 13:30:08 +02:00
|
|
|
_LOG2D(LOGD_DEVICE,
|
|
|
|
|
device,
|
|
|
|
|
"assume: don't assume due to device state %s",
|
2021-08-09 14:54:17 +02:00
|
|
|
nm_device_state_to_string(state));
|
2017-03-13 15:34:14 +01:00
|
|
|
return FALSE;
|
manager: fix preserving assume state during activation
2017-06-07 17:34:47 +02:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn = get_existing_connection(self, device, &generated);
|
2017-06-07 16:41:06 +02:00
|
|
|
/* log no reason. get_existing_connection() already does it. */
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
if (!sett_conn)
|
2014-06-20 20:13:14 +02:00
|
|
|
return FALSE;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-07-31 16:12:22 +02:00
|
|
|
activation_type_assume = !generated;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-07-31 16:12:22 +02:00
|
|
|
if (state == NM_DEVICE_STATE_UNMANAGED) {
|
|
|
|
|
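/* Build the path of a DHCP lease the initramfs may have left behind for
 * this interface, and the per-connection lease path where NetworkManager's
 * dhclient backend expects it. */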
gs_free char *initramfs_lease =
|
|
|
|
|
g_strdup_printf(RUNSTATEDIR "/initramfs/net.%s.lease", nm_device_get_iface(device));
|
|
|
|
|
gs_free char *connection_lease = g_strdup_printf(NMRUNDIR "/dhclient-%s-%s.lease",
|
|
|
|
|
nm_settings_connection_get_uuid(sett_conn),
|
|
|
|
|
nm_device_get_iface(device));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-12-03 19:00:42 +03:00
|
|
|
if (copy_lease(initramfs_lease, connection_lease)) {
|
|
|
|
|
unlink(initramfs_lease);
|
2019-07-31 16:12:22 +02:00
|
|
|
/*
|
|
|
|
|
* We've managed to steal the lease used by initramfs before it
|
|
|
|
|
* killed off the dhclient. We need to take ownership of the configured
|
|
|
|
|
* connection and act like the device was configured by us.
|
2020-07-01 17:20:40 -04:00
|
|
|
* Otherwise, the address would just expire.
|
2019-07-31 16:12:22 +02:00
|
|
|
*/
|
|
|
|
|
_LOG2I(LOGD_DEVICE, device, "assume: taking over an initramfs-configured connection");
|
|
|
|
|
activation_type_assume = TRUE;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-07-31 16:12:22 +02:00
|
|
|
if (generated) {
|
2020-11-23 16:04:35 +01:00
|
|
|
gs_unref_object NMConnection *con2 = NULL;
|
|
|
|
|
|
|
|
|
|
con2 = nm_simple_connection_new_clone(
|
|
|
|
|
nm_settings_connection_get_connection(sett_conn));
|
|
|
|
|
|
2019-11-15 22:09:14 +01:00
|
|
|
/* Reset the IPv4 setting to an empty method=auto, regardless of what the assumption guessed. */
|
2020-11-23 16:04:35 +01:00
|
|
|
nm_connection_add_setting(con2,
|
2019-11-15 22:09:14 +01:00
|
|
|
g_object_new(NM_TYPE_SETTING_IP4_CONFIG,
|
|
|
|
|
NM_SETTING_IP_CONFIG_METHOD,
|
|
|
|
|
NM_SETTING_IP4_CONFIG_METHOD_AUTO,
|
|
|
|
|
NULL));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2021-04-03 17:32:06 +02:00
|
|
|
nm_settings_connection_update(
|
|
|
|
|
sett_conn,
|
2022-03-14 10:20:12 +01:00
|
|
|
NULL,
|
2021-04-03 17:32:06 +02:00
|
|
|
con2,
|
|
|
|
|
NM_SETTINGS_CONNECTION_PERSIST_MODE_KEEP,
|
|
|
|
|
NM_SETTINGS_CONNECTION_INT_FLAGS_NONE,
|
|
|
|
|
NM_SETTINGS_CONNECTION_INT_FLAGS_VOLATILE
|
|
|
|
|
| NM_SETTINGS_CONNECTION_INT_FLAGS_EXTERNAL,
|
|
|
|
|
NM_SETTINGS_CONNECTION_UPDATE_REASON_UPDATE_NON_SECRET,
|
|
|
|
|
"assume-initrd",
|
|
|
|
|
NULL);
|
2019-07-31 16:12:22 +02:00
|
|
|
}
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
|
|
|
|
|
2017-06-01 22:04:26 +02:00
|
|
|
nm_device_sys_iface_state_set(device,
|
2019-07-31 16:12:22 +02:00
|
|
|
activation_type_assume ? NM_DEVICE_SYS_IFACE_STATE_ASSUME
|
|
|
|
|
: NM_DEVICE_SYS_IFACE_STATE_EXTERNAL);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-03-09 16:58:37 +01:00
|
|
|
/* Move device to DISCONNECTED to activate the connection */
|
2014-08-01 22:46:49 +02:00
|
|
|
if (state == NM_DEVICE_STATE_UNMANAGED) {
|
2014-05-28 10:18:34 -04:00
|
|
|
was_unmanaged = TRUE;
|
|
|
|
|
nm_device_state_changed(device,
|
|
|
|
|
NM_DEVICE_STATE_UNAVAILABLE,
|
|
|
|
|
NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED);
|
|
|
|
|
}
|
2017-03-09 16:58:37 +01:00
|
|
|
if (nm_device_get_state(device) == NM_DEVICE_STATE_UNAVAILABLE) {
|
|
|
|
|
nm_device_state_changed(device,
|
|
|
|
|
NM_DEVICE_STATE_DISCONNECTED,
|
|
|
|
|
NM_DEVICE_STATE_REASON_CONNECTION_ASSUMED);
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-03-09 16:58:37 +01:00
|
|
|
g_return_val_if_fail(nm_device_get_state(device) >= NM_DEVICE_STATE_DISCONNECTED, FALSE);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-03-09 16:58:37 +01:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
gs_unref_object NMActiveConnection *active = NULL;
|
|
|
|
|
gs_unref_object NMAuthSubject *subject = NULL;
|
|
|
|
|
NMActiveConnection *master_ac;
|
|
|
|
|
GError *error = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-03-09 16:58:37 +01:00
|
|
|
subject = nm_auth_subject_new_internal();
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-01-11 17:07:03 -02:00
|
|
|
/* Note: the lifetime of the activation connection is always bound to the profile's visibility
|
core: improve and fix keeping connection active based on "connection.permissions"
By setting "connection.permissions", a profile is restricted to a
particular user.
That means for example, that another user cannot see, modify, delete,
activate or deactivate the profile. It also means, that the profile
will only autoconnect when the user is logged in (has a session).
Note that root is always able to activate the profile. Likewise, the
user is also allowed to manually activate the own profile, even if no
session currently exists (which can easily happen with `sudo`).
When the user logs out (the session goes away), we want to disconnect
the profile; however, there are conflicting goals here:
1) if the profile was activated by the root user, then the user logging out
should not disconnect the profile. The patch fixes that by not
binding the activation to the connection, if the activation is done
by the root user.
2) if the profile was activated by the owner when it had no session,
then it should stay alive until the user logs in (once) and logs
out again. This is already handled by the previous commit.
Yes, this point is odd. If you first do
$ sudo -u $OTHER_USER nmcli connection up $PROFILE
the profile activates despite not having a session. If you then
$ ssh guest@localhost nmcli device
you'll still see the profile active. However, the moment the SSH session
ends, a session closes and the profile disconnects. It's unclear, how to
solve that any better. I think, a user who cares about this, should not
activate the profile without having a session in the first place.
There are quite some special cases, in particular with internal
activations. In those cases we need to decide whether to bind the
activation to the profile's visibility.
Also, expose the "bind" setting in the D-Bus API. Note, that in the future
this flag may be modified via D-Bus API. Like we may also add related API
that allows to tweak the lifetime of the activation.
Also, I think we broke handling of connection visiblity with 37e8c53eeed
"core: Introduce helper class to track connection keep alive". This
should be fixed now too, with improved behavior.
Fixes: 37e8c53eeed579fe34a68819cd12f3295d581394
https://bugzilla.redhat.com/show_bug.cgi?id=1530977
2018-11-21 13:30:16 +01:00
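For context, the restriction discussed above comes from the very property in
the subject, set for example like this (profile and user names illustrative):

    $ nmcli connection modify "$PROFILE" connection.permissions "user:alice"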
|
|
|
* via NM_ACTIVATION_STATE_FLAG_LIFETIME_BOUND_TO_PROFILE_VISIBILITY.
|
|
|
|
|
*
|
|
|
|
|
* This only makes a difference, if the profile actually has "connection.permissions"
|
|
|
|
|
* set to limit visibility (which is not the case for externally managed, generated profiles).
|
|
|
|
|
*
|
|
|
|
|
* If we assume a previously active connection whose lifetime was unbound, we now bind it
|
|
|
|
|
* after restart. That is not correct, and can mean that the profile becomes subject to
|
|
|
|
|
* deactivation after restart (if the user logs out).
|
|
|
|
|
*
|
|
|
|
|
* This should be improved, but it's unclear how. */
|
2018-04-12 11:32:18 +02:00
|
|
|
active = _new_active_connection(
|
|
|
|
|
self,
|
|
|
|
|
FALSE,
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn,
|
2018-04-12 11:32:18 +02:00
|
|
|
NULL,
|
|
|
|
|
NULL,
|
2020-09-28 16:03:33 +02:00
|
|
|
NULL,
|
2018-04-12 11:32:18 +02:00
|
|
|
device,
|
|
|
|
|
subject,
|
2019-07-31 16:12:22 +02:00
|
|
|
activation_type_assume ? NM_ACTIVATION_TYPE_ASSUME : NM_ACTIVATION_TYPE_EXTERNAL,
|
|
|
|
|
activation_type_assume ? NM_ACTIVATION_REASON_ASSUME : NM_ACTIVATION_REASON_EXTERNAL,
|
core: improve and fix keeping connection active based on "connection.permissions"
By setting "connection.permissions", a profile is restricted to a
particular user.
That means for example, that another user cannot see, modify, delete,
activate or deactivate the profile. It also means, that the profile
will only autoconnect when the user is logged in (has a session).
Note that root is always able to activate the profile. Likewise, the
user is also allowed to manually activate the own profile, even if no
session currently exists (which can easily happen with `sudo`).
When the user logs out (the session goes away), we want do disconnect
the profile, however there are conflicting goals here:
1) if the profile was activate by root user, then logging out the user
should not disconnect the profile. The patch fixes that by not
binding the activation to the connection, if the activation is done
by the root user.
2) if the profile was activated by the owner when it had no session,
then it should stay alive until the user logs in (once) and logs
out again. This is already handled by the previous commit.
Yes, this point is odd. If you first do
$ sudo -u $OTHER_USER nmcli connection up $PROFILE
the profile activates despite not having a session. If you then
$ ssh guest@localhost nmcli device
you'll still see the profile active. However, the moment the SSH session
ends, a session closes and the profile disconnects. It's unclear, how to
solve that any better. I think, a user who cares about this, should not
activate the profile without having a session in the first place.
There are quite some special cases, in particular with internal
activations. In those cases we need to decide whether to bind the
activation to the profile's visibility.
Also, expose the "bind" setting in the D-Bus API. Note, that in the future
this flag may be modified via D-Bus API. Like we may also add related API
that allows to tweak the lifetime of the activation.
Also, I think we broke handling of connection visiblity with 37e8c53eeed
"core: Introduce helper class to track connection keep alive". This
should be fixed now too, with improved behavior.
Fixes: 37e8c53eeed579fe34a68819cd12f3295d581394
https://bugzilla.redhat.com/show_bug.cgi?id=1530977
2018-11-21 13:30:16 +01:00
|
|
|
NM_ACTIVATION_STATE_FLAG_LIFETIME_BOUND_TO_PROFILE_VISIBILITY,
|
2017-03-09 16:58:37 +01:00
|
|
|
&error);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-03-09 16:58:37 +01:00
|
|
|
if (!active) {
|
2017-06-07 13:30:08 +02:00
|
|
|
_LOGW(LOGD_DEVICE,
|
|
|
|
|
"assume: assumed connection %s failed to activate: %s",
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
nm_dbus_object_get_path(NM_DBUS_OBJECT(sett_conn)),
|
2017-03-09 16:58:37 +01:00
|
|
|
error->message);
|
|
|
|
|
g_error_free(error);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-03-09 16:58:37 +01:00
|
|
|
if (was_unmanaged) {
|
|
|
|
|
nm_device_state_changed(device,
|
|
|
|
|
NM_DEVICE_STATE_UNAVAILABLE,
|
|
|
|
|
NM_DEVICE_STATE_REASON_CONFIG_FAILED);
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-07-31 16:12:22 +02:00
|
|
|
if (generated && !activation_type_assume) {
|
2017-06-07 13:30:08 +02:00
|
|
|
_LOG2D(LOGD_DEVICE,
|
|
|
|
|
device,
|
|
|
|
|
"assume: deleting generated connection after assuming failed");
|
settings: rework tracking settings connections and settings plugins
Completely rework how settings plugin handle connections and how
NMSettings tracks the list of connections.
Previously, settings plugins would return objects of (a subtype of) type
NMSettingsConnection. The NMSettingsConnection was tightly coupled with
the settings plugin. That has a lot of downsides.
Change that. When changing this basic relation how settings connections
are tracked, everything falls appart. That's why this is a huge change.
Also, since I have to largely rewrite the settings plugins, I also
added support for multiple keyfile directories, handle in-memory
connections only by keyfile plugin and (partly) use copy-on-write NMConnection
instances. I don't want to spend effort rewriting large parts while
preserving the old way, that anyway should change. E.g. while rewriting ifcfg-rh,
I don't want to let it handle in-memory connections because that's not right
long-term.
--
If the settings plugins themself create subtypes of NMSettingsConnection
instances, then a lot of knowledge about tracking connections moves
to the plugins.
Just try to follow the code what happend during nm_settings_add_connection().
Note how the logic is spread out:
- nm_settings_add_connection() calls plugin's add_connection()
- add_connection() creates a NMSettingsConnection subtype
- the plugin has to know that it's called during add-connection and
not emit NM_SETTINGS_PLUGIN_CONNECTION_ADDED signal
- NMSettings calls claim_connection() which hocks up the new
NMSettingsConnection instance and configures the instance
(like calling nm_settings_connection_added()).
This summary does not sound like a lot, but try to follow that code. The logic
is all over the place.
Instead, settings plugins should have a very simple API for adding, modifying,
deleting, loading and reloading connections. All the plugin does is to return a
NMSettingsStorage handle. The storage instance is a handle to identify a profile
in storage (e.g. a particular file). The settings plugin is free to subtype
NMSettingsStorage, but it's not necessary.
There are no more events raised, and the settings plugin implements the small
API in a straightforward manner.
NMSettings now drives all of this. Even NMSettingsConnection has now
very little concern about how it's tracked and delegates only to NMSettings.
This should make settings plugins simpler. Currently settings plugins
are so cumbersome to implement, that we avoid having them. It should not be
like that and it should be easy, beneficial and lightweight to create a new
settings plugin.
Note also how the settings plugins no longer care about duplicate UUIDs.
Duplicated UUIDs are a fact of life and NMSettings must handle them. No
need to overly concern settings plugins with that.
--
NMSettingsConnection is exposed directly on D-Bus (being a subtype of
NMDBusObject) but it was also a GObject type provided by the settings
plugin. Hence, it was not possible to migrate a profile from one plugin to
another.
However that would be useful when one profile does not support a
connection type (like ifcfg-rh not supporting VPN). Currently such
migration is not implemented except for migrating them to/from keyfile's
run directory. The problem is that migrating profiles in general is
complicated but in some cases it is important to do.
For example checkpoint rollback should recreate the profile in the right
settings plugin, not just add it to persistent storage. This is not yet
properly implemented.
--
Previously, both keyfile and ifcfg-rh plugin implemented in-memory (unsaved)
profiles, while ifupdown plugin cannot handle them. That meant duplication of code
and a ifupdown profile could not be modified or made unsaved.
This is now unified and only keyfile plugin handles in-memory profiles (bgo #744711).
Also, NMSettings is aware of such profiles and treats them specially.
In particular, NMSettings drives the migration between persistent and non-persistent
storage.
Note that a settings plugins may create truly generated, in-memory profiles.
The settings plugin is free to generate and persist the profiles in any way it
wishes. But the concept of "unsaved" profiles is now something explicitly handled
by keyfile plugin. Also, these "unsaved" keyfile profiles are persisted to file system
too, to the /run directory. This is great for two reasons: first of all, all
profiles from keyfile storage in fact have a backing file -- even the
unsaved ones. It also means you can create "unsaved" profiles in /run
and load them with `nmcli connection load`, meaning there is a file
based API for creating unsaved profiles.
The other advantage is that these profiles now survive restarting
NetworkManager. It's paramount that restarting the daemon is as
non-disruptive as possible. Persisting unsaved files to /run improves
here significantly.
--
In the past, NMSettingsConnection also implemented the NMConnection interface.
That was already changed a while ago: instead, users now call
nm_settings_connection_get_connection() to delegate to an
NMSimpleConnection. What still happened, however, was that the NMConnection
instance was never swapped; instead, the instance was modified with
nm_connection_replace_settings_from_connection(), clear-secrets, etc.
Change that and treat the NMConnection instance as immutable. Instead of modifying
it, reference/clone a new instance. Previously, when somebody
wanted to keep a reference to an NMConnection, the profile would be cloned.
Now, it is supposed to be safe to reference the instance directly, and everybody
must ensure not to modify the instance. nmtst_connection_assert_unchanging()
should help with that.
The point is that the settings plugins may keep references to the
NMConnection instance, and so does the NMSettingsConnection. We want
to avoid cloning the instances as long as they are the same.
Likewise, the device's applied connection can now also be referenced
instead of cloning it. This is not yet done, and possibly there are
further improvements possible.
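A minimal sketch of the resulting usage pattern; the consumer type below
is hypothetical, only the immutability contract is from this change:

#include <glib-object.h>

typedef struct {
    GObject *connection; /* stands in for an NMConnection held immutably */
} ProfileUserSketch;

static void
profile_user_set(ProfileUserSketch *self, GObject *connection)
{
    /* Before: the profile had to be deep-cloned, because the instance
     * could be modified behind our back. Now referencing is safe:
     * changes swap in a new instance instead of mutating this one. */
    g_set_object(&self->connection, connection);
}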
--
Also implement multiple keyfile directories /usr/lib, /etc, /run (rh #1674545,
bgo #772414).
It was always the case that multiple files could provide the same UUID
(both in the case of keyfile and ifcfg-rh). For the keyfile plugin, if a profile in
read-only storage in /usr/lib gets modified, then it actually gets stored in
/etc (or /run, if the profile is unsaved).
--
While at it, make /etc/network/interfaces profiles for ifupdown plugin reloadable.
--
https://bugzilla.gnome.org/show_bug.cgi?id=772414
https://bugzilla.gnome.org/show_bug.cgi?id=744711
https://bugzilla.redhat.com/show_bug.cgi?id=1674545
2019-06-13 17:12:20 +02:00
|
|
|
nm_settings_connection_delete(sett_conn, FALSE);
|
2017-03-13 15:34:14 +01:00
|
|
|
} else {
|
|
|
|
|
if (nm_device_sys_iface_state_get(device) == NM_DEVICE_SYS_IFACE_STATE_ASSUME)
|
|
|
|
|
nm_device_sys_iface_state_set(device, NM_DEVICE_SYS_IFACE_STATE_EXTERNAL);
|
2017-03-09 16:58:37 +01:00
|
|
|
}
|
|
|
|
|
return FALSE;
|
2014-05-28 10:18:34 -04:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-03-09 16:58:37 +01:00
|
|
|
/* If the device is a slave or VLAN, find the master ActiveConnection */
|
|
|
|
|
master_ac = NULL;
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
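A minimal sketch of the delegation this describes;
nm_settings_connection_get_connection() is the real accessor (it also
appears in the code below), while the surrounding function is illustrative:

/* assumes the daemon's settings headers and libnm-core are available */
static const char *
profile_get_id_sketch(NMSettingsConnection *sett_conn)
{
    /* was: nm_connection_get_id(NM_CONNECTION(sett_conn)); no longer
     * possible, since NMSettingsConnection "is-a" NMConnection no more. */
    NMConnection *connection = nm_settings_connection_get_connection(sett_conn);

    return nm_connection_get_id(connection);
}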
|
|
|
if (find_master(self,
|
|
|
|
|
nm_settings_connection_get_connection(sett_conn),
|
|
|
|
|
device,
|
|
|
|
|
NULL,
|
2020-09-28 16:03:33 +02:00
|
|
|
NULL,
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
&master_ac,
|
|
|
|
|
NULL)
|
|
|
|
|
&& master_ac)
|
2017-03-09 16:58:37 +01:00
|
|
|
nm_active_connection_set_master(active, master_ac);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-03-09 16:58:37 +01:00
|
|
|
active_connection_add(self, active);
|
|
|
|
|
nm_device_queue_activation(device, NM_ACT_REQUEST(active));
|
2014-05-28 10:18:34 -04:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-03-09 16:58:37 +01:00
|
|
|
return TRUE;
|
2014-05-28 10:18:34 -04:00
|
|
|
}
|
|
|
|
|
|
2015-12-04 17:17:47 +01:00
|
|
|
static void
|
manager: fix preserving assume state during activation
Originally 850c977 "device: track system interface state in NMDevice",
intended that a connection can only be assumed initially when seeing
a device for the first time. Assuming a connection later was to be
prevented by setting device's sys-iface-state to MANAGED.
That changed too much in behavior, because we used to assume external
connections also when they are activated later on. So this was attempted
to get fixed by
- acf1067 nm-manager: try assuming connections on managed devices
- b6b7d90 manager: avoid generating in memory connections during startup for managed devices
It's probably just wrong to prevent assuming connections based on the
sys-iface-state. So drop the check for sys-iface-state from
recheck_assume_connection(). Now, we can assume anytime on managed,
disconnected interfaces, like previously.
Btw, note that priv->startup is totally wrong to check there, because
priv->startup has the sole purpose of tracking startup-complete property.
Startup, as far as NMManager is concerned, is platform_query_devices().
However, the problem is that we only assume connections (contrary to
doing external activation) when we have a connection-uuid from the state
file or with guess-assume during startup.
When assuming a master device, it can fail with
(nm-bond): ignoring generated connection (IPv6LL-only and not in master-slave relationship)
thus, for internal reasons the device cannot be assumed yet.
Fix that by attaching the assume-state to the device, so that on multiple
recheck_assume_connection() calls we still try to assume. Whenever we try
to assume the connection and it fails due to external reasons (like, the connection
no longer matching), we clear the assume state, so that we only try as
long as there are internal reasons why assuming fails.
https://bugzilla.redhat.com/show_bug.cgi?id=1452062
2017-06-07 17:34:47 +02:00
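A rough sketch of the retry behavior described above;
nm_device_assume_state_reset() is the real helper, while the other lookups
are illustrative assumptions declared extern here:

#include <glib.h>

typedef struct _NMManager NMManager;
typedef struct _NMDevice  NMDevice;

/* hypothetical plumbing standing in for the real assume-state API: */
extern const char *device_assume_state_get_uuid(NMDevice *device);
extern gboolean    assume_connection_by_uuid(NMManager *self, NMDevice *device, const char *uuid);
extern gboolean    last_failure_was_external(NMDevice *device);
extern void        nm_device_assume_state_reset(NMDevice *device);

static gboolean
try_assume_sketch(NMManager *self, NMDevice *device)
{
    const char *uuid = device_assume_state_get_uuid(device);

    if (!uuid)
        return FALSE; /* nothing recorded: fall back to EXTERNAL */

    if (assume_connection_by_uuid(self, device, uuid))
        return TRUE;

    if (last_failure_was_external(device)) {
        /* e.g. the connection no longer matches: stop trying. */
        nm_device_assume_state_reset(device);
    }
    /* on internal failures the assume state is kept, so a later
     * recheck_assume_connection() call retries. */
    return FALSE;
}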
|
|
|
recheck_assume_connection_cb(NMManager *self, NMDevice *device)
|
2015-12-04 17:17:47 +01:00
|
|
|
{
|
manager: fix preserving assume state during activation
Originally 850c977 "device: track system interface state in NMDevice",
intended that a connection can only be assumed initially when seeing
a device for the first time. Assuming a connection later was to be
prevented by setting device's sys-iface-state to MANAGED.
That changed too much in behavior, because we used to assume external
connections also when they are activated later on. So this was attempted
to get fixed by
- acf1067 nm-manager: try assuming connections on managed devices
- b6b7d90 manager: avoid generating in memory connections during startup for managed devices
It's probably just wrong to prevent assuming connections based on the
sys-iface-state. So drop the check for sys-iface-state from
recheck_assume_connection(). Now, we can assume anytime on managed,
disconnected interfaces, like previously.
Btw, note that priv->startup is totally wrong to check there, because
priv->startup has the sole purpose of tracking startup-complete property.
Startup, as far as NMManager is concerned, is platform_query_devices().
However, the problem is that we only assume connections (contrary to
doing external activation) when we have a connection-uuid from the state
file or with guess-assume during startup.
When assuming a master device, it can fail with
(nm-bond): ignoring generated connection (IPv6LL-only and not in master-slave relationship)
thus, for internal reasons the device cannot be assumed yet.
Fix that by attaching the assume-state to the device, so that on multiple
recheck_assume_connection() calls we still try to assume. Whenever we try
to assume the connection and it fails due to external reasons (like, the connection
no longer matching), we clear the assume state, so that we only try as
long as there are internal reasons why assuming fails.
https://bugzilla.redhat.com/show_bug.cgi?id=1452062
2017-06-07 17:34:47 +02:00
|
|
|
recheck_assume_connection(self, device);
|
2015-12-04 17:17:47 +01:00
|
|
|
}
|
|
|
|
|
|
2016-12-26 11:12:39 +01:00
|
|
|
static void
|
|
|
|
|
device_ifindex_changed(NMDevice *device, GParamSpec *pspec, NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
_parent_notify_changed(self, device, FALSE);
|
|
|
|
|
}
|
|
|
|
|
|
2014-06-12 13:27:14 -05:00
|
|
|
static void
|
|
|
|
|
device_ip_iface_changed(NMDevice *device, GParamSpec *pspec, NMManager *self)
|
|
|
|
|
{
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
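A small sketch of the embedded-node pattern; the struct is a stand-in,
while the c_list_*() calls are the real c-list API used in the code below:

#include <glib.h>
#include "c-list.h" /* path to the bundled c-list header may differ */

typedef struct {
    int   ifindex;     /* illustrative payload */
    CList devices_lst; /* link node embedded in the object itself */
} DeviceSketch;

static DeviceSketch *
device_sketch_new(int ifindex)
{
    DeviceSketch *device = g_new0(DeviceSketch, 1);

    device->ifindex = ifindex;
    c_list_init(&device->devices_lst); /* unlinked node points to itself */
    return device;
}

static void
track_device(CList *devices_lst_head, DeviceSketch *device)
{
    /* O(1) check whether the object is already linked, no head needed: */
    if (!c_list_is_empty(&device->devices_lst))
        return;
    c_list_link_tail(devices_lst_head, &device->devices_lst);
}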
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
const char *ip_iface = nm_device_get_ip_iface(device);
|
2017-10-12 14:53:43 +02:00
|
|
|
NMDeviceType device_type = nm_device_get_device_type(device);
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *candidate;
|
2014-06-12 13:27:14 -05:00
|
|
|
|
|
|
|
|
/* Remove NMDevice objects that are actually child devices of others,
|
|
|
|
|
* when the other device finally knows its IP interface name. For example,
|
|
|
|
|
* remove the PPP interface that's a child of a WWAN device, since it's
|
|
|
|
|
* not really a standalone NMDevice.
|
|
|
|
|
*/
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (candidate, &priv->devices_lst_head, devices_lst) {
|
2020-07-20 09:31:03 +02:00
|
|
|
if (candidate != device && nm_streq0(nm_device_get_iface(candidate), ip_iface)
|
2017-10-12 14:53:43 +02:00
|
|
|
&& nm_device_get_device_type(candidate) == device_type
|
2014-10-15 21:17:45 -05:00
|
|
|
&& nm_device_is_real(candidate)) {
|
2019-05-17 19:22:19 +02:00
|
|
|
remove_device(self, candidate, FALSE);
|
2014-06-12 13:27:14 -05:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-01-07 17:54:38 +01:00
|
|
|
static void
|
|
|
|
|
device_iface_changed(NMDevice *device, GParamSpec *pspec, NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
/* Virtual connections may refer to the new device name as
|
|
|
|
|
* parent device, so retry activating them.
|
|
|
|
|
*/
|
|
|
|
|
retry_connections_for_parent_device(self, device);
|
|
|
|
|
}
|
|
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry, either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
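As a rough sketch of the lower-layer shape this describes: one static
GDBusInterfaceInfo table plus g_dbus_connection_register_object(), with no
skeletons and no GDBusObjectManagerServer. The interface and names below
are illustrative, not NetworkManager's actual tables:

#include <gio/gio.h>

/* Static descriptor for a trivial interface; in the daemon such
 * GDBusInterfaceInfo tables replace the generated skeletons. */
static GDBusArgInfo       arg_name     = {-1, "name", "s", NULL};
static GDBusArgInfo      *method_in[]  = {&arg_name, NULL};
static GDBusMethodInfo    method_hello = {-1, "Hello", method_in, NULL, NULL};
static GDBusMethodInfo   *methods[]    = {&method_hello, NULL};
static GDBusInterfaceInfo iface_info   = {-1, "org.example.Sketch", methods, NULL, NULL, NULL};

static void
method_call(GDBusConnection       *connection,
            const char            *sender,
            const char            *object_path,
            const char            *interface_name,
            const char            *method_name,
            GVariant              *parameters,
            GDBusMethodInvocation *invocation,
            gpointer               user_data)
{
    g_dbus_method_invocation_return_value(invocation, NULL);
}

static const GDBusInterfaceVTable vtable = {method_call, NULL, NULL};

static guint
export_sketch(GDBusConnection *connection, GError **error)
{
    /* one registration per object path, no GDBusObjectManagerServer */
    return g_dbus_connection_register_object(connection,
                                             "/org/example/Sketch",
                                             &iface_info,
                                             &vtable,
                                             NULL, NULL, error);
}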
|
|
|
static void
|
|
|
|
|
_emit_device_added_removed(NMManager *self, NMDevice *device, gboolean is_added)
|
|
|
|
|
{
|
|
|
|
|
nm_dbus_object_emit_signal(NM_DBUS_OBJECT(self),
|
|
|
|
|
&interface_info_manager,
|
|
|
|
|
is_added ? &signal_info_device_added : &signal_info_device_removed,
|
|
|
|
|
"(o)",
|
|
|
|
|
nm_dbus_object_get_path(NM_DBUS_OBJECT(device)));
|
|
|
|
|
g_signal_emit(self, signals[is_added ? DEVICE_ADDED : DEVICE_REMOVED], 0, device);
|
|
|
|
|
_notify(self, PROP_DEVICES);
|
|
|
|
|
}
|
2016-01-07 17:54:38 +01:00
|
|
|
|
2014-09-24 16:58:07 -05:00
|
|
|
static void
|
|
|
|
|
device_realized(NMDevice *device, GParamSpec *pspec, NMManager *self)
|
|
|
|
|
{
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry, either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
|
|
|
_emit_device_added_removed(self, device, nm_device_is_real(device));
|
device: remove default-unmanaged and refactor unmanaged flags
Get rid of NM_UNMANAGED_DEFAULT and refine the interaction between
unmanaged flags, device state and managed property.
Previously, the NM_UNMANAGED_DEFAULT was special in that a device was
still considered managed if it had solely the NM_UNMANAGED_DEFAULT flag
set and its state was managed. Thus, whether the device (state) was managed,
depended on the device state too.
Now, a device is considered managed (or unmanaged) based on the unmanaged
flags and realization state alone. At the same time, the device state
directly corresponds to the managed property of the device. Of course,
while changing the unmanaged flags, that invariant is shortly violated
until the state transition is complete.
Introduce more unmanaged flags, some of which are non-authoritative.
For example, the EXTERNAL_DOWN flag only has an effect as long as the user
didn't explicitly manage the device (NM_UNMANAGED_USER_EXPLICIT). In other
words, certain flags can render other flags ineffective. Whether the device
is considered managed depends on the flags but also on the explicitly unset flags.
In a way, this is similar to previously, where NM_UNMANAGED_DEFAULT was ignored
(if no other flags were present).
Also, previously a device that was NM_UNMANAGED_DEFAULT and in disconnected
state would transition back to unmanaged. No longer do that. Once a device is
managed, it stays managed as long as the flags indicate it should be managed.
However, the user can also modify the unmanaged flags via the D-Bus API.
Also get rid of nm_device_finish_init(). That was previously called
by NMManager after add_device(). As we now realize devices (possibly
multiple times) this should be handled during realization.
https://bugzilla.gnome.org/show_bug.cgi?id=746566
2015-09-15 15:35:16 +02:00
|
|
|
}
|
2014-10-06 11:21:54 -05:00
|
|
|
|
2018-12-03 11:09:39 +01:00
|
|
|
static NMConnectivityState
|
|
|
|
|
_get_best_connectivity(NMManager *self, int addr_family)
|
2017-03-20 13:36:00 +00:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2018-12-03 11:09:39 +01:00
|
|
|
NMConnectivityState best_state;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *dev;
|
2018-12-03 11:09:39 +01:00
|
|
|
gint64 best_metric;
|
|
|
|
|
|
|
|
|
|
if (addr_family == AF_UNSPEC) {
|
|
|
|
|
best_state = _get_best_connectivity(self, AF_INET);
|
|
|
|
|
if (nm_connectivity_state_cmp(best_state, NM_CONNECTIVITY_FULL) >= 0) {
|
|
|
|
|
/* already FULL IPv4 connectivity. No need to check IPv6, it doesn't get
|
|
|
|
|
* better. */
|
|
|
|
|
return best_state;
|
|
|
|
|
}
|
|
|
|
|
return NM_MAX_WITH_CMP(nm_connectivity_state_cmp,
|
|
|
|
|
best_state,
|
|
|
|
|
_get_best_connectivity(self, AF_INET6));
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-12-03 11:09:39 +01:00
|
|
|
nm_assert_addr_family(addr_family);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-12-03 11:09:39 +01:00
|
|
|
best_state = NM_CONNECTIVITY_UNKNOWN;
|
|
|
|
|
best_metric = G_MAXINT64;
|
|
|
|
|
c_list_for_each_entry (dev, &priv->devices_lst_head, devices_lst) {
|
2021-11-09 13:28:54 +01:00
|
|
|
const NMPObject *r;
|
2018-12-03 11:09:39 +01:00
|
|
|
NMConnectivityState state;
|
|
|
|
|
gint64 metric;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-12-03 11:09:39 +01:00
|
|
|
r = nm_device_get_best_default_route(dev, addr_family);
|
2020-07-03 15:25:51 +02:00
|
|
|
if (r)
|
|
|
|
|
metric = NMP_OBJECT_CAST_IP_ROUTE(r)->metric;
|
|
|
|
|
else {
|
2018-12-03 11:09:39 +01:00
|
|
|
/* if all devices have no default-route, we still include the best
|
|
|
|
|
* connectivity state among all the devices. */
|
|
|
|
|
metric = G_MAXINT64;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (metric > best_metric) {
|
|
|
|
|
/* we already have a default route with better metric. The connectivity state
|
|
|
|
|
* of this device is irrelevant. */
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
state = nm_device_get_connectivity_state(dev, addr_family);
|
|
|
|
|
if (metric < best_metric) {
|
|
|
|
|
/* this device has a better default route. It wins. */
|
|
|
|
|
best_metric = metric;
|
2017-03-27 15:22:22 +00:00
|
|
|
best_state = state;
|
2018-12-03 11:09:39 +01:00
|
|
|
} else {
|
|
|
|
|
best_state = NM_MAX_WITH_CMP(nm_connectivity_state_cmp, best_state, state);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (nm_connectivity_state_cmp(best_state, NM_CONNECTIVITY_FULL) >= 0) {
|
|
|
|
|
/* it doesn't get better than FULL. We are done. */
|
|
|
|
|
break;
|
2018-04-11 11:35:14 +02:00
|
|
|
}
|
2017-03-20 13:36:00 +00:00
|
|
|
}
|
|
|
|
|
|
2018-12-03 11:09:39 +01:00
|
|
|
return best_state;
|
|
|
|
|
}
|
2017-03-20 13:36:00 +00:00
|
|
|
|
2018-12-03 11:09:39 +01:00
|
|
|
static void
|
|
|
|
|
device_connectivity_changed(NMDevice *device, GParamSpec *pspec, NMManager *self)
|
2019-03-21 11:32:32 +01:00
|
|
|
{
|
|
|
|
|
update_connectivity_value(self);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
update_connectivity_value(NMManager *self)
|
2018-12-03 11:09:39 +01:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2018-12-03 11:09:39 +01:00
|
|
|
NMConnectivityState best_state;
|
2017-03-20 13:36:00 +00:00
|
|
|
|
2018-12-03 11:09:39 +01:00
|
|
|
best_state = _get_best_connectivity(self, AF_UNSPEC);
|
|
|
|
|
if (best_state == priv->connectivity_state)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
priv->connectivity_state = best_state;
|
|
|
|
|
|
|
|
|
|
_LOGD(LOGD_CORE,
|
|
|
|
|
"connectivity checking indicates %s",
|
|
|
|
|
nm_connectivity_state_to_string(priv->connectivity_state));
|
|
|
|
|
|
|
|
|
|
nm_manager_update_state(self);
|
|
|
|
|
_notify(self, PROP_CONNECTIVITY);
|
|
|
|
|
nm_dispatcher_call_connectivity(priv->connectivity_state, NULL, NULL, NULL);
|
2017-03-20 13:36:00 +00:00
|
|
|
}
|
|
|
|
|
|
device: remove default-unmanaged and refactor unmanaged flags
Get rid of NM_UNMANAGED_DEFAULT and refine the interaction between
unmanaged flags, device state and managed property.
Previously, the NM_UNMANAGED_DEFAULT was special in that a device was
still considered managed if it had solely the NM_UNMANAGED_DEFAULT flag
set and its state was managed. Thus, whether the device (state) was managed,
depended on the device state too.
Now, a device is considered managed (or unmanaged) based on the unmanaged
flags and realization state alone. At the same time, the device state
directly corresponds to the managed property of the device. Of course,
while changing the unmanaged flags, that invariant is shortly violated
until the state transition is complete.
Introduce more unmanaged flags, some of which are non-authoritative.
For example, the EXTERNAL_DOWN flag only has an effect as long as the user
didn't explicitly manage the device (NM_UNMANAGED_USER_EXPLICIT). In other
words, certain flags can render other flags ineffective. Whether the device
is considered managed depends on the flags but also on the explicitly unset flags.
In a way, this is similar to previously, where NM_UNMANAGED_DEFAULT was ignored
(if no other flags were present).
Also, previously a device that was NM_UNMANAGED_DEFAULT and in disconnected
state would transition back to unmanaged. No longer do that. Once a device is
managed, it stays managed as long as the flags indicate it should be managed.
However, the user can also modify the unmanaged flags via the D-Bus API.
Also get rid of nm_device_finish_init(). That was previously called
by NMManager after add_device(). As we now realize devices (possibly
multiple times) this should be handled during realization.
https://bugzilla.gnome.org/show_bug.cgi?id=746566
2015-09-15 15:35:16 +02:00
|
|
|
static void
|
core: only assume connections that were managed in a previous run of NetworkManager
Before, we would have the concept of assumed connections, which is used
for (1) externally configured devices that NetworkManager should not
touch and (2) connections that NetworkManager should gracefully take
over after a restart (seamlessly, non-destructively).
The behavior was unclear and mixed. It wasn't clear whether the device
was in no-touch mode (1) or graceful take-over mode (2).
Previous commits already introduced separate activation types EXTERNAL (1)
and ASSUME (2).
Also, previously, we would for both (1) and (2) try to find a matching
connection and use it. That doesn't make sense for either.
In the external case (1), we should not pretend that an existing connection
is active. Let's always create a new in-memory connection for these
cases. Note that this means external devices will now always generate
a connection, instead of pretending an existing one is active.
For the assume case (2), we shall not use nm_utils_match_connection() to
guess which connection might be active. It can only be the one that was
active on a previous run of NetworkManager. So, use the information from
the state file and try to activate it. If that fails, it is not an
assume activation type. Note that this means we now do ASSUME only rarely;
most of the time we do EXTERNAL activation. That is because the state
information is only available after a restart of NetworkManager.
(A condensed sketch of this decision follows the function below.)
2017-03-08 08:45:11 +01:00
|
|
|
_device_realize_finish(NMManager *self, NMDevice *device, const NMPlatformLink *plink)
|
device: remove default-unmanaged and refactor unmanaged flags
Get rid of NM_UNMANAGED_DEFAULT and refine the interaction between
unmanaged flags, device state and managed property.
Previously, the NM_UNMANAGED_DEFAULT was special in that a device was
still considered managed if it had solely the NM_UNMANAGED_DEFAULT flag
set and its state was managed. Thus, whether the device (state) was managed,
depended on the device state too.
Now, a device is considered managed (or unmanaged) based on the unmanaged
flags and realization state alone. At the same time, the device state
directly corresponds to the managed property of the device. Of course,
while changing the unmanaged flags, that invariant is shortly violated
until the state transition is complete.
Introduce more unmanaged flags, some of which are non-authoritative.
For example, the EXTERNAL_DOWN flag only has an effect as long as the user
didn't explicitly manage the device (NM_UNMANAGED_USER_EXPLICIT). In other
words, certain flags can render other flags ineffective. Whether the device
is considered managed depends on the flags but also on the explicitly unset flags.
In a way, this is similar to previously, where NM_UNMANAGED_DEFAULT was ignored
(if no other flags were present).
Also, previously a device that was NM_UNMANAGED_DEFAULT and in disconnected
state would transition back to unmanaged. No longer do that. Once a device is
managed, it stays managed as long as the flags indicate it should be managed.
However, the user can also modify the unmanaged flags via the D-Bus API.
Also get rid of nm_device_finish_init(). That was previously called
by NMManager after add_device(). As we now realize devices (possibly
multiple times) this should be handled during realization.
https://bugzilla.gnome.org/show_bug.cgi?id=746566
2015-09-15 15:35:16 +02:00
|
|
|
{
|
|
|
|
|
g_return_if_fail(NM_IS_MANAGER(self));
|
|
|
|
|
g_return_if_fail(NM_IS_DEVICE(device));
|
|
|
|
|
|
|
|
|
|
nm_device_realize_finish(device, plink);
|
|
|
|
|
|
manager: fix preserving assume state during activation
Originally 850c977 "device: track system interface state in NMDevice",
intended that a connection can only be assumed initially when seeing
a device for the first time. Assuming a connection later was to be
prevented by setting device's sys-iface-state to MANAGED.
That changed too much in behavior, because we used to assume external
connections also when they are activated later on. So this was attempted
to get fixed by
- acf1067 nm-manager: try assuming connections on managed devices
- b6b7d90 manager: avoid generating in memory connections during startup for managed devices
It's probably just wrong to prevent assuming connections based on the
sys-iface-state. So drop the check for sys-iface-state from
recheck_assume_connection(). Now, we can assume anytime on managed,
disconnected interfaces, like previously.
Btw, note that priv->startup is totally wrong to check there, because
priv->startup has the sole purpose of tracking startup-complete property.
Startup, as far as NMManager is concerned, is platform_query_devices().
However, the problem is that we only assume connections (contrary to
doing external activation) when we have a connection-uuid from the state
file or with guess-assume during startup.
When assuming a master device, it can fail with
(nm-bond): ignoring generated connection (IPv6LL-only and not in master-slave relationship)
thus, for internal reasons the device cannot be assumed yet.
Fix that by attaching the assume-state to the device, so that on multiple
recheck_assume_connection() calls we still try to assume. Whenever we try
to assume the connection and it fails due to external reasons (like, the connection
no longer matching), we clear the assume state, so that we only try as
long as there are internal reasons why assuming fails.
https://bugzilla.redhat.com/show_bug.cgi?id=1452062
2017-06-07 17:34:47 +02:00
|
|
|
if (!nm_device_get_managed(device, FALSE)) {
|
2022-10-12 19:26:27 +02:00
|
|
|
/* If the device is unmanaged by NM_UNMANAGED_PLATFORM_INIT,
|
2021-05-11 14:44:20 +02:00
|
|
|
* don't reset the state now but wait until it becomes managed. */
|
2022-10-12 19:26:27 +02:00
|
|
|
if (nm_device_get_unmanaged_flags(device, NM_UNMANAGED_ALL) & ~NM_UNMANAGED_PLATFORM_INIT)
|
2021-05-11 14:44:20 +02:00
|
|
|
nm_device_assume_state_reset(device);
|
2014-09-24 16:58:07 -05:00
|
|
|
return;
|
manager: fix preserving assume state during activation
Originally 850c977 "device: track system interface state in NMDevice",
intended that a connection can only be assumed initially when seeing
a device for the first time. Assuming a connection later was to be
prevented by setting device's sys-iface-state to MANAGED.
That changed too much in behavior, because we used to assume external
connections also when they are activated later on. So this was attempted
to get fixed by
- acf1067 nm-manager: try assuming connections on managed devices
- b6b7d90 manager: avoid generating in memory connections during startup for managed devices
It's probably just wrong to prevent assuming connections based on the
sys-iface-state. So drop the check for sys-iface-state from
recheck_assume_connection(). Now, we can assume anytime on managed,
disconnected interfaces, like previously.
Btw, note that priv->startup is totally wrong to check there, because
priv->startup has the sole purpose of tracking startup-complete property.
Startup, as far as NMManager is concerned, is platform_query_devices().
However, the problem is that we only assume connections (contrary to
doing external activation) when we have a connection-uuid from the state
file or with guess-assume during startup.
When assuming a master device, it can fail with
(nm-bond): ignoring generated connection (IPv6LL-only and not in master-slave relationship)
thus, for internal reasons the device cannot be assumed yet.
Fix that by attaching the assume-state to the device, so that on multiple
recheck_assume_connection() calls we still try to assume. Whenever we try
to assume the connection and it fails due to external reasons (like, the connection
no longer matching), we clear the assume state, so that we only try as
long as there are internal reasons why assuming fails.
https://bugzilla.redhat.com/show_bug.cgi?id=1452062
2017-06-07 17:34:47 +02:00
|
|
|
}
|
2014-09-24 16:58:07 -05:00
|
|
|
|
manager: fix preserving assume state during activation
Originally 850c977 "device: track system interface state in NMDevice",
intended that a connection can only be assumed initially when seeing
a device for the first time. Assuming a connection later was to be
prevented by setting device's sys-iface-state to MANAGED.
That changed too much in behavior, because we used to assume external
connections also when they are activated later on. So this was attempted
to get fixed by
- acf1067 nm-manager: try assuming connections on managed devices
- b6b7d90 manager: avoid generating in memory connections during startup for managed devices
It's probably just wrong to prevent assuming connections based on the
sys-iface-state. So drop the check for sys-iface-state from
recheck_assume_connection(). Now, we can assume anytime on managed,
disconnected interfaces, like previously.
Btw, note that priv->startup is totally wrong to check there, because
priv->startup has the sole purpose of tracking startup-complete property.
Startup, as far as NMManager is concerned, is platform_query_devices().
However, the problem is that we only assume connections (contrary to
doing external activation) when we have a connection-uuid from the state
file or with guess-assume during startup.
When assuming a master device, it can fail with
(nm-bond): ignoring generated connection (IPv6LL-only and not in master-slave relationship)
thus, for internal reasons the device cannot be assumed yet.
Fix that by attaching the assume-state to the device, so that on multiple
recheck_assume_connection() calls we still try to assume. Whenever we try
to assume the connection and it fails due to external reasons (like, the connection
no longer matching), we clear the assume state, so that we only try as
long as there are internal reasons why assuming fails.
https://bugzilla.redhat.com/show_bug.cgi?id=1452062
2017-06-07 17:34:47 +02:00
|
|
|
if (recheck_assume_connection(self, device))
|
2014-09-24 16:58:07 -05:00
|
|
|
return;
|
|
|
|
|
|
device: remove default-unmanaged and refactor unmanaged flags
Get rid of NM_UNMANAGED_DEFAULT and refine the interaction between
unmanaged flags, device state and managed property.
Previously, the NM_UNMANAGED_DEFAULT was special in that a device was
still considered managed if it had solely the NM_UNMANAGED_DEFAULT flag
set and its state was managed. Thus, whether the device (state) was managed,
depended on the device state too.
Now, a device is considered managed (or unmanaged) based on the unmanaged
flags and realization state alone. At the same time, the device state
directly corresponds to the managed property of the device. Of course,
while changing the unmanaged flags, that invariant is shortly violated
until the state transition is complete.
Introduce more unmanaged flags, some of which are non-authoritative.
For example, the EXTERNAL_DOWN flag only has an effect as long as the user
didn't explicitly manage the device (NM_UNMANAGED_USER_EXPLICIT). In other
words, certain flags can render other flags ineffective. Whether the device
is considered managed depends on the flags but also on the explicitly unset flags.
In a way, this is similar to previously, where NM_UNMANAGED_DEFAULT was ignored
(if no other flags were present).
Also, previously a device that was NM_UNMANAGED_DEFAULT and in disconnected
state would transition back to unmanaged. No longer do that. Once a device is
managed, it stays managed as long as the flags indicate it should be managed.
However, the user can also modify the unmanaged flags via the D-Bus API.
Also get rid of nm_device_finish_init(). That was previously called
by NMManager after add_device(). As we now realize devices (possibly
multiple times) this should be handled during realization.
https://bugzilla.gnome.org/show_bug.cgi?id=746566
2015-09-15 15:35:16 +02:00
|
|
|
/* If we failed to assume a connection for the managed device, the device
|
|
|
|
|
* is still unavailable. Set UNAVAILABLE state again, this time with NOW_MANAGED. */
|
|
|
|
|
nm_device_state_changed(device,
|
|
|
|
|
NM_DEVICE_STATE_UNAVAILABLE,
|
|
|
|
|
NM_DEVICE_STATE_REASON_NOW_MANAGED);
|
|
|
|
|
nm_device_emit_recheck_auto_activate(device);
|
2014-09-24 16:58:07 -05:00
|
|
|
}
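A condensed sketch of the ASSUME-versus-EXTERNAL decision described in
the commit message above; the state-file lookup is an illustrative
assumption declared extern here:

#include <glib.h>

typedef enum {
    ACTIVATION_TYPE_EXTERNAL_SKETCH,
    ACTIVATION_TYPE_ASSUME_SKETCH,
} ActivationTypeSketch;

/* hypothetical: UUID recorded in the state file for this interface */
extern const char *state_file_get_connection_uuid(int ifindex);

static ActivationTypeSketch
choose_activation_type_sketch(int ifindex, gboolean guess_assume)
{
    /* ASSUME only with a record from a previous run (or guess-assume
     * during startup); otherwise the device is externally activated
     * and gets a freshly generated in-memory connection. */
    if (guess_assume || state_file_get_connection_uuid(ifindex))
        return ACTIVATION_TYPE_ASSUME_SKETCH;
    return ACTIVATION_TYPE_EXTERNAL_SKETCH;
}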
|
|
|
|
|
|
2014-02-09 10:22:19 -06:00
|
|
|
/**
|
|
|
|
|
* add_device:
|
|
|
|
|
* @self: the #NMManager
|
|
|
|
|
* @device: the #NMDevice to add
|
2015-12-08 14:51:56 +01:00
|
|
|
* @error: (out): the #GError
|
2014-02-09 10:22:19 -06:00
|
|
|
*
|
|
|
|
|
* If successful, this function will increase the reference count of @device.
|
|
|
|
|
* Callers should decrease the reference count.
|
|
|
|
|
*/
|
2015-12-08 14:51:56 +01:00
|
|
|
static gboolean
|
|
|
|
|
add_device(NMManager *self, NMDevice *device, GError **error)
|
2007-02-08 15:34:26 +00:00
|
|
|
{
|
2008-04-29 Dan Williams <dcbw@redhat.com>
Handle HAL dropouts better; allow NM to start up even if HAL isn't up yet.
* marshallers/nm-marshal.list
- Add marshaller
* src/NetworkManager.c
- (main): let the NMManager handle the NMHalManager
* src/nm-hal-manager.c
src/nm-hal-manager.h
- convert to a GObject, and emit signals when stuff changes. Let the
NMManager handle the signals, instead of the NMHalManager calling
into the NMManager.
* src/nm-manager.c
src/nm-manager.h
- (remove_one_device): consolidate device removals here
- (dispose): use remove_one_device()
- (nm_manager_get_device_by_udi): make static
- (deferred_hal_manager_query_devices): idle handler to query the HAL
manager for devices at startup or wakeup time
- (nm_manager_new): create and monitor the HAL manager
- (hal_manager_udi_added_cb): new function; do what
nm_manager_add_device() used to do when signalled by the hal manager
- (hal_manager_udi_removed_cb): new function; do what
nm_manager_remove_device() used to do when signalled by the hal
manager
- (hal_manager_rfkill_changed_cb): handle rfkill changes from the
hal manager
- (hal_manager_hal_reappeared_cb): when HAL comes back, remove devices
in our device list that aren't known to HAL
- (impl_manager_sleep): on wakeup, re-add devices from an idle handler;
see comments on nm-hal-manager.c::nm_manager_state_changed() a few
commits ago
- (nm_manager_get_device_by_path, nm_manager_is_udi_managed,
nm_manager_activation_pending, nm_manager_wireless_enabled,
nm_manager_wireless_hardware_enabled,
nm_manager_set_wireless_hardware_enabled): remove, unused
git-svn-id: http://svn-archive.gnome.org/svn/NetworkManager/trunk@3619 4912f4e0-d625-0410-9fb7-b9a5a253dbdc
2008-04-29 23:03:00 +00:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
const char *iface, *type_desc;
|
2022-02-01 20:21:34 +01:00
|
|
|
NMRfkillType rtype;
|
2021-11-09 13:28:54 +01:00
|
|
|
GSList *iter, *remove = NULL;
|
2014-09-24 14:57:14 -05:00
|
|
|
int ifindex;
|
2021-11-09 13:28:54 +01:00
|
|
|
const char *dbus_path;
|
|
|
|
|
NMDevice *candidate;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-02-09 10:22:19 -06:00
|
|
|
/* No duplicates */
|
2014-09-24 14:57:14 -05:00
|
|
|
ifindex = nm_device_get_ifindex(device);
|
2015-12-08 14:51:56 +01:00
|
|
|
if (ifindex > 0 && nm_manager_get_device_by_ifindex(self, ifindex)) {
|
|
|
|
|
g_set_error(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_FAILED,
|
2016-02-16 16:21:52 +01:00
|
|
|
"A device with ifindex %d already exists",
|
|
|
|
|
ifindex);
|
2015-12-08 14:51:56 +01:00
|
|
|
return FALSE;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-02-09 10:22:19 -06:00
|
|
|
/* Remove existing devices owned by the new device; e.g. remove Ethernet
|
|
|
|
|
* ports that are owned by a WWAN modem, since udev may announce them
|
|
|
|
|
* before the modem is fully discovered.
|
|
|
|
|
*
|
|
|
|
|
* FIXME: use parent/child device relationships instead of removing
|
|
|
|
|
* the child NMDevice entirely
|
|
|
|
|
*/
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (candidate, &priv->devices_lst_head, devices_lst) {
|
|
|
|
|
if (nm_device_is_real(candidate) && (iface = nm_device_get_ip_iface(candidate))
|
|
|
|
|
&& nm_device_owns_iface(device, iface))
|
2014-10-15 21:17:45 -05:00
|
|
|
remove = g_slist_prepend(remove, candidate);
|
2009-09-04 16:55:48 +02:00
|
|
|
}
|
2014-02-09 10:22:19 -06:00
|
|
|
for (iter = remove; iter; iter = iter->next)
|
2019-05-17 19:22:19 +02:00
|
|
|
remove_device(self, NM_DEVICE(iter->data), FALSE);
|
2014-02-09 10:22:19 -06:00
|
|
|
g_slist_free(remove);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
g_object_ref(device);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
nm_assert(c_list_is_empty(&device->devices_lst));
|
|
|
|
|
c_list_link_tail(&priv->devices_lst_head, &device->devices_lst);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2015-12-07 19:42:59 +01:00
|
|
|
g_signal_connect(device,
|
|
|
|
|
NM_DEVICE_STATE_CHANGED,
|
2014-02-10 11:13:55 +01:00
|
|
|
G_CALLBACK(manager_device_state_changed),
|
|
|
|
|
self);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-02-10 08:49:47 -06:00
|
|
|
g_signal_connect(device, NM_DEVICE_REMOVED, G_CALLBACK(device_removed_cb), self);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
manager: fix preserving assume state during activation
Originally, commit 850c977 "device: track system interface state in NMDevice"
intended that a connection can only be assumed initially, when seeing
a device for the first time. Assuming a connection later was to be
prevented by setting the device's sys-iface-state to MANAGED.
That changed too much in behavior, because we used to assume external
connections also when they are activated later on. So this was attempted
to be fixed by
- acf1067 nm-manager: try assuming connections on managed devices
- b6b7d90 manager: avoid generating in memory connections during startup for managed devices
It's probably just wrong to prevent assuming connections based on the
sys-iface-state. So drop the check for sys-iface-state from
recheck_assume_connection(). Now, we can assume anytime on managed,
disconnected interfaces, like previously.
Btw, note that priv->startup is totally wrong to check there, because
priv->startup has the sole purpose of tracking the startup-complete property.
Startup, as far as NMManager is concerned, is platform_query_devices().
However, the problem is that we only assume connections (contrary to
doing external activation) when we have a connection UUID from the state
file or with guess-assume during startup.
When assuming a master device, it can fail with
(nm-bond): ignoring generated connection (IPv6LL-only and not in master-slave relationship)
thus, for internal reasons, the device cannot be assumed yet.
Fix that by attaching the assume state to the device, so that on multiple
recheck_assume_connection() calls we still try to assume. Whenever we try
to assume the connection and it fails due to external reasons (like the
connection no longer matching), we clear the assume state, so that we keep
trying only as long as it is internal reasons that make assuming fail.
https://bugzilla.redhat.com/show_bug.cgi?id=1452062
2017-06-07 17:34:47 +02:00
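A hedged sketch of the retry behavior this commit describes: the assume state is attached to the device, survives rechecks that fail for internal reasons, and is cleared when assuming fails for external reasons. All names here are illustrative, not actual NetworkManager API.

/* Sketch: assume state kept on the device across recheck attempts. */
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
    char *assume_uuid;  /* from the state file, or guessed at startup */
    bool  guess_assume;
} AssumeState;

typedef struct {
    AssumeState assume;
} Device;

static void
assume_state_clear(Device *dev)
{
    free(dev->assume.assume_uuid);
    dev->assume.assume_uuid  = NULL;
    dev->assume.guess_assume = false;
}

/* called each time the device might be ready to be assumed */
static bool
recheck_assume(Device *dev, bool internal_not_ready, bool connection_matches)
{
    if (!dev->assume.assume_uuid && !dev->assume.guess_assume)
        return false;              /* nothing (left) to assume */

    if (internal_not_ready)
        return false;              /* keep the state and retry later */

    if (!connection_matches) {
        assume_state_clear(dev);   /* external failure: give up for good */
        return false;
    }
    return true;                   /* proceed with assuming */
}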
|
|
|
g_signal_connect_data(device,
|
|
|
|
|
NM_DEVICE_RECHECK_ASSUME,
|
|
|
|
|
G_CALLBACK(recheck_assume_connection_cb),
|
|
|
|
|
self,
|
|
|
|
|
NULL,
|
|
|
|
|
G_CONNECT_SWAPPED);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-12-26 11:12:39 +01:00
|
|
|
g_signal_connect(device,
|
2014-06-12 13:27:14 -05:00
|
|
|
"notify::" NM_DEVICE_IP_IFACE,
|
|
|
|
|
G_CALLBACK(device_ip_iface_changed),
|
2020-09-28 16:03:33 +02:00
|
|
|
self);
|
|
|
|
|
|
2016-01-07 17:54:38 +01:00
|
|
|
g_signal_connect(device,
|
2016-12-26 11:12:39 +01:00
|
|
|
"notify::" NM_DEVICE_IFINDEX,
|
|
|
|
|
G_CALLBACK(device_ifindex_changed),
|
2020-09-28 16:03:33 +02:00
|
|
|
self);
|
|
|
|
|
|
2014-06-12 13:27:14 -05:00
|
|
|
g_signal_connect(device, "notify::" NM_DEVICE_IFACE, G_CALLBACK(device_iface_changed), self);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-09-24 16:58:07 -05:00
|
|
|
g_signal_connect(device, "notify::" NM_DEVICE_REAL, G_CALLBACK(device_realized), self);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-07-03 19:09:34 +02:00
|
|
|
g_signal_connect(device,
|
|
|
|
|
"notify::" NM_DEVICE_IP4_CONNECTIVITY,
|
|
|
|
|
G_CALLBACK(device_connectivity_changed),
|
|
|
|
|
self);
|
|
|
|
|
g_signal_connect(device,
|
|
|
|
|
"notify::" NM_DEVICE_IP6_CONNECTIVITY,
|
2017-03-20 13:36:00 +00:00
|
|
|
G_CALLBACK(device_connectivity_changed),
|
|
|
|
|
self);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-08-13 17:45:34 -04:00
|
|
|
if (priv->startup) {
|
|
|
|
|
g_signal_connect(device,
|
|
|
|
|
"notify::" NM_DEVICE_HAS_PENDING_ACTION,
|
|
|
|
|
G_CALLBACK(device_has_pending_action_changed),
|
|
|
|
|
self);
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2011-11-18 12:02:58 -06:00
|
|
|
/* Update global rfkill state for this device type with the device's
|
|
|
|
|
* rfkill state, and then set this device's rfkill state based on the
|
|
|
|
|
* global state.
|
|
|
|
|
*/
|
|
|
|
|
rtype = nm_device_get_rfkill_type(device);
|
2022-02-01 20:21:34 +01:00
|
|
|
if (rtype != NM_RFKILL_TYPE_UNKNOWN) {
|
2022-02-01 22:19:36 +01:00
|
|
|
_rfkill_update(self, rtype);
|
|
|
|
|
nm_device_set_enabled(device, _rfkill_radio_state_get(self, rtype));
|
2011-04-13 21:58:25 -05:00
|
|
|
}
|
|
|
|
|
|
2014-02-09 10:22:19 -06:00
|
|
|
iface = nm_device_get_iface(device);
|
|
|
|
|
g_assert(iface);
|
2009-07-07 14:24:12 -04:00
|
|
|
type_desc = nm_device_get_type_desc(device);
|
|
|
|
|
g_assert(type_desc);
|
2008-04-07 Dan Williams <dcbw@redhat.com>
* include/NetworkManager.h
- Remove the DOWN and CANCELLED device states
- Add UNMANAGED and UNAVAILABLE device states
- Document the device states
* introspection/nm-device.xml
src/nm-device-interface.c
src/nm-device-interface.h
- Add the 'managed' property
* test/nm-tool.c
- (detail_device): print out device state
* src/NetworkManagerSystem.h
src/backends/NetworkManagerArch.c
src/backends/NetworkManagerDebian.c
src/backends/NetworkManagerFrugalware.c
src/backends/NetworkManagerGentoo.c
src/backends/NetworkManagerMandriva.c
src/backends/NetworkManagerPaldo.c
src/backends/NetworkManagerRedHat.c
src/backends/NetworkManagerSlackware.c
src/backends/NetworkManagerSuSE.c
- (nm_system_device_get_system_config, nm_system_device_get_disabled
nm_system_device_free_system_config): remove; they were unused and
their functionality should be re-implemented in each distro's
system settings service plugin
* src/nm-gsm-device.c
src/nm-gsm-device.h
src/nm-cdma-device.c
src/nm-cdma-device.h
- (*_new): take the 'managed' argument
* src/nm-device.c
- (nm_device_set_address): remove, fold into nm_device_bring_up()
- (nm_device_init): start in unmanaged state, not disconnected
- (constructor): don't start device until the system settings service
has had a chance to figure out if the device is managed or not
- (nm_device_deactivate, nm_device_bring_up, nm_device_bring_down):
don't set device state here, let callers handle that as appropriate
- (nm_device_dispose): don't touch the device if it's not managed
- (set_property, get_property, nm_device_class_init): implement the
'managed' property
- (nm_device_state_changed): bring the device up if it's now managed,
and deactivate it if it used to be active
- (nm_device_get_managed, nm_device_set_managed): do the right thing
with the managed state
* src/nm-hal-manager.c
- (wired_device_creator, wireless_device_creator, modem_device_creator):
take initial managed state and pass it along to device constructors
- (create_device_and_add_to_list): get managed state and pass to
type creators
* src/nm-device-802-11-wireless.c
- (real_can_activate): fold in most of
nm_device_802_11_wireless_can_activate()
- (can_scan): can't scan in UNAVAILABLE or UNMANAGED
- (link_timeout_cb): instead of deactivating, change device state and
let the device state handler do it
- (real_update_hw_address): clean up
- (state_changed_cb): when entering UNAVAILABLE state, schedule an idle
handler to transition to DISCONNECTED if the device isn't rfkilled
* src/nm-device-802-3-ethernet.c
- (set_carrier): move above callers and get rid of prototype
- (device_state_changed): when entering UNAVAILABLE state, schedule an
idle handler to transition to DISCONNECTED if the device has a
carrier
- (real_update_hw_address): clean up
- (link_timeout_cb, ppp_state_changed): change state instead of calling
deactivation directly as deactivation doesn't change state anymore
* src/NetworkManagerPolicy.c
- (schedule_activate_check): yay, remove wireless_enabled hack since
the NMManager and wireless devices work that out themselves now
- (device_state_changed): change to a switch and update for new device
states
- (device_carrier_changed): remove; device handles this now through
state changes
- (device_added): don't care about carrier any more; the initial
activation check will happen when the device transitions to
DISCONNECTED
* src/nm-manager.c
- (dispose): clear unmanaged devices
- (handle_unmanaged_devices): update unmanaged device list and toggle
the managed property on each device when needed
- (system_settings_properties_changed_cb): handle signals from the
system settings service
- (system_settings_get_unmanaged_devices_cb): handle callback from
getting the unmanaged device list method call
- (query_unmanaged_devices): ask the system settings service for its
list of unmanaged devices
- (nm_manager_name_owner_changed, initial_get_connections): get unmanaged
devices
- (manager_set_wireless_enabled): push rfkill state down to wireless
devices directly and let them handle the necessary state transitions
- (manager_device_state_changed): update for new device states
- (nm_manager_add_device): set initial rfkill state on wireless devices
- (nm_manager_remove_device): don't touch the device if it's unmanaged
- (nm_manager_activate_connection): return error if the device is
unmanaged
- (nm_manager_sleep): handle new device states correctly; don't change
the state of unavailable/unmanaged devices
* libnm-glib/nm-device-802-11-wireless.c
- (state_changed_cb): update for new device states
git-svn-id: http://svn-archive.gnome.org/svn/NetworkManager/trunk@3540 4912f4e0-d625-0410-9fb7-b9a5a253dbdc
2008-04-08 02:58:02 +00:00
|
|
|
|
2022-02-02 09:39:23 +01:00
|
|
|
nm_device_set_unmanaged_by_user_settings(device, TRUE);
|
device: remove default-unmanaged and refactor unmanaged flags
Get rid of NM_UNMANAGED_DEFAULT and refine the interaction between
unmanaged flags, device state and the managed property.
Previously, NM_UNMANAGED_DEFAULT was special in that a device was
still considered managed if it had solely the NM_UNMANAGED_DEFAULT flag
set and its state was managed. Thus, whether the device (state) was managed
depended on the device state too.
Now, a device is considered managed (or unmanaged) based on the unmanaged
flags and realization state alone. At the same time, the device state
directly corresponds to the managed property of the device. Of course,
while changing the unmanaged flags, that invariant is briefly violated
until the state transition is complete.
Introduce more unmanaged flags, some of which are non-authoritative.
For example, the EXTERNAL_DOWN flag only has an effect as long as the user
didn't explicitly manage the device (NM_UNMANAGED_USER_EXPLICIT). In other
words, certain flags can render other flags ineffective. Whether the device
is considered managed depends on the set flags but also on the explicitly unset flags.
In a way, this is similar to before, where NM_UNMANAGED_DEFAULT was ignored
(if no other flags were present).
Also, previously a device that was NM_UNMANAGED_DEFAULT and in disconnected
state would transition back to unmanaged. No longer do that. Once a device is
managed, it stays managed as long as the flags indicate it should be managed.
However, the user can also modify the unmanaged flags via the D-Bus API.
Also get rid of nm_device_finish_init(). That was previously called
by NMManager after add_device(). As we now realize devices (possibly
multiple times), this should be handled during realization.
https://bugzilla.gnome.org/show_bug.cgi?id=746566
2015-09-15 15:35:16 +02:00
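A small illustrative model of the described precedence (not the real NMUnmanagedFlags code): a non-authoritative flag is rendered ineffective once the user explicitly manages the device, so the result depends on the set flags as well as the explicitly cleared ones.

/* Sketch: evaluating unmanaged flags where some flags override others. */
#include <stdbool.h>
#include <stdio.h>

enum {
    UNMANAGED_SLEEPING      = 1 << 0, /* authoritative */
    UNMANAGED_USER_SETTINGS = 1 << 1, /* authoritative */
    UNMANAGED_EXTERNAL_DOWN = 1 << 2, /* non-authoritative */
};

static bool
is_unmanaged(unsigned flags, bool user_explicit_managed)
{
    unsigned effective = flags;

    /* the user's explicit decision to manage the device disables
     * the non-authoritative flags */
    if (user_explicit_managed)
        effective &= ~UNMANAGED_EXTERNAL_DOWN;

    return effective != 0;
}

int main(void)
{
    printf("%d\n", is_unmanaged(UNMANAGED_EXTERNAL_DOWN, false)); /* 1: unmanaged */
    printf("%d\n", is_unmanaged(UNMANAGED_EXTERNAL_DOWN, true));  /* 0: managed */
    return 0;
}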
|
|
|
|
|
|
|
|
nm_device_set_unmanaged_flags(device, NM_UNMANAGED_SLEEPING, manager_sleeping(self));
|
2014-03-31 21:45:54 -05:00
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straight forward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry, either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined, and signals are
guaranteed to be emitted for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
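As a rough sketch of this approach, the following glues an object directly to a GDBusConnection with a static interface description and a vtable, with no generated skeleton in between. The org.example.Demo interface and its Ping method are made up for the example; build against gio-2.0 (pkg-config --cflags --libs gio-2.0).

/* Sketch: export an object straight on the connection. */
#include <gio/gio.h>

static const char introspection_xml[] =
    "<node>"
    "  <interface name='org.example.Demo'>"
    "    <method name='Ping'>"
    "      <arg type='s' name='reply' direction='out'/>"
    "    </method>"
    "  </interface>"
    "</node>";

static void
method_call(GDBusConnection       *connection,
            const char            *sender,
            const char            *object_path,
            const char            *interface_name,
            const char            *method_name,
            GVariant              *parameters,
            GDBusMethodInvocation *invocation,
            gpointer               user_data)
{
    if (g_strcmp0(method_name, "Ping") == 0)
        g_dbus_method_invocation_return_value(invocation,
                                              g_variant_new("(s)", "pong"));
}

static const GDBusInterfaceVTable vtable = {
    .method_call = method_call,
};

int main(void)
{
    GError          *error = NULL;
    GDBusNodeInfo   *info;
    GDBusConnection *bus;
    guint            id;

    info = g_dbus_node_info_new_for_xml(introspection_xml, &error);
    if (!info)
        g_error("bad XML: %s", error->message);

    bus = g_bus_get_sync(G_BUS_TYPE_SESSION, NULL, &error);
    if (!bus)
        g_error("no bus: %s", error->message);

    /* the object is registered directly; no skeleton layer in between */
    id = g_dbus_connection_register_object(bus,
                                           "/org/example/Demo",
                                           info->interfaces[0],
                                           &vtable,
                                           NULL,
                                           NULL,
                                           &error);
    if (id == 0)
        g_error("register failed: %s", error->message);

    g_main_loop_run(g_main_loop_new(NULL, FALSE));
    return 0;
}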
|
|
|
dbus_path = nm_dbus_object_export(NM_DBUS_OBJECT(device));
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG2I(LOGD_DEVICE, device, "new %s device (%s)", type_desc, dbus_path);
|
2015-04-03 10:08:52 -04:00
|
|
|
|
2010-10-27 20:05:23 -05:00
|
|
|
nm_settings_device_added(priv->settings, device);
|
2014-10-06 11:21:54 -05:00
|
|
|
g_signal_emit(self, signals[INTERNAL_DEVICE_ADDED], 0, device);
|
2016-04-01 17:34:51 +02:00
|
|
|
_notify(self, PROP_ALL_DEVICES);
|
2009-08-05 18:03:09 -04:00
|
|
|
|
2016-12-26 11:12:39 +01:00
|
|
|
_parent_notify_changed(self, device, FALSE);
|
|
|
|
|
|
2015-12-08 14:51:56 +01:00
|
|
|
return TRUE;
|
2007-02-08 15:34:26 +00:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2014-02-09 10:22:19 -06:00
|
|
|
static void
|
|
|
|
|
factory_device_added_cb(NMDeviceFactory *factory, NMDevice *device, gpointer user_data)
|
|
|
|
|
{
|
device: remove default-unmanaged and refactor unmanaged flags
2015-09-15 15:35:16 +02:00
|
|
|
NMManager *self = user_data;
|
2021-11-09 13:28:54 +01:00
|
|
|
GError *error = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
device: remove default-unmanaged and refactor unmanaged flags
2015-09-15 15:35:16 +02:00
|
|
|
g_return_if_fail(NM_IS_MANAGER(self));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-09-26 14:45:35 +02:00
|
|
|
if (nm_device_realize_start(device,
|
|
|
|
|
NULL,
|
manager: fix preserving assume state during activation
2017-06-07 17:34:47 +02:00
|
|
|
FALSE, /* assume_state_guess_assume */
|
|
|
|
|
NULL, /* assume_state_connection_uuid */
|
|
|
|
|
FALSE, /* set_nm_owned */
|
2016-09-26 14:45:35 +02:00
|
|
|
NM_UNMAN_FLAG_OP_FORGET,
|
|
|
|
|
NULL,
|
|
|
|
|
&error)) {
|
device: remove default-unmanaged and refactor unmanaged flags
2015-09-15 15:35:16 +02:00
|
|
|
add_device(self, device, NULL);
|
manager: fix preserving assume state during activation
2017-06-07 17:34:47 +02:00
|
|
|
_device_realize_finish(self, device, NULL);
|
2017-09-13 18:38:59 +02:00
|
|
|
retry_connections_for_parent_device(self, device);
|
2014-09-24 16:58:07 -05:00
|
|
|
} else {
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG2W(LOGD_DEVICE, device, "failed to realize device: %s", error->message);
|
2014-09-05 08:50:02 -05:00
|
|
|
g_error_free(error);
|
|
|
|
|
}
|
2014-02-09 10:22:19 -06:00
|
|
|
}
|
2012-02-05 23:18:32 -06:00
|
|
|
|
2014-09-17 14:17:30 -05:00
|
|
|
static void
|
|
|
|
|
_register_device_factory(NMDeviceFactory *factory, gpointer user_data)
|
2014-09-05 15:05:40 -05:00
|
|
|
{
|
2014-09-17 14:17:30 -05:00
|
|
|
NMManager *self = NM_MANAGER(user_data);
|
2014-09-05 15:05:40 -05:00
|
|
|
|
|
|
|
|
g_signal_connect(factory,
|
|
|
|
|
NM_DEVICE_FACTORY_DEVICE_ADDED,
|
|
|
|
|
G_CALLBACK(factory_device_added_cb),
|
|
|
|
|
self);
|
bluetooth: refactor BlueZ handling and let NMBluezManager cache ObjectManager data
This is a complete refactoring of the bluetooth code.
Now that BlueZ 4 support was dropped, the separation of NMBluezManager
and NMBluez5Manager makes no sense. They should be merged.
At that point, notice that BlueZ 5's D-Bus API is fully centered around
D-Bus's ObjectManager interface. Using that interface, we basically only
call GetManagedObjects() once and register for the InterfacesAdded,
InterfacesRemoved and PropertiesChanged signals. There is never a need to
fetch individual properties.
Note how NMBluezDevice used to query the D-Bus properties itself by
creating a GDBusProxy. This is redundant, because when using the ObjectManager
interfaces, we have all the information already.
Instead, let NMBluezManager basically become the client-side cache of
all of BlueZ's ObjectManager interface. NMBluezDevice was mostly concerned
with caching the D-Bus interface's state, tracking suitable profiles
(pan_connection), and mediating between BlueZ and NMDeviceBt.
These tasks don't get simpler by moving them to a separate file. Let them
also be handled by NMBluezManager.
I mean, just look at how it was previously: NMBluez5Manager registers for
the ObjectManager interface and sees a device appearing. It creates a
NMBluezDevice object and registers for its "initialized" and
"notify::usable" signals. In the meantime, NMBluezDevice fetches the
relevant information from D-Bus (although it was already present in the
data provided by the ObjectManager) and eventually emits these usable
and initialized signals.
Then, NMBluez5Manager emits a "bdaddr-added" signal, for which NMBluezManager
creates the NMDeviceBt instance. NMBluezManager, NMBluez5Manager and
NMBluezDevice are cooperating so strongly that it is simpler
to merge them.
This is not mere refactoring. This patch aims to make everything
asynchronous and always cancellable. Also, it aims to fix races
and inconsistencies in the state.
- Registering with a NAP server now waits for the response and delays
activation of the NMDeviceBridge accordingly.
- For NAP connections we now watch the bnep0 interface in platform, and tear
down the device when it goes away. BlueZ doesn't send us a notification
on D-Bus in that case.
- Rework establishing a DUN connection. It no longer uses a blocking
connect() and does not block until the rfcomm device appears. It's
all async now. It also watches the rfcomm file descriptor for
POLLERR/POLLHUP to notice disconnects.
- Drop nm_device_factory_emit_component_added() and instead let
NMDeviceBt directly register for the WWan factory's "added" signal.
2019-08-11 10:43:53 +02:00
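A minimal sketch of the ObjectManager pattern described above, for brevity using GIO's generic GDBusObjectManagerClient against org.bluez (whereas the commit implements its own client-side cache in NMBluezManager); build against gio-2.0.

/* Sketch: one GetManagedObjects() up front, then change notifications. */
#include <gio/gio.h>

static void
object_added(GDBusObjectManager *manager, GDBusObject *object, gpointer user_data)
{
    g_print("added: %s\n", g_dbus_object_get_object_path(object));
}

int main(void)
{
    GError             *error = NULL;
    GDBusObjectManager *manager;
    GList              *objects, *l;

    manager = g_dbus_object_manager_client_new_for_bus_sync(
        G_BUS_TYPE_SYSTEM,
        G_DBUS_OBJECT_MANAGER_CLIENT_FLAGS_NONE,
        "org.bluez",
        "/",
        NULL, NULL, NULL, /* default proxy types */
        NULL,
        &error);
    if (!manager)
        g_error("cannot talk to BlueZ: %s", error->message);

    /* the full object tree is now cached locally; no per-property calls */
    objects = g_dbus_object_manager_get_objects(manager);
    for (l = objects; l; l = l->next)
        g_print("have: %s\n", g_dbus_object_get_object_path(l->data));
    g_list_free_full(objects, g_object_unref);

    g_signal_connect(manager, "object-added", G_CALLBACK(object_added), NULL);

    g_main_loop_run(g_main_loop_new(NULL, FALSE));
    return 0;
}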
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*****************************************************************************/
|
|
|
|
|
|
|
|
|
|
void
|
2020-07-04 11:37:01 +03:00
|
|
|
nm_manager_notify_device_availability_maybe_changed(NMManager *self)
|
bluetooth: refactor BlueZ handling and let NMBluezManager cache ObjectManager data
2019-08-11 10:43:53 +02:00
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device;
|
bluetooth: refactor BlueZ handling and let NMBluezManager cache ObjectManager data
2019-08-11 10:43:53 +02:00
|
|
|
|
|
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst)
|
|
|
|
|
nm_device_notify_availability_maybe_changed(device);
|
2012-02-05 23:18:32 -06:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2014-09-05 15:57:40 -05:00
|
|
|
|
2009-01-19 11:01:00 +02:00
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
platform_link_added(NMManager *self,
|
2014-03-07 19:04:38 +01:00
|
|
|
int ifindex,
|
2021-11-09 13:28:54 +01:00
|
|
|
const NMPlatformLink *plink,
|
device: assume matching connections during first start
Since commit 2d1b85f (th/assume-vs-unmanaged-bgo746440), we clearly
distinguish between two modes when encountering devices with external
IP configuration:
a) external devices. For those devices we generate a volatile in-memory
connection and pretend it's active. However, the device must not be
touched by NetworkManager in any way.
b) assume, seamless take-over. Mostly for restarts of NetworkManager,
we activate a connection gracefully without going through a down-up
cycle. After the device reaches activated state, the device is
considered fully managed. For this, only an existing, non-volatile
connection can be used.
Before 'th/assume-vs-unmanaged-bgo746440', the behaviors were not
clearly separated.
Since then, we only choose to assume a connection (b) when the state
file indicates a matching connection. Now, extend this to also assume
connections when:
- during first-start (not after a restart) when there is no
state file yet.
- and, if we have an existing, non volatile, connection which
matches the device's configuration.
This patch lets NetworkManager assume connection also on first start.
That is for example useful when handing over network configuration from
initrd.
This only applies to existing, permanent, matching(!) connections, so it is a
good guess that the user wants NM to take over this interface. This brings us
closer to the previous behavior before 'th/assume-vs-unmanaged-bgo746440'.
https://bugzilla.redhat.com/show_bug.cgi?id=1439220
(cherry picked from commit 27b2477cb7dad2410c88c7dfca51f3aad208b881)
2017-04-19 16:16:12 +02:00
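As a hypothetical condensation of the resulting decision (illustrative names only, not the actual code path):

/* Sketch: when do we assume instead of treating the device as external? */
#include <stdbool.h>

typedef struct {
    bool uuid_in_state_file; /* a connection UUID recorded for this device */
    bool first_start;        /* no state file yet: guess-assume applies */
    bool existing_matches;   /* a permanent profile matches the external config */
} AssumeInput;

static bool
should_assume(const AssumeInput *in)
{
    if (in->uuid_in_state_file)
        return true;
    if (in->first_start && in->existing_matches)
        return true;  /* e.g. take over configuration handed over from initrd */
    return false;     /* otherwise: external device with a generated profile */
}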
|
|
|
gboolean guess_assume,
|
2016-09-23 17:36:21 +02:00
|
|
|
const NMConfigDeviceStateData *dev_state)
|
2009-01-19 11:01:00 +02:00
|
|
|
{
|
core: track devices in manager via embedded CList
2018-03-23 21:51:07 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDeviceFactory *factory;
|
|
|
|
|
NMDevice *device = NULL;
|
|
|
|
|
NMDevice *candidate;
|
2009-01-19 11:01:00 +02:00
|
|
|
|
2013-05-24 17:30:31 -03:00
|
|
|
g_return_if_fail(ifindex > 0);
|
2012-03-06 11:38:03 -06:00
|
|
|
|
2014-09-05 13:15:44 -05:00
|
|
|
if (nm_manager_get_device_by_ifindex(self, ifindex))
|
2013-05-24 17:30:31 -03:00
|
|
|
return;
|
2009-01-19 11:01:00 +02:00
|
|
|
|
2014-10-15 21:17:45 -05:00
|
|
|
/* Let unrealized devices try to realize themselves with the link */
|
core: track devices in manager via embedded CList
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (candidate, &priv->devices_lst_head, devices_lst) {
|
2021-11-09 13:28:54 +01:00
|
|
|
gboolean compatible = TRUE;
|
|
|
|
|
gs_free_error GError *error = NULL;
|
2014-10-09 12:42:29 -05:00
|
|
|
|
2017-10-10 11:14:05 +02:00
|
|
|
if (nm_device_get_link_type(candidate) != plink->type)
|
|
|
|
|
continue;
|
|
|
|
|
|
2020-07-20 09:31:03 +02:00
|
|
|
if (!nm_streq(nm_device_get_iface(candidate), plink->name))
|
2014-10-15 21:17:45 -05:00
|
|
|
continue;
|
2014-10-09 12:42:29 -05:00
|
|
|
|
2014-10-15 21:17:45 -05:00
|
|
|
if (nm_device_is_real(candidate)) {
|
2018-08-02 17:45:09 +02:00
|
|
|
/* There's already a realized device with the link's name
|
|
|
|
|
* and a different ifindex.
|
2014-10-15 21:17:45 -05:00
|
|
|
*/
|
2018-08-02 17:45:09 +02:00
|
|
|
if (nm_device_get_ifindex(candidate) <= 0)
|
|
|
|
|
nm_device_update_from_platform_link(candidate, plink);
|
|
|
|
|
else {
|
|
|
|
|
/* The ifindex of a device can't be changed after
|
|
|
|
|
* initialization because it is used as a key by
|
|
|
|
|
* the dns-manager.
|
|
|
|
|
*/
|
|
|
|
|
_LOGD(LOGD_DEVICE,
|
|
|
|
|
"(%s): removing old device %p after ifindex change from %d to %d",
|
|
|
|
|
plink->name,
|
|
|
|
|
candidate,
|
|
|
|
|
nm_device_get_ifindex(candidate),
|
|
|
|
|
ifindex);
|
2019-05-17 19:22:19 +02:00
|
|
|
remove_device(self, candidate, FALSE);
|
2018-08-02 17:45:09 +02:00
|
|
|
goto add;
|
|
|
|
|
}
|
2014-10-15 21:17:45 -05:00
|
|
|
return;
|
2016-09-26 14:45:35 +02:00
|
|
|
} else if (nm_device_realize_start(candidate,
|
|
|
|
|
plink,
|
manager: fix preserving assume state during activation
2017-06-07 17:34:47 +02:00
|
|
|
FALSE, /* assume_state_guess_assume */
|
|
|
|
|
NULL, /* assume_state_connection_uuid */
|
|
|
|
|
FALSE, /* set_nm_owned */
|
2016-09-26 14:45:35 +02:00
|
|
|
NM_UNMAN_FLAG_OP_FORGET,
|
|
|
|
|
&compatible,
|
|
|
|
|
&error)) {
|
manager: fix preserving assume state during activation
2017-06-07 17:34:47 +02:00
|
|
|
_device_realize_finish(self, candidate, plink);
|
2014-09-24 16:58:07 -05:00
|
|
|
return;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD(LOGD_DEVICE, "(%s): failed to realize from plink: '%s'", plink->name, error->message);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-10-15 21:17:45 -05:00
|
|
|
/* Try next unrealized device */
|
2014-09-24 16:58:07 -05:00
|
|
|
}
|
|
|
|
|
|
2018-08-02 17:45:09 +02:00
|
|
|
add:
|
2014-09-17 14:17:30 -05:00
|
|
|
/* Try registered device factories */
|
|
|
|
|
factory = nm_device_factory_manager_find_factory_for_link_type(plink->type);
|
|
|
|
|
if (factory) {
|
2021-11-09 13:28:54 +01:00
|
|
|
gboolean ignore = FALSE;
|
|
|
|
|
gs_free_error GError *error = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-09-05 08:50:02 -05:00
|
|
|
device =
|
|
|
|
|
nm_device_factory_create_device(factory, plink->name, plink, NULL, &ignore, &error);
|
2014-09-17 14:17:30 -05:00
|
|
|
if (!device) {
|
2015-05-06 09:53:44 -05:00
|
|
|
if (!ignore) {
|
2016-10-06 21:28:40 +02:00
|
|
|
_LOGW(LOGD_PLATFORM,
|
|
|
|
|
"%s: factory failed to create device: %s",
|
2016-03-02 11:38:26 +01:00
|
|
|
plink->name,
|
|
|
|
|
error->message);
|
2016-04-12 10:19:21 +02:00
|
|
|
} else {
|
2016-10-06 21:28:40 +02:00
|
|
|
_LOGD(LOGD_PLATFORM,
|
|
|
|
|
"%s: factory failed to create device: %s",
|
2016-04-12 10:19:21 +02:00
|
|
|
plink->name,
|
|
|
|
|
error->message);
|
2015-05-06 09:53:44 -05:00
|
|
|
}
|
2012-02-05 23:18:32 -06:00
|
|
|
return;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
|
|
|
|
|
2012-02-05 23:18:32 -06:00
|
|
|
if (device == NULL) {
|
2016-09-26 13:26:15 +02:00
|
|
|
gboolean nm_plugin_missing = FALSE;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-10-22 17:11:24 +02:00
|
|
|
switch (plink->type) {
|
2016-06-14 11:19:15 -05:00
|
|
|
case NM_LINK_TYPE_WWAN_NET:
|
2015-05-06 09:53:44 -05:00
|
|
|
case NM_LINK_TYPE_BNEP:
|
2014-05-09 10:30:04 -05:00
|
|
|
case NM_LINK_TYPE_OLPC_MESH:
|
2014-06-27 17:02:23 +02:00
|
|
|
case NM_LINK_TYPE_TEAM:
|
2014-05-09 10:30:04 -05:00
|
|
|
case NM_LINK_TYPE_WIFI:
|
2016-10-06 21:28:40 +02:00
|
|
|
_LOGI(LOGD_PLATFORM,
|
|
|
|
|
"(%s): '%s' plugin not available; creating generic device",
|
2016-03-02 11:38:26 +01:00
|
|
|
plink->name,
|
|
|
|
|
nm_link_type_to_string(plink->type));
|
2015-04-14 14:43:31 +02:00
|
|
|
nm_plugin_missing = TRUE;
|
2020-02-21 12:56:03 +01:00
|
|
|
/* fall-through */
|
2013-04-25 15:46:39 -04:00
|
|
|
default:
|
2016-09-26 13:26:15 +02:00
|
|
|
device = nm_device_generic_new(plink, nm_plugin_missing);
|
2013-04-25 15:46:39 -04:00
|
|
|
break;
|
2012-02-05 23:18:32 -06:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
|
|
|
|
|
2014-02-09 10:22:19 -06:00
|
|
|
if (device) {
|
2016-04-04 19:42:04 +02:00
|
|
|
gs_free_error GError *error = NULL;
|
2016-09-26 14:51:23 +02:00
|
|
|
NMUnmanFlagOp unmanaged_user_explicit = NM_UNMAN_FLAG_OP_FORGET;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-09-26 14:51:23 +02:00
|
|
|
if (dev_state) {
|
|
|
|
|
switch (dev_state->managed) {
|
|
|
|
|
case NM_CONFIG_DEVICE_STATE_MANAGED_TYPE_MANAGED:
|
|
|
|
|
unmanaged_user_explicit = NM_UNMAN_FLAG_OP_SET_MANAGED;
|
|
|
|
|
break;
|
|
|
|
|
case NM_CONFIG_DEVICE_STATE_MANAGED_TYPE_UNMANAGED:
|
|
|
|
|
unmanaged_user_explicit = NM_UNMAN_FLAG_OP_SET_UNMANAGED;
|
|
|
|
|
break;
|
|
|
|
|
case NM_CONFIG_DEVICE_STATE_MANAGED_TYPE_UNKNOWN:
|
|
|
|
|
break;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
|
|
|
|
|
2016-09-26 14:45:35 +02:00
|
|
|
if (nm_device_realize_start(device,
|
|
|
|
|
plink,
|
manager: fix preserving assume state during activation
2017-06-07 17:34:47 +02:00
|
|
|
guess_assume,
|
|
|
|
|
dev_state ? dev_state->connection_uuid : NULL,
|
2017-06-07 22:11:50 +02:00
|
|
|
dev_state ? (dev_state->nm_owned == 1) : FALSE,
|
2016-09-26 14:51:23 +02:00
|
|
|
unmanaged_user_explicit,
|
2016-09-26 14:45:35 +02:00
|
|
|
NULL,
|
|
|
|
|
&error)) {
|
2015-12-08 14:51:56 +01:00
|
|
|
add_device(self, device, NULL);
|
manager: fix preserving assume state during activation
2017-06-07 17:34:47 +02:00
|
|
|
_device_realize_finish(self, device, plink);
|
2017-09-13 18:38:59 +02:00
|
|
|
retry_connections_for_parent_device(self, device);
|
2014-09-24 16:58:07 -05:00
|
|
|
} else {
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGW(LOGD_DEVICE, "%s: failed to realize device: %s", plink->name, error->message);
|
2015-08-25 16:10:40 +02:00
|
|
|
}
|
2014-02-09 10:22:19 -06:00
|
|
|
g_object_unref(device);
|
|
|
|
|
}
|
2009-01-19 11:01:00 +02:00
|
|
|
}
|
|
|
|
|
|
core: delay handling of link-changed platform event in manager
Backtrace:
NetworkManager[10972]: <debug> [1435142179.593334] [platform/nm-platform.c:2962] log_ip4_route(): signal: route 4 removed: 0.0.0.0/0 via 192.168.100.1 dev 85 metric 300 mss 0 src user scope global
NetworkManager[10972]: <debug> [1435142179.593421] [platform/nm-platform.c:2944] log_link(): signal: link removed: 85: bond0 <DOWN;broadcast,multicast,master> mtu 1500 arp 1 bond* init addr 7A:AB:BE:0D:19:3D driver bond
NetworkManager[10972]: <debug> [1435142179.593446] [nm-manager.c:779] remove_device(): (bond0): removing device (allow_unmanage 1, managed 1)
NetworkManager[10972]: <debug> [1435142179.596995] [devices/nm-device.c:7232] nm_device_set_unmanaged(): [0x5555559d2a40] (bond0): now unmanaged
NetworkManager[10972]: (devices/nm-device.c:8040):_set_state_full: runtime check failed: (priv->in_state_changed == FALSE)
#0 0x00007ffff4a538c3 in g_logv () at /lib64/libglib-2.0.so.0
#1 0x00007ffff4a53a3f in g_log () at /lib64/libglib-2.0.so.0
#2 0x00007ffff4a53d56 in g_warn_message () at /lib64/libglib-2.0.so.0
#3 0x00005555555b9dca in _set_state_full (self=0x5555559d2a40, state=NM_DEVICE_STATE_UNMANAGED, reason=NM_DEVICE_STATE_REASON_REMOVED, quitting=0) at devices/nm-device.c:8040
#4 0x0000555555626d7b in remove_device (manager=0x5555559631e0, device=0x5555559d2a40, quitting=0, allow_unmanage=<optimized out>) at nm-manager.c:801
#5 0x00007ffff28b7dac in ffi_call_unix64 () at /lib64/libffi.so.6
#6 0x00007ffff28b76d5 in ffi_call () at /lib64/libffi.so.6
#7 0x00007ffff4d4a628 in g_cclosure_marshal_generic () at /lib64/libgobject-2.0.so.0
#8 0x00007ffff4d49de8 in g_closure_invoke () at /lib64/libgobject-2.0.so.0
#9 0x00007ffff4d5b70d in signal_emit_unlocked_R () at /lib64/libgobject-2.0.so.0
#10 0x00007ffff4d63471 in g_signal_emit_valist () at /lib64/libgobject-2.0.so.0
#11 0x00007ffff4d63c78 in g_signal_emit_by_name () at /lib64/libgobject-2.0.so.0
#12 0x00005555555ce4ea in do_emit_signal (platform=platform@entry=0x55555594c8b0, obj=0x555555a74c50, cache_op=NMP_CACHE_OPS_REMOVED, was_visible=<optimized out>, reason=reason@entry=
NM_PLATFORM_REASON_INTERNAL) at platform/nm-linux-platform.c:1425
#13 0x00005555555ce826 in cache_prune_candidates_prune (platform=platform@entry=0x55555594c8b0) at platform/nm-linux-platform.c:1704
#14 0x00005555555d32d3 in do_request_link (platform=platform@entry=0x55555594c8b0, ifindex=ifindex@entry=85, name=name@entry=0x0, handle_delayed_action=handle_delayed_action@entry=0)
at platform/nm-linux-platform.c:1951
#15 0x00005555555d356b in delayed_action_handle_all (ifindex=85, platform=0x55555594c8b0) at platform/nm-linux-platform.c:1491
#16 0x00005555555d356b in delayed_action_handle_all (platform=0x55555594c8b0) at platform/nm-linux-platform.c:1573
#17 0x00005555555d356b in delayed_action_handle_all (platform=platform@entry=0x55555594c8b0, read_netlink=read_netlink@entry=0) at platform/nm-linux-platform.c:1588
#18 0x00005555555d32e2 in do_request_link (platform=platform@entry=0x55555594c8b0, ifindex=ifindex@entry=7, name=name@entry=0x0, handle_delayed_action=handle_delayed_action@entry=1)
at platform/nm-linux-platform.c:1954
#19 0x00005555555d5177 in do_change_link (platform=platform@entry=0x55555594c8b0, nlo=nlo@entry=0x55555597f0f0, complete_from_cache=complete_from_cache@entry=1) at platform/nm-linux-platform.c:2753
#20 0x00005555555d56b4 in link_enslave (platform=0x55555594c8b0, master=0, slave=7) at platform/nm-linux-platform.c:3141
#21 0x00005555555976de in release_slave (device=0x5555559d2a40, slave=0x5555559c6be0, configure=<optimized out>) at devices/nm-device-bond.c:437
#22 0x00005555555b7bc3 in nm_device_release_one_slave (self=self@entry=0x5555559d2a40, slave=0x5555559c6be0, configure=configure@entry=1, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_REMOVED)
at devices/nm-device.c:1049
#23 0x00005555555b7f0e in nm_device_master_release_slaves (self=self@entry=0x5555559d2a40) at devices/nm-device.c:1781
#24 0x00005555555b9592 in nm_device_cleanup (self=0x5555559d2a40, reason=<optimized out>, deconfigure=1) at devices/nm-device.c:7752
#25 0x00005555555ba161 in _set_state_full (self=self@entry=0x5555559d2a40, state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_REMOVED, quitting=quitting@entry=0) at devices/nm-device.c:8128
#26 0x00005555555bb297 in nm_device_state_changed (self=self@entry=0x5555559d2a40, state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_REMOVED)
at devices/nm-device.c:8319
#27 0x00005555555bd9a5 in queued_set_state (user_data=<optimized out>) at devices/nm-device.c:8343
#28 0x00007ffff4a4c79a in g_main_context_dispatch () at /lib64/libglib-2.0.so.0
#29 0x00007ffff4a4cae8 in g_main_context_iterate.isra.24 () at /lib64/libglib-2.0.so.0
#30 0x00007ffff4a4cdba in g_main_loop_run () at /lib64/libglib-2.0.so.0
#31 0x000055555559556f in main (argc=1, argv=0x7fffffffdb88) at main.c:518
2015-06-24 13:42:16 +02:00
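The fix for the re-entrancy shown in the backtrace is to defer handling of platform link events to an idle callback, so that remove_device() never runs from inside a device state change. The actual implementation (PlatformLinkCbData, _platform_link_cb_idle(), platform_link_cb()) follows below; the snippet here is a distilled, self-contained sketch of the same pattern, with handle_link_change() standing in as a hypothetical handler.

/* Sketch of the idle-deferral pattern, assuming plain GLib. The real
 * code below additionally tracks pending items in a CList so they can
 * be cancelled when the manager shuts down. */
typedef struct {
    gpointer self;    /* the manager */
    int      ifindex; /* the link that changed */
} DeferredLinkEvent;

static gboolean
deferred_link_event_cb(gpointer user_data)
{
    DeferredLinkEvent *ev = user_data;

    /* Runs from the main loop, outside the platform signal emission:
     * adding or removing devices is safe here. */
    handle_link_change(ev->self, ev->ifindex); /* hypothetical handler */
    g_slice_free(DeferredLinkEvent, ev);
    return G_SOURCE_REMOVE;
}

static void
on_platform_link_signal(gpointer self, int ifindex)
{
    DeferredLinkEvent *ev = g_slice_new(DeferredLinkEvent);

    ev->self    = self;
    ev->ifindex = ifindex;
    g_idle_add(deferred_link_event_cb, ev);
}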
|
|
|
typedef struct {
|
2017-09-29 15:04:53 +02:00
|
|
|
CList lst;
|
core: delay handling of link-changed platform event in manager
2015-06-24 13:42:16 +02:00
|
|
|
NMManager *self;
|
|
|
|
|
int ifindex;
|
2017-09-29 15:04:53 +02:00
|
|
|
guint idle_id;
|
core: delay handling of link-changed platform event in manager
2015-06-24 13:42:16 +02:00
|
|
|
} PlatformLinkCbData;
|
|
|
|
|
|
2021-03-31 21:32:43 +02:00
|
|
|
static gboolean
|
|
|
|
|
_check_remove_dev_on_link_deleted(NMManager *self, NMDevice *device)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-03-31 21:32:43 +02:00
|
|
|
NMSettingsConnection *const *scons = NULL;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMConnection *con;
|
2021-03-31 21:32:43 +02:00
|
|
|
guint i;
|
|
|
|
|
|
|
|
|
|
nm_assert(nm_device_is_software(device));
|
|
|
|
|
|
|
|
|
|
/* In general, software devices stick around as unrealized
|
|
|
|
|
* until their connection is removed. However, we don't want
|
|
|
|
|
* an NM-generated connection to keep the device alive.
|
|
|
|
|
* If there are no other compatible connections, the device
|
|
|
|
|
* should also be removed.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
scons = nm_settings_get_connections(priv->settings, NULL);
|
|
|
|
|
|
|
|
|
|
for (i = 0; scons[i]; i++) {
|
|
|
|
|
con = nm_settings_connection_get_connection(scons[i]);
|
|
|
|
|
if (!nm_connection_is_virtual(con))
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
if (NM_FLAGS_HAS(nm_settings_connection_get_flags(scons[i]),
|
|
|
|
|
NM_SETTINGS_CONNECTION_INT_FLAGS_NM_GENERATED))
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
if (!nm_device_check_connection_compatible(device, con, NULL))
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
/* Found a compatible virtual connection; the device must
|
|
|
|
|
* stay around unrealized. */
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return TRUE;
|
|
|
|
|
}
|
|
|
|
|
|
core: delay handling of link-changed platform event in manager
2015-06-24 13:42:16 +02:00
|
|
|
static gboolean
|
|
|
|
|
_platform_link_cb_idle(PlatformLinkCbData *data)
|
|
|
|
|
{
|
2017-09-29 15:04:53 +02:00
|
|
|
int ifindex = data->ifindex;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = data->self;
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2017-09-29 16:58:24 +02:00
|
|
|
const NMPlatformLink *plink;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-11-28 11:22:01 +01:00
|
|
|
c_list_unlink_stale(&data->lst);
|
2017-09-29 15:04:53 +02:00
|
|
|
g_slice_free(PlatformLinkCbData, data);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-09-29 16:58:24 +02:00
|
|
|
plink = nm_platform_link_get(priv->platform, ifindex);
|
|
|
|
|
if (plink) {
|
2017-10-05 14:42:02 +02:00
|
|
|
const NMPObject *plink_keep_alive = nmp_object_ref(NMP_OBJECT_UP_CAST(plink));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-09-29 16:58:24 +02:00
|
|
|
platform_link_added(self, ifindex, plink, FALSE, NULL);
|
2017-10-05 14:42:02 +02:00
|
|
|
nmp_object_unref(plink_keep_alive);
|
2015-12-04 14:11:00 +01:00
|
|
|
} else {
|
|
|
|
|
NMDevice *device;
|
2021-11-09 13:28:54 +01:00
|
|
|
GError *error = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-09-29 15:04:53 +02:00
|
|
|
device = nm_manager_get_device_by_ifindex(self, ifindex);
|
2015-12-04 14:11:00 +01:00
|
|
|
if (device) {
|
2016-01-14 17:24:25 +01:00
|
|
|
if (nm_device_is_software(device)) {
|
2017-04-13 18:28:08 +02:00
|
|
|
nm_device_sys_iface_state_set(device, NM_DEVICE_SYS_IFACE_STATE_REMOVED);
|
2015-12-04 14:11:00 +01:00
|
|
|
if (!nm_device_unrealize(device, FALSE, &error)) {
|
2017-06-07 12:46:10 +02:00
|
|
|
_LOG2W(LOGD_DEVICE, device, "failed to unrealize: %s", error->message);
|
2015-12-04 14:11:00 +01:00
|
|
|
g_clear_error(&error);
|
2019-05-17 19:22:19 +02:00
|
|
|
remove_device(self, device, FALSE);
|
2017-08-16 15:44:24 +02:00
|
|
|
} else {
|
2021-03-31 21:32:43 +02:00
|
|
|
if (_check_remove_dev_on_link_deleted(self, device))
|
|
|
|
|
remove_device(self, device, FALSE);
|
|
|
|
|
else
|
|
|
|
|
nm_device_update_from_platform_link(device, NULL);
|
2014-09-24 15:13:19 -05:00
|
|
|
}
|
2015-12-04 14:11:00 +01:00
|
|
|
} else {
|
2015-12-09 15:41:39 +01:00
|
|
|
/* Hardware and external devices always get removed when their kernel link is gone */
|
2019-05-17 19:22:19 +02:00
|
|
|
remove_device(self, device, FALSE);
|
2014-09-24 15:13:19 -05:00
|
|
|
}
|
core: delay handling of link-changed platform event in manager
2015-06-24 13:42:16 +02:00
|
|
|
}
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core: delay handling of link-changed platform event in manager
2015-06-24 13:42:16 +02:00
|
|
|
return G_SOURCE_REMOVE;
|
|
|
|
|
}
|
|
|
|
|
|
2008-04-29 Dan Williams <dcbw@redhat.com>
Handle HAL dropouts better; allow NM to start up even if HAL isn't up yet.
* marshallers/nm-marshal.list
- Add marshaller
* src/NetworkManager.c
- (main): let the NMManager handle the NMHalManager
* src/nm-hal-manager.c
src/nm-hal-manager.h
- convert to a GObject, and emit signals when stuff changes. Let the
NMManager handle the signals, instead of the NMHalManager calling
into the NMManager.
* src/nm-manager.c
src/nm-manager.h
- (remove_one_device): consolidate device removals here
- (dispose): use remove_one_device()
- (nm_manager_get_device_by_udi): make static
- (deferred_hal_manager_query_devices): idle handler to query the HAL
manager for devices at startup or wakeup time
- (nm_manager_new): create and monitor the HAL manager
- (hal_manager_udi_added_cb): new function; do what
nm_manager_add_device() used to do when signalled by the hal manager
- (hal_manager_udi_removed_cb): new function; do what
nm_manager_remove_device() used to do when signalled by the hal
manager
- (hal_manager_rfkill_changed_cb): handle rfkill changes from the
hal manager
- (hal_manager_hal_reappeared_cb): when HAL comes back, remove devices
in our device list that aren't known to HAL
- (impl_manager_sleep): on wakeup, re-add devices from an idle handler;
see comments on nm-hal-manager.c::nm_manager_state_changed() a few
commits ago
- (nm_manager_get_device_by_path, nm_manager_is_udi_managed,
nm_manager_activation_pending, nm_manager_wireless_enabled,
nm_manager_wireless_hardware_enabled,
nm_manager_set_wireless_hardware_enabled): remove, unused
git-svn-id: http://svn-archive.gnome.org/svn/NetworkManager/trunk@3619 4912f4e0-d625-0410-9fb7-b9a5a253dbdc
2008-04-29 23:03:00 +00:00
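As a rough illustration of the signal-based decoupling this entry describes, the wiring would look like the sketch below. The signal names are assumptions inferred from the callback names listed in the entry; the historical code may have used different names.

/* Hypothetical sketch: NMManager listens to signals emitted by the
 * HAL manager instead of being called into directly. Signal names
 * are assumed, not taken from the historical source. */
static void
manager_monitor_hal(NMManager *self, GObject *hal_mgr)
{
    g_signal_connect(hal_mgr, "udi-added",
                     G_CALLBACK(hal_manager_udi_added_cb), self);
    g_signal_connect(hal_mgr, "udi-removed",
                     G_CALLBACK(hal_manager_udi_removed_cb), self);
    g_signal_connect(hal_mgr, "rfkill-changed",
                     G_CALLBACK(hal_manager_rfkill_changed_cb), self);
    g_signal_connect(hal_mgr, "hal-reappeared",
                     G_CALLBACK(hal_manager_hal_reappeared_cb), self);
}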
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
platform_link_cb(NMPlatform *platform,
|
2016-10-22 13:08:36 +02:00
|
|
|
int obj_type_i,
|
2014-03-07 19:04:38 +01:00
|
|
|
int ifindex,
|
|
|
|
|
NMPlatformLink *plink,
|
2016-10-22 13:08:36 +02:00
|
|
|
int change_type_i,
|
2014-03-07 19:04:38 +01:00
|
|
|
gpointer user_data)
|
2007-02-08 15:34:26 +00:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self;
|
|
|
|
|
NMManagerPrivate *priv;
|
2016-10-22 13:08:36 +02:00
|
|
|
const NMPlatformSignalChangeType change_type = change_type_i;
|
2021-11-09 13:28:54 +01:00
|
|
|
PlatformLinkCbData *data;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-03-07 19:04:38 +01:00
|
|
|
switch (change_type) {
|
|
|
|
|
case NM_PLATFORM_SIGNAL_ADDED:
|
core: delay handling of link-changed platform event in manager
2015-06-24 13:42:16 +02:00
|
|
|
case NM_PLATFORM_SIGNAL_REMOVED:
|
2017-09-29 15:04:53 +02:00
|
|
|
self = NM_MANAGER(user_data);
|
|
|
|
|
priv = NM_MANAGER_GET_PRIVATE(self);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core: delay handling of link-changed platform event in manager
2015-06-24 13:42:16 +02:00
|
|
|
data = g_slice_new(PlatformLinkCbData);
|
2017-09-29 15:04:53 +02:00
|
|
|
data->self = self;
|
core: delay handling of link-changed platform event in manager
2015-06-24 13:42:16 +02:00
|
|
|
data->ifindex = ifindex;
|
2017-09-29 15:04:53 +02:00
|
|
|
c_list_link_tail(&priv->link_cb_lst, &data->lst);
|
|
|
|
|
data->idle_id = g_idle_add((GSourceFunc) _platform_link_cb_idle, data);
|
2014-03-07 19:04:38 +01:00
|
|
|
break;
|
core: delay handling of link-changed platform event in manager
2015-06-24 13:42:16 +02:00
|
|
|
default:
|
2014-03-07 19:04:38 +01:00
|
|
|
break;
|
core: delay handling of link-changed platform event in manager
#25 0x00005555555ba161 in _set_state_full (self=self@entry=0x5555559d2a40, state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_REMOVED, quitting=quitting@entry=0) at devices/nm-device.c:8128
#26 0x00005555555bb297 in nm_device_state_changed (self=self@entry=0x5555559d2a40, state=state@entry=NM_DEVICE_STATE_DISCONNECTED, reason=reason@entry=NM_DEVICE_STATE_REASON_CONNECTION_REMOVED)
at devices/nm-device.c:8319
#27 0x00005555555bd9a5 in queued_set_state (user_data=<optimized out>) at devices/nm-device.c:8343
#28 0x00007ffff4a4c79a in g_main_context_dispatch () at /lib64/libglib-2.0.so.0
#29 0x00007ffff4a4cae8 in g_main_context_iterate.isra.24 () at /lib64/libglib-2.0.so.0
#30 0x00007ffff4a4cdba in g_main_loop_run () at /lib64/libglib-2.0.so.0
#31 0x000055555559556f in main (argc=1, argv=0x7fffffffdb88) at main.c:518
2015-06-24 13:42:16 +02:00
|
|
|
}
|
2007-02-08 15:34:26 +00:00
|
|
|
}
|
|
|
|
|
|
2015-05-04 16:54:51 +02:00
|
|
|
static void
|
|
|
|
|
platform_query_devices(NMManager *self)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2017-07-04 22:29:27 +02:00
|
|
|
gs_unref_ptrarray GPtrArray *links = NULL;
|
2015-05-04 16:54:51 +02:00
|
|
|
int i;
|
device: assume matching connections during first start
Since commit 2d1b85f (th/assume-vs-unmanaged-bgo746440), we clearly
distinguish between two modes when encountering devices with external
IP configuration:
a) external devices. For those devices we generate a volatile in-memory
connection and pretend it's active. However, the device must not be
touched by NetworkManager in any way.
b) assume, seamless take over. Mostly for restart of NetworkManager,
we activate a connection gracefully without going through a down-up
cycle. After the device reaches activated state, the device is
considered fully managed. For this only an existing, non volatile
connection can be used.
Before 'th/assume-vs-unmanaged-bgo746440', the behaviors were not
clearly separated.
Since then, we only choose to assume a connection (b) when the state
file indicates a matching connection. Now, extend this to also assume
connections when:
- during first-start (not after a restart) when there is no
state file yet.
- and, if we have an existing, non volatile, connection which
matches the device's configuration.
This patch lets NetworkManager assume connections also on first start.
That is for example useful when handing over network configuration from
initrd.
This only applies to existing, permanent, matching(!) connections, so it is a
good guess that the user wants NM to take over this interface. This brings us
closer to the previous behavior before 'th/assume-vs-unmanaged-bgo746440'.
https://bugzilla.redhat.com/show_bug.cgi?id=1439220
(cherry picked from commit 27b2477cb7dad2410c88c7dfca51f3aad208b881)
2017-04-19 16:16:12 +02:00
|
|
|
gboolean guess_assume;
|
2021-11-09 13:28:54 +01:00
|
|
|
gs_free char *order = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
device: assume matching connections during first start
2017-04-19 16:16:12 +02:00
|
|
|
guess_assume = nm_config_get_first_start(nm_config_get());
|
2017-10-24 08:35:42 +02:00
|
|
|
order = nm_config_data_get_value(NM_CONFIG_GET_DATA,
|
|
|
|
|
NM_CONFIG_KEYFILE_GROUP_MAIN,
|
|
|
|
|
NM_CONFIG_KEYFILE_KEY_MAIN_SLAVES_ORDER,
|
|
|
|
|
NM_CONFIG_GET_VALUE_STRIP);
|
2017-09-29 15:11:33 +02:00
|
|
|
links = nm_platform_link_get_all(priv->platform, !nm_streq0(order, "index"));
|
2017-07-04 22:29:27 +02:00
|
|
|
if (!links)
|
|
|
|
|
return;
|
|
|
|
|
for (i = 0; i < links->len; i++) {
|
2021-11-09 13:28:54 +01:00
|
|
|
const NMPlatformLink *link = NMP_OBJECT_CAST_LINK(links->pdata[i]);
|
2017-12-06 13:09:31 +01:00
|
|
|
const NMConfigDeviceStateData *dev_state;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-12-06 13:09:31 +01:00
|
|
|
dev_state = nm_config_device_state_get(priv->config, link->ifindex);
|
2016-09-23 17:36:21 +02:00
|
|
|
platform_link_added(self,
|
2017-07-04 22:29:27 +02:00
|
|
|
link->ifindex,
|
|
|
|
|
link,
|
device: assume matching connections during first start
2017-04-19 16:16:12 +02:00
|
|
|
guess_assume && (!dev_state || !dev_state->connection_uuid),
|
2016-09-23 17:36:21 +02:00
|
|
|
dev_state);
|
|
|
|
|
}
|
2015-05-04 16:54:51 +02:00
|
|
|
}
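The guess_assume expression handed to platform_link_added() above boils down to a small predicate: a connection is only guess-assumed on the daemon's very first start, and only for interfaces the state file knows nothing about. A minimal sketch of that decision, assuming simplified stand-ins (DevState and should_guess_assume() are illustrative, not the real NMConfigDeviceStateData API):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the per-device state-file entry. */
typedef struct {
    const char *connection_uuid; /* NULL when no connection is recorded */
} DevState;

/* Mirrors: guess_assume && (!dev_state || !dev_state->connection_uuid) */
static bool
should_guess_assume(bool first_start, const DevState *dev_state)
{
    return first_start && (!dev_state || !dev_state->connection_uuid);
}

int
main(void)
{
    DevState recorded = {.connection_uuid = "example-uuid"};

    printf("%d\n", should_guess_assume(true, NULL));      /* 1: first start, no entry   */
    printf("%d\n", should_guess_assume(true, &recorded)); /* 0: state file knows better */
    printf("%d\n", should_guess_assume(false, NULL));     /* 0: restart, never guess    */
    return 0;
}

After a restart the state file is authoritative, so guessing is disabled entirely; that is what keeps take-over of initrd configuration limited to the first boot.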
|
|
|
|
|
|
2008-04-29 Dan Williams <dcbw@redhat.com>
Handle HAL dropouts better; allow NM to start up even if HAL isn't up yet.
* marshallers/nm-marshal.list
- Add marshaller
* src/NetworkManager.c
- (main): let the NMManager handle the NMHalManager
* src/nm-hal-manager.c
src/nm-hal-manager.h
- convert to a GObject, and emit signals when stuff changes. Let the
NMManager handle the signals, instead of the NMHalManager calling
into the NMManager.
* src/nm-manager.c
src/nm-manager.h
- (remove_one_device): consolidate device removals here
- (dispose): use remove_one_device()
- (nm_manager_get_device_by_udi): make static
- (deferred_hal_manager_query_devices): idle handler to query the HAL
manager for devices at startup or wakeup time
- (nm_manager_new): create and monitor the HAL manager
- (hal_manager_udi_added_cb): new function; do what
nm_manager_add_device() used to do when signalled by the hal manager
- (hal_manager_udi_removed_cb): new function; do what
nm_manager_remove_device() used to do when signalled by the hal
manager
- (hal_manager_rfkill_changed_cb): handle rfkill changes from the
hal manager
- (hal_manager_hal_reappeared_cb): when HAL comes back, remove devices
in our device list that aren't known to HAL
- (impl_manager_sleep): on wakeup, re-add devices from an idle handler;
see comments on nm-hal-manager.c::nm_manager_state_changed() a few
commits ago
- (nm_manager_get_device_by_path, nm_manager_is_udi_managed,
nm_manager_activation_pending, nm_manager_wireless_enabled,
nm_manager_wireless_hardware_enabled,
nm_manager_set_wireless_hardware_enabled): remove, unused
git-svn-id: http://svn-archive.gnome.org/svn/NetworkManager/trunk@3619 4912f4e0-d625-0410-9fb7-b9a5a253dbdc
2008-04-29 23:03:00 +00:00
|
|
|
static void
|
2022-02-02 12:04:27 +01:00
|
|
|
rfkill_manager_rfkill_changed_cb(NMRfkillManager *rfkill_mgr,
|
|
|
|
|
/* NMRfkillType */ guint rtype,
|
|
|
|
|
/* NMRfkillState */ guint udev_state,
|
|
|
|
|
gpointer user_data)
|
2007-02-08 15:34:26 +00:00
|
|
|
{
|
2022-02-02 12:04:27 +01:00
|
|
|
nm_assert(rtype < NM_RFKILL_TYPE_MAX);
|
|
|
|
|
|
2022-02-01 22:19:36 +01:00
|
|
|
_rfkill_update(NM_MANAGER(user_data), rtype);
|
2007-02-08 15:34:26 +00:00
|
|
|
}
|
|
|
|
|
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
then GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
|
|
|
const CList *
|
2008-04-29 Dan Williams <dcbw@redhat.com>
Handle HAL dropouts better; allow NM to start up even if HAL isn't up yet.
2008-04-29 23:03:00 +00:00
|
|
|
nm_manager_get_devices(NMManager *manager)
|
2007-02-08 15:34:26 +00:00
|
|
|
{
|
|
|
|
|
g_return_val_if_fail(NM_IS_MANAGER(manager), NULL);
|
|
|
|
|
|
core: track devices in manager via embedded CList
2018-03-23 21:51:07 +01:00
|
|
|
return &NM_MANAGER_GET_PRIVATE(manager)->devices_lst_head;
|
2007-02-08 15:34:26 +00:00
|
|
|
}
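The CList commit message above (and the accessor just shown, which hands out the embedded list head) relies on an intrusive doubly linked list. A hand-rolled sketch of the idea, not the actual c-list.h implementation: the link node lives inside the tracked struct, so linking allocates nothing, unlinking is O(1) without access to the head, and "is this element tracked?" is a constant-time check.

#include <stdio.h>

typedef struct Link {
    struct Link *next;
    struct Link *prev;
} Link;

/* An empty head (or an unlinked node) points at itself. */
static void link_init(Link *l) { l->next = l->prev = l; }

static int link_is_linked(const Link *l) { return l->next != l; }

static void
link_append(Link *head, Link *l)
{
    l->prev          = head->prev;
    l->next          = head;
    head->prev->next = l;
    head->prev       = l;
}

/* O(1) unlink; note that no list head is needed. */
static void
link_unlink(Link *l)
{
    l->prev->next = l->next;
    l->next->prev = l->prev;
    link_init(l);
}

typedef struct {
    const char *iface;
    Link        devices_lst; /* embedded link node, as in NMDevice */
} Device;

int
main(void)
{
    Link   head;
    Device d = {.iface = "eth0"};

    link_init(&head);
    link_init(&d.devices_lst);
    link_append(&head, &d.devices_lst);
    printf("linked=%d\n", link_is_linked(&d.devices_lst)); /* 1 */
    link_unlink(&d.devices_lst);
    printf("linked=%d\n", link_is_linked(&d.devices_lst)); /* 0 */
    return 0;
}

The real c_list_for_each_entry() additionally recovers the containing struct from the embedded node via offsetof() arithmetic, which is what lets callers iterate the head returned by nm_manager_get_devices() as NMDevice objects.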
|
|
|
|
|
|
core: improve selection of device when activating profile on any device
With
$ nmcli connection up "$PROFILE" ifname "$DEVICE"
it's clear that the user means the particular device. That also
is taken as an indication to make $DEVICE managed, in case it was
unmanaged before. So, this command implies a previous
$ nmcli device set $DEVICE managed yes
On the other hand, if the user just issues
$ nmcli connection up "$PROFILE"
without a particular device, then we should prefer devices which
are marked as managed instead of unmanaged ones.
Likewise, we should consider the device's state when selecting
a device. This means, when activating a profile which is activatable on
multiple devices, it will now prefer devices which are not already
active. The exception to this is that if the profile itself is already
active (and multi-connect "single"), then it will prefer to re-activate
the profile on the same device. This was done previously already. What's
new is that if the profile is not multi-connect "single", the said
exception no longer applies, and we prefer to activate the profile on a
hitherto unactivated device.
https://bugzilla.redhat.com/show_bug.cgi?id=1639254
https://github.com/NetworkManager/NetworkManager/pull/232
2018-10-15 15:23:22 +02:00
|
|
|
typedef enum {
|
|
|
|
|
DEVICE_ACTIVATION_PRIO_NONE,
|
|
|
|
|
DEVICE_ACTIVATION_PRIO_UNMANAGED,
|
|
|
|
|
DEVICE_ACTIVATION_PRIO_UNAVAILABLE,
|
|
|
|
|
DEVICE_ACTIVATION_PRIO_DEACTIVATING,
|
|
|
|
|
DEVICE_ACTIVATION_PRIO_ACTIVATING,
|
|
|
|
|
DEVICE_ACTIVATION_PRIO_ACTIVATED,
|
|
|
|
|
DEVICE_ACTIVATION_PRIO_DISCONNECTED,
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core: improve selection of device when activating profile on any device
2018-10-15 15:23:22 +02:00
|
|
|
_DEVICE_ACTIVATION_PRIO_BEST = DEVICE_ACTIVATION_PRIO_DISCONNECTED,
|
|
|
|
|
} DeviceActivationPrio;
|
|
|
|
|
|
|
|
|
|
static DeviceActivationPrio
|
|
|
|
|
_device_get_activation_prio(NMDevice *device)
|
|
|
|
|
{
|
|
|
|
|
if (!nm_device_get_managed(device, TRUE))
|
|
|
|
|
return DEVICE_ACTIVATION_PRIO_NONE;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core: improve selection of device when activating profile on any device
2018-10-15 15:23:22 +02:00
|
|
|
switch (nm_device_get_state(device)) {
|
|
|
|
|
case NM_DEVICE_STATE_DISCONNECTED:
|
|
|
|
|
return DEVICE_ACTIVATION_PRIO_DISCONNECTED;
|
|
|
|
|
case NM_DEVICE_STATE_ACTIVATED:
|
|
|
|
|
return DEVICE_ACTIVATION_PRIO_ACTIVATED;
|
|
|
|
|
case NM_DEVICE_STATE_PREPARE:
|
|
|
|
|
case NM_DEVICE_STATE_CONFIG:
|
|
|
|
|
case NM_DEVICE_STATE_NEED_AUTH:
|
|
|
|
|
case NM_DEVICE_STATE_IP_CONFIG:
|
|
|
|
|
case NM_DEVICE_STATE_IP_CHECK:
|
|
|
|
|
case NM_DEVICE_STATE_SECONDARIES:
|
|
|
|
|
return DEVICE_ACTIVATION_PRIO_ACTIVATING;
|
|
|
|
|
case NM_DEVICE_STATE_DEACTIVATING:
|
|
|
|
|
case NM_DEVICE_STATE_FAILED:
|
|
|
|
|
return DEVICE_ACTIVATION_PRIO_DEACTIVATING;
|
|
|
|
|
case NM_DEVICE_STATE_UNAVAILABLE:
|
|
|
|
|
return DEVICE_ACTIVATION_PRIO_UNAVAILABLE;
|
|
|
|
|
case NM_DEVICE_STATE_UNKNOWN:
|
|
|
|
|
case NM_DEVICE_STATE_UNMANAGED:
|
|
|
|
|
return DEVICE_ACTIVATION_PRIO_UNMANAGED;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core: improve selection of device when activating profile on any device
2018-10-15 15:23:22 +02:00
|
|
|
g_return_val_if_reached(DEVICE_ACTIVATION_PRIO_UNAVAILABLE);
|
|
|
|
|
}
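Because the DeviceActivationPrio values are ordered from worst to best, choosing a device is a max-scan over priorities with an early exit once _DEVICE_ACTIVATION_PRIO_BEST is seen. A compact stand-alone model of that scan (plain ints in place of NMDevice, and the nm_device_check_connection_available() gate omitted):

#include <stdio.h>

typedef enum { /* ordered worst to best, mirroring DeviceActivationPrio */
    PRIO_NONE,
    PRIO_UNMANAGED,
    PRIO_UNAVAILABLE,
    PRIO_DEACTIVATING,
    PRIO_ACTIVATING,
    PRIO_ACTIVATED,
    PRIO_DISCONNECTED,
    _PRIO_BEST = PRIO_DISCONNECTED,
} Prio;

/* Returns the index of the best candidate, or -1 if the array is empty. */
static int
pick_best(const Prio *prios, int n)
{
    int  best_i    = -1;
    Prio best_prio = PRIO_NONE;
    int  i;

    for (i = 0; i < n; i++) {
        if (best_i >= 0 && prios[i] <= best_prio)
            continue; /* cannot beat the device we already picked */
        best_i    = i;
        best_prio = prios[i];
        if (best_prio == _PRIO_BEST)
            break; /* early exit: nothing ranks higher */
    }
    return best_i;
}

int
main(void)
{
    Prio devs[] = {PRIO_ACTIVATED, PRIO_UNMANAGED, PRIO_DISCONNECTED, PRIO_ACTIVATING};

    printf("best=%d\n", pick_best(devs, 4)); /* 2: the disconnected device wins */
    return 0;
}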
|
|
|
|
|
|
2015-02-03 16:15:37 +01:00
|
|
|
static NMDevice *
|
2021-11-09 13:28:54 +01:00
|
|
|
nm_manager_get_best_device_for_connection(NMManager *self,
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements the NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn,
|
2021-11-09 13:28:54 +01:00
|
|
|
NMConnection *connection,
|
2017-02-24 09:08:02 +01:00
|
|
|
gboolean for_user_request,
|
2021-11-09 13:28:54 +01:00
|
|
|
GHashTable *unavailable_devices,
|
|
|
|
|
GError **error)
|
2015-02-03 16:15:37 +01:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2018-04-22 15:21:52 +02:00
|
|
|
NMActiveConnectionState ac_state;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMActiveConnection *ac;
|
|
|
|
|
NMDevice *ac_device;
|
|
|
|
|
NMDevice *device;
|
core: improve selection of device when activating profile on any device
2018-10-15 15:23:22 +02:00
|
|
|
struct {
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device;
|
core: improve selection of device when activating profile on any device
2018-10-15 15:23:22 +02:00
|
|
|
DeviceActivationPrio prio;
|
|
|
|
|
} best = {
|
|
|
|
|
.device = NULL,
|
|
|
|
|
.prio = DEVICE_ACTIVATION_PRIO_NONE,
|
|
|
|
|
};
|
2015-10-19 14:14:17 +02:00
|
|
|
NMDeviceCheckConAvailableFlags flags;
|
2021-11-09 13:28:54 +01:00
|
|
|
gs_unref_ptrarray GPtrArray *all_ac_arr = NULL;
|
|
|
|
|
gs_free_error GError *local_best = NULL;
|
|
|
|
|
NMConnectionMultiConnect multi_connect;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
nm_assert(!sett_conn || NM_IS_SETTINGS_CONNECTION(sett_conn));
|
|
|
|
|
nm_assert(!connection || NM_IS_CONNECTION(connection));
|
|
|
|
|
nm_assert(sett_conn || connection);
|
|
|
|
|
nm_assert(!connection || !sett_conn
|
|
|
|
|
|| connection == nm_settings_connection_get_connection(sett_conn));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
if (!connection)
|
|
|
|
|
connection = nm_settings_connection_get_connection(sett_conn);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-10-17 12:21:33 +02:00
|
|
|
multi_connect = _nm_connection_get_multi_connect(connection);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core: ignore unmanaged devices for explicit activation request depending on multi-connect
When a device is unmanaged, an explicit activation request can
still activate it. In particular, that is the case for
$ nmcli connection up "$PROFILE" ifname "$DEVICE"
It is also the case, for plain
$ nmcli connection up "$PROFILE"
where NetworkManager searches for a suitable device -- depending on
multi-connect setting of the profile.
The idea is, that a profile with "multi-connect=single" is expected
to sufficently and uniquely match a device, based on matching properties
like "connection.interface-name". In that case, an explicit activation
request from the user shows the intent to manage the device.
Note that it's hard to understand whether the profile really uniquely
selects a particular device. For example, if the profile doesn't specify
"connection.interface-name", it might still uniquely identify
an ethernet device, if you only have one such device.
On the other hand, with "connection.multi-connect" other than "single",
it is very much expected that the profile does not strictly match
one device.
Change the behavior here for multi-connect profiles. This allows the
user to block individual devices from activation via
$ nmcli device set "$DEVICE" managed no
A subsequent
$ nmcli connection up "$MULTI_PROFILE"
will not consider "$DEVICE" as suitable candidate for activation.
Likewise, in the future we may want to add a
$ nmcli connection up --all "$MULTI_PROFILE"
command, to activate the profile on all suitable devices.
In that case again, unmanaged devices probably also should be skipped
for multi-connect profiles.
https://bugzilla.redhat.com/show_bug.cgi?id=1639254
2018-10-17 11:33:02 +02:00
|
|
|
if (!for_user_request)
|
|
|
|
|
flags = NM_DEVICE_CHECK_CON_AVAILABLE_NONE;
|
|
|
|
|
else {
|
|
|
|
|
/* if the profile is multi-connect=single, we also consider devices which
|
2019-01-11 17:07:03 -02:00
|
|
|
* are marked as unmanaged. An explicit user request shows sufficient user
|
core: ignore unmanaged devices for explicit activation request depending on multi-connect
https://bugzilla.redhat.com/show_bug.cgi?id=1639254
2018-10-17 11:33:02 +02:00
|
|
|
* intent to make the device managed.
|
|
|
|
|
* That is also because we expect that such a profile is suitably tied
|
|
|
|
|
* to the intended device. So when an unmanaged device matches, the user's
|
|
|
|
|
* intent is clear.
|
|
|
|
|
*
|
|
|
|
|
* For multi-connect != single devices that is different. The profile
|
|
|
|
|
* is not restricted to a particular device.
|
|
|
|
|
* For that reason, plain `nmcli connection up "$MULTI_PROFILE"` seems
|
|
|
|
|
* less suitable for multi-connect profiles, because the target device is
|
|
|
|
|
* left unspecified. Anyway, if a user issues
|
|
|
|
|
*
|
|
|
|
|
* $ nmcli device set "$DEVICE" managed no
|
|
|
|
|
* $ nmcli connection up "$MULTI_PROFILE"
|
|
|
|
|
*
|
|
|
|
|
* then it is reasonable for multi-connect profiles to not consider
|
|
|
|
|
* the device a suitable candidate.
|
|
|
|
|
*
|
|
|
|
|
* This may seem inconsistent, but I think that it makes a lot of
|
|
|
|
|
* sense. Also note that "connection.multi-connect" works quite differently
|
|
|
|
|
* in aspects like activation. E.g. `nmcli connection up` of multi-connect
|
|
|
|
|
* "single" profile, will deactivate the profile if it is active already.
|
|
|
|
|
* That is different from multi-connect profiles, where it will aim to
|
|
|
|
|
* activate the profile one more time on a hitherto disconnected device.
|
|
|
|
|
*/
|
|
|
|
|
if (multi_connect == NM_CONNECTION_MULTI_CONNECT_SINGLE)
|
|
|
|
|
flags = NM_DEVICE_CHECK_CON_AVAILABLE_FOR_USER_REQUEST;
|
|
|
|
|
else
|
|
|
|
|
flags = NM_DEVICE_CHECK_CON_AVAILABLE_FOR_USER_REQUEST
|
|
|
|
|
& ~_NM_DEVICE_CHECK_CON_AVAILABLE_FOR_USER_REQUEST_OVERRULE_UNMANAGED;
|
|
|
|
|
}
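The else-branch above is a plain flag mask: start from the full "for user request" set and clear only the bit that lets a user request overrule an unmanaged device. A sketch with made-up bit values (the names imitate NMDeviceCheckConAvailableFlags, but the values are illustrative, not the real ones):

#include <stdio.h>

/* Illustrative stand-ins; not the real NMDeviceCheckConAvailableFlags bits. */
enum {
    CHECK_NONE                        = 0,
    CHECK_USER_REQ_CARRIER            = (1 << 0),
    CHECK_USER_REQ_OVERRULE_UNMANAGED = (1 << 1),
    CHECK_FOR_USER_REQUEST = CHECK_USER_REQ_CARRIER | CHECK_USER_REQ_OVERRULE_UNMANAGED,
};

int
main(void)
{
    int      for_user_request = 1;
    int      multi_single     = 0; /* profile has multi-connect != "single" */
    unsigned flags;

    if (!for_user_request)
        flags = CHECK_NONE;
    else if (multi_single)
        flags = CHECK_FOR_USER_REQUEST;
    else /* keep unmanaged devices excluded for multi-connect profiles */
        flags = CHECK_FOR_USER_REQUEST & ~CHECK_USER_REQ_OVERRULE_UNMANAGED;

    printf("flags=0x%x\n", flags); /* 0x1: the overrule-unmanaged bit is cleared */
    return 0;
}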
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-10-17 12:21:33 +02:00
|
|
|
if (multi_connect == NM_CONNECTION_MULTI_CONNECT_SINGLE
|
core: improve selection of device when activating profile on any device
2018-10-15 15:23:22 +02:00
|
|
|
&& (ac = active_connection_find_by_connection(self,
|
|
|
|
|
sett_conn,
|
|
|
|
|
connection,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_DEACTIVATING,
|
|
|
|
|
&all_ac_arr))) {
|
|
|
|
|
/* if we have a profile which may activate on only one device (multi-connect single), then
|
|
|
|
|
* we prefer the device on which the profile is already active. That means reactivating
|
|
|
|
|
* the profile on the same device.
|
|
|
|
|
*
|
|
|
|
|
* If the profile can be activated on multiple devices, we don't do this. In fact, the
|
|
|
|
|
* check below for the DeviceActivationPrio will prefer devices which are not already
|
|
|
|
|
* activated (with this or another) profile. */
|
2015-02-03 16:15:37 +01:00
|
|
|
|
2018-04-22 15:21:52 +02:00
|
|
|
ac_device = nm_active_connection_get_device(ac);
|
|
|
|
|
if (ac_device
|
|
|
|
|
&& ((unavailable_devices && g_hash_table_contains(unavailable_devices, ac_device))
|
2018-06-27 16:21:43 +02:00
|
|
|
|| !nm_device_check_connection_available(ac_device, connection, flags, NULL, NULL)))
|
2018-04-22 15:21:52 +02:00
|
|
|
ac_device = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-22 15:21:52 +02:00
|
|
|
if (all_ac_arr) {
|
|
|
|
|
guint i;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-22 15:21:52 +02:00
|
|
|
ac_state = nm_active_connection_get_state(ac);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-22 15:21:52 +02:00
|
|
|
/* we found several active connections. See which one is the most suitable... */
|
|
|
|
|
nm_assert(ac == all_ac_arr->pdata[0]);
|
|
|
|
|
for (i = 1; i < all_ac_arr->len; i++) {
|
2021-11-09 13:28:54 +01:00
|
|
|
NMActiveConnection *ac2 = all_ac_arr->pdata[i];
|
|
|
|
|
NMDevice *ac_device2 = nm_active_connection_get_device(ac2);
|
2018-04-22 15:21:52 +02:00
|
|
|
NMActiveConnectionState ac_state2;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-22 15:21:52 +02:00
|
|
|
if (!ac_device2
|
|
|
|
|
|| (unavailable_devices
|
|
|
|
|
&& g_hash_table_contains(unavailable_devices, ac_device2))
|
|
|
|
|
|| !nm_device_check_connection_available(ac_device2,
|
2018-06-27 16:21:43 +02:00
|
|
|
connection,
|
2020-09-28 16:03:33 +02:00
|
|
|
flags,
|
|
|
|
|
NULL,
|
2018-04-22 15:21:52 +02:00
|
|
|
NULL))
|
|
|
|
|
continue;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-22 15:21:52 +02:00
|
|
|
ac_state2 = nm_active_connection_get_state(ac2);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-22 15:21:52 +02:00
|
|
|
if (!ac_device)
|
|
|
|
|
goto found_better;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-22 15:21:52 +02:00
|
|
|
if (ac_state == ac_state2) {
|
|
|
|
|
/* active-connections are in their list in the order in which they are connected.
|
|
|
|
|
* If we have two with the same state, the later (newer) one is preferred. */
|
|
|
|
|
goto found_better;
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
|
|
|
|
|
2018-04-22 15:21:52 +02:00
|
|
|
switch (ac_state) {
|
|
|
|
|
case NM_ACTIVE_CONNECTION_STATE_UNKNOWN:
|
|
|
|
|
if (NM_IN_SET(ac_state2,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_ACTIVATING,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_ACTIVATED,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_DEACTIVATING))
|
|
|
|
|
goto found_better;
|
|
|
|
|
break;
|
|
|
|
|
case NM_ACTIVE_CONNECTION_STATE_ACTIVATING:
|
|
|
|
|
if (NM_IN_SET(ac_state2, NM_ACTIVE_CONNECTION_STATE_ACTIVATED))
|
|
|
|
|
goto found_better;
|
2020-09-28 16:03:33 +02:00
|
|
|
break;
|
2018-04-22 15:21:52 +02:00
|
|
|
case NM_ACTIVE_CONNECTION_STATE_ACTIVATED:
|
2020-09-28 16:03:33 +02:00
|
|
|
break;
|
2018-04-22 15:21:52 +02:00
|
|
|
case NM_ACTIVE_CONNECTION_STATE_DEACTIVATING:
|
|
|
|
|
if (NM_IN_SET(ac_state2,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_ACTIVATING,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_ACTIVATED))
|
|
|
|
|
goto found_better;
|
2020-09-28 16:03:33 +02:00
|
|
|
break;
|
|
|
|
|
default:
|
2018-04-22 15:21:52 +02:00
|
|
|
nm_assert_not_reached();
|
|
|
|
|
goto found_better;
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
|
|
|
|
|
2017-02-24 09:08:02 +01:00
|
|
|
continue;
|
2018-04-22 15:21:52 +02:00
|
|
|
found_better:
|
|
|
|
|
ac = ac2;
|
|
|
|
|
ac_state = ac_state2;
|
|
|
|
|
ac_device = ac_device2;
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
|
|
|
|
}
|
2018-04-22 15:21:52 +02:00
|
|
|
|
|
|
|
|
if (ac_device)
|
|
|
|
|
return ac_device;
|
2020-09-28 16:03:33 +02:00
|
|
|
}
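The found_better switch above encodes a strict preference order over active-connection states: ACTIVATED beats ACTIVATING beats DEACTIVATING beats UNKNOWN, and ties go to the newer entry. The same decision can be written as a numeric rank; a sketch of that equivalent reformulation (not how the source expresses it):

#include <stdio.h>

typedef enum { /* mirrors NMActiveConnectionState */
    AC_UNKNOWN,
    AC_ACTIVATING,
    AC_ACTIVATED,
    AC_DEACTIVATING,
} AcState;

/* Higher rank = more attractive device to re-use for re-activation. */
static int
ac_state_rank(AcState s)
{
    switch (s) {
    case AC_ACTIVATED:
        return 3;
    case AC_ACTIVATING:
        return 2;
    case AC_DEACTIVATING:
        return 1;
    case AC_UNKNOWN:
    default:
        return 0;
    }
}

int
main(void)
{
    /* An activating AC beats a deactivating one, as in the switch above;
     * on equal rank the later (newer) entry would win. */
    printf("%d\n", ac_state_rank(AC_ACTIVATING) > ac_state_rank(AC_DEACTIVATING)); /* 1 */
    return 0;
}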
|
2018-04-22 15:21:52 +02:00
|
|
|
|
|
|
|
|
/* Pick the first device that's compatible with the connection. */
|
|
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
2021-11-09 13:28:54 +01:00
|
|
|
GError *local = NULL;
|
2018-04-22 15:21:52 +02:00
|
|
|
DeviceActivationPrio prio;
|
|
|
|
|
|
|
|
|
|
if (unavailable_devices && g_hash_table_contains(unavailable_devices, device))
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
/* determine the priority of this device. Currently, this priority is independent
|
core: improve selection of device when activating profile on any device
2018-10-15 15:23:22 +02:00
|
|
|
* of the profile (connection) and the device's details (aside from the state).
|
2018-10-17 12:21:49 +02:00
|
|
|
*
|
core: improve selection of device when activating profile on any device
2018-10-15 15:23:22 +02:00
|
|
|
* Maybe nm_device_check_connection_available() should instead return a priority,
|
2018-10-17 12:21:49 +02:00
|
|
|
* as it has more information available.
|
|
|
|
|
*
|
|
|
|
|
* For example, if you have multiple Wi-Fi devices, currently a user-request would
|
|
|
|
|
* also select the device if the AP is not visible. Optimally, if one of the two
|
|
|
|
|
* devices sees the AP and the other one doesn't, the former would be preferred.
|
|
|
|
|
* For that, the priority would need to be determined by nm_device_check_connection_available(). */
|
core: improve selection of device when activating profile on any device
2018-10-15 15:23:22 +02:00
|
|
|
prio = _device_get_activation_prio(device);
|
|
|
|
|
if (prio <= best.prio && best.device) {
|
|
|
|
|
/* we already have a matching device with an equal or better priority. This candidate
|
2018-10-17 12:21:49 +02:00
|
|
|
* cannot be better. Skip the check.
|
|
|
|
|
*
|
|
|
|
|
* Also note that below we collect the best error message in @local_best.
|
|
|
|
|
* Since we already have best.device, the error message does not matter
|
|
|
|
|
* either, and we can skip nm_device_check_connection_available() altogether. */
|
core: improve selection of device when activating profile on any device
2018-10-15 15:23:22 +02:00
|
|
|
continue;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-07-10 11:50:21 +02:00
|
|
|
if (nm_device_check_connection_available(device,
|
|
|
|
|
connection,
|
|
|
|
|
flags,
|
|
|
|
|
NULL,
|
core: improve selection of device when activating profile on any device
2018-10-15 15:23:22 +02:00
                                                 error ? &local : NULL)) {
            if (prio == _DEVICE_ACTIVATION_PRIO_BEST) {
                /* This device already has the best priority. It cannot get any
                 * better, so finish the search. */
                return device;
            }
            best.prio   = prio;
            best.device = device;
            continue;
        }

        if (error) {
            gboolean reset_error;

            if (!local_best)
                reset_error = TRUE;
            else if (local_best->domain != NM_UTILS_ERROR)
                reset_error = (local->domain == NM_UTILS_ERROR);
            else {
                reset_error = (local->domain == NM_UTILS_ERROR && local_best->code < local->code);
            }

            if (reset_error) {
                g_clear_error(&local_best);
                g_set_error(&local_best,
                            local->domain,
                            local->code,
                            "device %s not available because %s",
                            nm_device_get_iface(device),
                            local->message);
            }
            g_error_free(local);
        }
    }
    if (best.device)
        return best.device;

    if (error) {
        if (local_best)
            g_propagate_error(error, g_steal_pointer(&local_best));
        else {
            nm_utils_error_set_literal(error, NM_UTILS_ERROR_UNKNOWN, "no suitable device found");
        }
    }

    return NULL;
}

core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases a more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1), and you can unlink the
element without having access to the link's head.
- contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
were perfect and needed only 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we ever introduced multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
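To make the bullet points above concrete, here is a minimal sketch of the
embedded-CList pattern. The type and field names are illustrative only, not
the exact NMManager/NMDevice layout:

    #include "c-list.h"

    typedef struct {
        CList devices_lst_head; /* list head, embedded in the manager */
    } Manager;

    typedef struct {
        CList devices_lst;      /* link node, embedded directly in the device */
        char *iface;
    } Device;

    static void
    device_init(Device *d)
    {
        c_list_init(&d->devices_lst); /* must happen before is_linked()/unlink() */
    }

    static void
    manager_add_device(Manager *m, Device *d)
    {
        /* O(1) link; the node lives inside Device, so no extra allocation. */
        c_list_link_tail(&m->devices_lst_head, &d->devices_lst);
    }

    static void
    device_untrack(Device *d)
    {
        /* c_list_is_linked() answers in O(1) whether the device is tracked;
         * c_list_unlink() removes it in O(1) without access to the list head. */
        if (c_list_is_linked(&d->devices_lst))
            c_list_unlink(&d->devices_lst);
    }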
static const char **
_get_devices_paths(NMManager *self, gboolean all_devices)
{
    NMManagerPrivate *priv  = NM_MANAGER_GET_PRIVATE(self);
    const char      **paths = NULL;
    guint             i;
    NMDevice         *device;

    /* Over-allocate: one slot per tracked device plus the NULL terminator;
     * devices may be skipped below, so the array can end early. */
    paths = g_new(const char *, c_list_length(&priv->devices_lst_head) + 1);
    i = 0;
    c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
        const char *path;

        path = nm_dbus_object_get_path(NM_DBUS_OBJECT(device));
        if (!path)
            continue;

        if (!all_devices && !nm_device_is_real(device))
            continue;

        paths[i++] = path;
    }
    paths[i++] = NULL;
    return paths;
}
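The "(^ao)" convenience format used by the D-Bus handlers below consumes
exactly such a NULL-terminated string array and converts it into an array of
object paths. For illustration, a long-hand equivalent of
g_variant_new("(^ao)", (char **) paths) would be:

    GVariantBuilder builder;
    GVariant       *ret;
    gsize           j;

    g_variant_builder_init(&builder, G_VARIANT_TYPE("ao"));
    for (j = 0; paths[j]; j++)
        g_variant_builder_add(&builder, "o", paths[j]);
    /* in g_variant_new()'s format language, "a..." consumes a GVariantBuilder */
    ret = g_variant_new("(ao)", &builder);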
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because of?) using generated classes. It was hard to understand, complex, had
ordering issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDBusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, we now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could easily be extended later, for example with polling whether the
system bus appears (as was done previously). Also, restart of the D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering; the generated code would also hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means that changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- The patch changes the code size of NetworkManager:
  before: 2809360 bytes
  after:  2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without making a strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
  before: real 1m39.355s
  after:  real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
  before: real 0m26.843s
  after:  real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions doesn't show a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size:
  before: 19356 RSS
  after:  18660 RSS
2018-02-26 13:51:52 +01:00
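For orientation, this is roughly what "glue our objects directly to
GDBusConnection" with static GDBusInterfaceInfo descriptors looks like at the
plain GIO level. A self-contained sketch with made-up names (org.example.Demo,
"Ping", export_demo) -- not NetworkManager's actual descriptor tables:

    #include <gio/gio.h>

    /* ref_count == -1 marks the descriptors as statically allocated. */
    static GDBusArgInfo arg_reply = {
        .ref_count = -1, .name = (char *) "reply", .signature = (char *) "s",
    };
    static GDBusArgInfo *ping_out_args[] = { &arg_reply, NULL };
    static GDBusMethodInfo method_ping = {
        .ref_count = -1,
        .name      = (char *) "Ping",
        .out_args  = ping_out_args,
    };
    static GDBusMethodInfo *iface_methods[] = { &method_ping, NULL };
    static GDBusInterfaceInfo iface_info = {
        .ref_count = -1,
        .name      = (char *) "org.example.Demo",
        .methods   = iface_methods,
    };

    static void
    method_call(GDBusConnection       *connection,
                const char            *sender,
                const char            *object_path,
                const char            *interface_name,
                const char            *method_name,
                GVariant              *parameters,
                GDBusMethodInvocation *invocation,
                gpointer               user_data)
    {
        /* single dispatch point for all methods of the interface */
        g_dbus_method_invocation_return_value(invocation, g_variant_new("(s)", "pong"));
    }

    static const GDBusInterfaceVTable iface_vtable = {
        .method_call = method_call,
    };

    static guint
    export_demo(GDBusConnection *connection, GError **error)
    {
        /* one registration per object path and interface; returns 0 on error */
        return g_dbus_connection_register_object(connection,
                                                 "/org/example/Demo",
                                                 &iface_info,
                                                 &iface_vtable,
                                                 NULL, NULL, error);
    }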
static void
impl_manager_get_devices(NMDBusObject                      *obj,
                         const NMDBusInterfaceInfoExtended *interface_info,
                         const NMDBusMethodInfoExtended    *method_info,
                         GDBusConnection                   *connection,
                         const char                        *sender,
                         GDBusMethodInvocation             *invocation,
                         GVariant                          *parameters)
{
    NMManager           *self  = NM_MANAGER(obj);
    gs_free const char **paths = NULL;

    paths = _get_devices_paths(self, FALSE);
    g_dbus_method_invocation_return_value(invocation, g_variant_new("(^ao)", (char **) paths));
}
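For reference, the method exported here can be exercised straight from the
command line, in the same spirit as the benchmark commands quoted in the
commit message above; bus name, object path and interface are the stable,
documented NetworkManager ones (GetAllDevices, implemented next, is invoked
the same way):

    $ busctl call org.freedesktop.NetworkManager \
          /org/freedesktop/NetworkManager \
          org.freedesktop.NetworkManager \
          GetDevices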
static void
impl_manager_get_all_devices(NMDBusObject                      *obj,
                             const NMDBusInterfaceInfoExtended *interface_info,
                             const NMDBusMethodInfoExtended    *method_info,
                             GDBusConnection                   *connection,
                             const char                        *sender,
                             GDBusMethodInvocation             *invocation,
                             GVariant                          *parameters)
{
    NMManager           *self  = NM_MANAGER(obj);
    gs_free const char **paths = NULL;

    paths = _get_devices_paths(self, TRUE);
    g_dbus_method_invocation_return_value(invocation, g_variant_new("(^ao)", (char **) paths));
}
2015-04-15 14:53:30 -04:00
static void
impl_manager_get_device_by_ip_iface(NMDBusObject                      *obj,
                                    const NMDBusInterfaceInfoExtended *interface_info,
                                    const NMDBusMethodInfoExtended    *method_info,
                                    GDBusConnection                   *connection,
                                    const char                        *sender,
                                    GDBusMethodInvocation             *invocation,
                                    GVariant                          *parameters)
{
    NMManager  *self = NM_MANAGER(obj);
    NMDevice   *device;
    const char *path = NULL;
    const char *iface;

    g_variant_get(parameters, "(&s)", &iface);

    device = find_device_by_ip_iface(self, iface);
    if (device)
        path = nm_dbus_object_get_path(NM_DBUS_OBJECT(device));

    if (!path) {
        g_dbus_method_invocation_return_error(invocation,
                                              NM_MANAGER_ERROR,
                                              NM_MANAGER_ERROR_UNKNOWN_DEVICE,
                                              "No device found for the requested iface.");
        return;
    }

    g_dbus_method_invocation_return_value(invocation, g_variant_new("(o)", path));
}

static gboolean
is_compatible_with_slave(NMConnection *master, NMConnection *slave)
{
    NMSettingConnection *s_con;

    g_return_val_if_fail(master, FALSE);
    g_return_val_if_fail(slave, FALSE);

    s_con = nm_connection_get_setting_connection(slave);
    g_assert(s_con);

    return nm_connection_is_type(master, nm_setting_connection_get_slave_type(s_con));
}
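Illustrative use (a sketch, not part of this file; it builds the two profiles
with libnm-core directly): a profile whose connection.slave-type is "bond" is
compatible only with a master profile whose connection.type is "bond".

static gboolean
example_bond_compat(void)
{
    NMConnection        *master = nm_simple_connection_new();
    NMConnection        *slave  = nm_simple_connection_new();
    NMSettingConnection *s_con;
    gboolean             compat;

    /* master profile: connection.type = "bond" */
    s_con = NM_SETTING_CONNECTION(nm_setting_connection_new());
    g_object_set(s_con, NM_SETTING_CONNECTION_TYPE, "bond", NULL);
    nm_connection_add_setting(master, NM_SETTING(s_con));

    /* slave profile: connection.slave-type = "bond" */
    s_con = NM_SETTING_CONNECTION(nm_setting_connection_new());
    g_object_set(s_con, NM_SETTING_CONNECTION_SLAVE_TYPE, "bond", NULL);
    nm_connection_add_setting(slave, NM_SETTING(s_con));

    compat = is_compatible_with_slave(master, slave); /* TRUE */

    g_object_unref(master);
    g_object_unref(slave);
    return compat;
}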

/**
 * find_master:
 * @self: #NMManager object
 * @connection: the #NMConnection to find the master connection and device for
 * @device: the #NMDevice, if any, which will activate @connection
 * @out_master_connection: on success, the master connection of @connection if
 *   that master connection was found
 * @out_master_device: on success, the master device of @connection if that
 *   master device was found
 * @out_master_ac: on success, the master ActiveConnection of @connection if
 *   there already is one
 * @error: the error, if an error occurred
 *
 * Given an #NMConnection, attempts to find its master. If @connection has
 * no master, this will return %TRUE and @out_master_connection and
 * @out_master_device will be untouched.
 *
 * If @connection does have a master, then the outputs depend on what is in its
 * #NMSettingConnection:master property:
 *
 * If "master" is the ifname of an existing #NMDevice, and that device has a
 * compatible master connection activated or activating on it, then
 * @out_master_device, @out_master_connection, and @out_master_ac will all be
 * set. If the device exists and is idle, only @out_master_device will be set.
 * If the device exists and has an incompatible connection on it, an error
 * will be returned.
 *
 * If "master" is the ifname of a non-existent device, then @out_master_device
 * will be %NULL, and @out_master_connection will be a connection whose
 * activation would cause the creation of that device. @out_master_ac MAY be
 * set in this case as well (if the connection has started activating, but has
 * not yet created its device).
 *
 * If "master" is the UUID of a compatible master connection, then
 * @out_master_connection will be the identified connection, and @out_master_device
 * and/or @out_master_ac will be set if the connection is currently activating.
 * (@out_master_device will not be set if the device exists but does not have
 * @out_master_connection active/activating on it.)
 *
 * Returns: %TRUE if the master device and/or connection could be found or if
 * the connection did not require a master, %FALSE otherwise
 **/
static gboolean
find_master(NMManager             *self,
            NMConnection          *connection,
            NMDevice              *device,
            NMSettingsConnection **out_master_connection,
            NMDevice             **out_master_device,
            NMActiveConnection   **out_master_ac,
            GError               **error)
{
    NMManagerPrivate     *priv = NM_MANAGER_GET_PRIVATE(self);
    NMSettingConnection  *s_con;
    const char           *master;
    NMDevice             *master_device = NULL;
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements the NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates keeping the actual profile.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
an NMConnection instance at hand, it's good to know that it is
*only* that simple, instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle an NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface, and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as
immutable and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as the applied-connection, all it can do is clone the entire
NMSettingsConnection as an NMSimpleConnection.
Likewise, when we get an NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who else references and modifies the instance.
By separating NMSettingsConnection, we could in the future make
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
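A minimal sketch of that delegation (hypothetical struct layout; the real
type is more involved): instead of "is-a" NMConnection, the
settings-connection now "has-a" NMConnection, handed out via an accessor
like nm_settings_connection_get_connection().

struct _ExampleSettingsConnection {
    NMDBusObject  parent;     /* still exports the profile on D-Bus */
    NMConnection *connection; /* the delegate, an NMSimpleConnection */
};

static NMConnection *
example_settings_connection_get_connection(struct _ExampleSettingsConnection *self)
{
    /* callers get the plain profile object, never the settings-connection */
    return self->connection;
}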
    NMSettingsConnection *master_connection;

    s_con = nm_connection_get_setting_connection(connection);
    g_assert(s_con);
    master = nm_setting_connection_get_master(s_con);

    if (master == NULL)
        return TRUE; /* success, but no master */

    /* Try as an interface name first */
    master_device = find_device_by_iface(self, master, NULL, connection);
    if (master_device) {
        if (master_device == device) {
            g_set_error_literal(error,
                                NM_MANAGER_ERROR,
                                NM_MANAGER_ERROR_DEPENDENCY_FAILED,
                                "Device cannot be its own master");
            return FALSE;
        }

        master_connection = nm_device_get_settings_connection(master_device);
        if (master_connection
            && !is_compatible_with_slave(nm_settings_connection_get_connection(master_connection),
                                         connection)) {
            g_set_error(error,
                        NM_MANAGER_ERROR,
                        NM_MANAGER_ERROR_DEPENDENCY_FAILED,
                        "The active connection on %s is not compatible",
                        nm_device_get_iface(master_device));
            return FALSE;
        }
    } else {
        /* Try master as a connection UUID */
        master_connection = nm_settings_get_connection_by_uuid(priv->settings, master);
        if (master_connection) {
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than a GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1), and you can unlink the
element without having access to the link's head
- contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
were perfect and consumed only 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we ever introduced multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
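A minimal sketch of the embedded-CList pattern (hypothetical container
types; the member names match the devices_lst_head/devices_lst fields used
below): the link node lives inside the tracked object, so linking needs no
extra allocation and unlinking is O(1) without access to the head.

#include <c-list.h>

typedef struct {
    CList devices_lst_head; /* list head, embedded in the manager */
} ExampleManager;

typedef struct {
    CList devices_lst; /* link node, embedded in the device */
} ExampleDevice;

static void
example_track(ExampleManager *m, ExampleDevice *d)
{
    c_list_link_tail(&m->devices_lst_head, &d->devices_lst); /* O(1) */
}

static void
example_untrack(ExampleDevice *d)
{
    c_list_unlink(&d->devices_lst); /* O(1), no access to the head needed */
}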
            NMDevice *candidate;

            /* Check if the master connection is activated on some device already */
            c_list_for_each_entry (candidate, &priv->devices_lst_head, devices_lst) {
                if (candidate == device)
                    continue;

                if (nm_device_get_settings_connection(candidate) == master_connection) {
                    master_device = candidate;
                    break;
                }
            }
        }
    }

    if (out_master_connection)
        *out_master_connection = master_connection;
    if (out_master_device)
        *out_master_device = master_device;
    if (out_master_ac && master_connection) {
        *out_master_ac = active_connection_find(self,
                                                master_connection,
                                                NULL,
                                                NM_ACTIVE_CONNECTION_STATE_DEACTIVATING,
                                                FALSE,
                                                NULL);
    }

    if (master_device || master_connection)
        return TRUE;
    else {
        g_set_error_literal(error,
                            NM_MANAGER_ERROR,
                            NM_MANAGER_ERROR_UNKNOWN_DEVICE,
                            "Master connection not found or invalid");
        return FALSE;
    }
}
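To make the contract above concrete, a sketch of a hypothetical caller
(simplified error handling, not part of the file):

static gboolean
example_resolve_master(NMManager *self, NMConnection *slave, NMDevice *device, GError **error)
{
    NMSettingsConnection *master_connection = NULL;
    NMDevice             *master_device     = NULL;
    NMActiveConnection   *master_ac         = NULL;

    if (!find_master(self,
                     slave,
                     device,
                     &master_connection,
                     &master_device,
                     &master_ac,
                     error))
        return FALSE; /* own master, incompatible master, or unknown master */

    /* All three outputs may still be NULL when @slave has no master at all. */
    return TRUE;
}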

/**
 * ensure_master_active_connection:
 * @self: the #NMManager
 * @subject: the #NMAuthSubject representing the requestor of this activation
 * @connection: the connection that should depend on @master_connection
 * @device: the #NMDevice, if any, which will activate @connection
 * @master_connection: the master connection, or %NULL
 * @master_device: the master device, or %NULL
 * @activation_reason: the reason for activation
 * @error: the error, if an error occurred
 *
 * Determines whether a given #NMConnection depends on another connection to
 * be activated, and if so, finds that master connection or creates it.
 *
 * If @master_device and @master_connection are both set then @master_connection
 * MUST already be activated or activating on @master_device, and the function will
 * return the existing #NMActiveConnection.
 *
 * If only @master_device is set, and it has an #NMActiveConnection, then the
 * function will return it if it is a compatible master, or an error if not. If it
 * doesn't have an AC, then the function will create one if a compatible master
 * connection exists, or return an error if not.
 *
 * If only @master_connection is set, then this will try to find or create a compatible
 * #NMDevice, and either activate @master_connection on that device or return an error.
 *
 * Returns: the master #NMActiveConnection that the caller should depend on, or
 * %NULL if an error occurred
 */
static NMActiveConnection *
ensure_master_active_connection(NMManager            *self,
                                NMAuthSubject        *subject,
                                NMConnection         *connection,
                                NMDevice             *device,
                                NMSettingsConnection *master_connection,
                                NMDevice             *master_device,
                                NMActivationReason    activation_reason,
                                GError              **error)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
core: improve and fix keeping connection active based on "connection.permissions"
By setting "connection.permissions", a profile is restricted to a
particular user.
That means, for example, that another user cannot see, modify, delete,
activate or deactivate the profile. It also means that the profile
will only autoconnect when the user is logged in (has a session).
Note that root is always able to activate the profile. Likewise, the
user is also allowed to manually activate their own profile, even if no
session currently exists (which can easily happen with `sudo`).
When the user logs out (the session goes away), we want to disconnect
the profile; however, there are conflicting goals here:
1) if the profile was activated by the root user, then logging out the user
should not disconnect the profile. The patch fixes that by not
binding the activation to the connection if the activation is done
by the root user.
2) if the profile was activated by the owner when it had no session,
then it should stay alive until the user logs in (once) and logs
out again. This is already handled by the previous commit.
Yes, this point is odd. If you first do
$ sudo -u $OTHER_USER nmcli connection up $PROFILE
the profile activates despite not having a session. If you then
$ ssh guest@localhost nmcli device
you'll still see the profile active. However, the moment the SSH session
ends, a session closes and the profile disconnects. It's unclear how to
solve that any better. I think a user who cares about this should not
activate the profile without having a session in the first place.
There are quite some special cases, in particular with internal
activations. In those cases we need to decide whether to bind the
activation to the profile's visibility.
Also, expose the "bind" setting in the D-Bus API. Note that in the future
this flag may be modified via the D-Bus API. Likewise, we may also add
related API that allows tweaking the lifetime of the activation.
Also, I think we broke handling of connection visibility with 37e8c53eeed
"core: Introduce helper class to track connection keep alive". This
should be fixed now too, with improved behavior.
Fixes: 37e8c53eeed579fe34a68819cd12f3295d581394
https://bugzilla.redhat.com/show_bug.cgi?id=1530977
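As a sketch of that lifetime rule (hypothetical helper; the flag itself and
its setters appear in the code below): an activation survives the owner's
logout exactly when it is not bound to the profile's visibility.

static gboolean
example_ac_survives_logout(NMActiveConnection *ac)
{
    /* assumed accessor: the state flags exposed by NMActiveConnection */
    return !NM_FLAGS_HAS(nm_active_connection_get_state_flags(ac),
                         NM_ACTIVATION_STATE_FLAG_LIFETIME_BOUND_TO_PROFILE_VISIBILITY);
}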
    NMActiveConnection *ac;
    NMActiveConnection *master_ac = NULL;
    NMDeviceState       master_state;
    gboolean            bind_lifetime_to_profile_visibility;

    g_return_val_if_fail(connection, NULL);
    g_return_val_if_fail(master_connection || master_device, FALSE);

    bind_lifetime_to_profile_visibility =
        NM_FLAGS_HAS(nm_device_get_activation_state_flags(device),
                     NM_ACTIVATION_STATE_FLAG_LIFETIME_BOUND_TO_PROFILE_VISIBILITY);

    /* If the master device isn't activated then we need to activate it using
     * a compatible connection. If it's already activating we can just proceed.
     */
    if (master_device) {
        NMSettingsConnection *device_connection = nm_device_get_settings_connection(master_device);

        /* If we're passed a connection and a device, we require that connection
         * be already activated on the device, e.g. returned from find_master().
         */
core: fix activation of slave when master is not active, but device exists
NM fails to activate a slave if the master device already exists
but has no active connection.
One way to reproduce: create a bond master/slave configuration and
ensure that the master device exists (e.g. by activating the bond and
killing NM without taking down the device, or externally via `ip link add`).
If you try to activate the slave, it will fail with the following message
(in nmcli):
"Error: Connection activation failed: The active connection on MASTER is not a valid master for 'SLAVE'"
although MASTER is not active.
This also triggers the following assertion:
#0 0x0000003370c504e9 in g_logv () from /lib64/libglib-2.0.so.0
#1 0x0000003370c5063f in g_log () from /lib64/libglib-2.0.so.0
#2 0x000000000047646a in is_compatible_with_slave (master=0x0, slave=slave@entry=0xc4aa60) at nm-manager.c:2193
#3 0x000000000047e289 in ensure_master_active_connection (self=self@entry=0xc8d150, subject=0x7f23b80059e0, connection=connection@entry=0xc4aa60, device=device@entry=0xcac380, master_connection=master_connection@entry=0x0,
master_device=master_device@entry=0xc9e800, error=error@entry=0x7fffa5cc4958) at nm-manager.c:2395
#4 0x000000000047eb4a in _internal_activate_device (self=self@entry=0xc8d150, active=active@entry=0xcc33b0, error=error@entry=0x7fffa5cc4958) at nm-manager.c:2665
#5 0x000000000047ecf2 in _internal_activate_generic (self=self@entry=0xc8d150, active=active@entry=0xcc33b0, error=error@entry=0x7fffa5cc4958) at nm-manager.c:2712
#6 0x000000000047ef2b in _internal_activation_auth_done (active=0xcc33b0, success=<optimized out>, error_desc=0x0, user_data1=0xc8d150, user_data2=<optimized out>) at nm-manager.c:2848
#7 0x0000000000466fa1 in auth_done (chain=0xcef020, error=0x0, unused=<optimized out>, user_data=<optimized out>) at nm-active-connection.c:603
#8 0x00000000004753da in auth_chain_finish (user_data=0xcef020) at nm-manager-auth.c:88
#9 0x0000003370c492a6 in g_main_context_dispatch () from /lib64/libglib-2.0.so.0
#10 0x0000003370c49628 in g_main_context_iterate.isra () from /lib64/libglib-2.0.so.0
#11 0x0000003370c49a3a in g_main_loop_run () from /lib64/libglib-2.0.so.0
#12 0x0000000000429e65 in main (argc=1, argv=0x7fffa5cc4e48) at main.c:678
Signed-off-by: Thomas Haller <thaller@redhat.com>
        g_assert(!master_connection || master_connection == device_connection);
        if (device_connection
            && !is_compatible_with_slave(nm_settings_connection_get_connection(device_connection),
                                         connection)) {
            g_set_error(error,
                        NM_MANAGER_ERROR,
                        NM_MANAGER_ERROR_DEPENDENCY_FAILED,
                        "The active connection %s is not compatible",
                        nm_connection_get_id(connection));
            return NULL;
        }

        master_state = nm_device_get_state(master_device);
        if ((master_state == NM_DEVICE_STATE_ACTIVATED) || nm_device_is_activating(master_device)) {
            /* Device already using master_connection */
            ac = NM_ACTIVE_CONNECTION(nm_device_get_act_request(master_device));
            g_return_val_if_fail(device_connection, ac);

            if (!bind_lifetime_to_profile_visibility) {
                /* unbind the lifetime. */
                nm_active_connection_set_state_flags_clear(
                    ac,
                    NM_ACTIVATION_STATE_FLAG_LIFETIME_BOUND_TO_PROFILE_VISIBILITY);
            }

            return ac;
        }

        /* If the device is disconnected, find a compatible connection and
         * activate it on the device.
         */
        if (master_state == NM_DEVICE_STATE_DISCONNECTED || !nm_device_is_real(master_device)) {
            gs_free NMSettingsConnection **connections = NULL;
            guint                          i;

            g_assert(master_connection == NULL);

            /* Find a compatible connection and activate this device using it */
            connections = nm_manager_get_activatable_connections(self, FALSE, TRUE, NULL);
            for (i = 0; connections[i]; i++) {
                NMSettingsConnection *candidate = connections[i];
                NMConnection         *cand_conn = nm_settings_connection_get_connection(candidate);

                /* Ensure eg bond/team slave and the candidate master is a
                 * bond/team master
                 */
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements the NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
an NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle an NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as
immutable and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as the applied-connection, all it can do is clone the entire
NMSettingsConnection as an NMSimpleConnection.
Likewise, when we get an NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection, we could in the future make
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
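The delegation described above boils down to a has-a relationship; a minimal sketch under the assumption of a plain GObject wrapper (hypothetical type, far simpler than the real NMSettingsConnection):

/* Sketch of delegation: the settings object *has-a* NMConnection
 * instead of *is-a* NMConnection. */
typedef struct {
    GObject       parent;
    NMConnection *connection; /* owned NMSimpleConnection instance */
} SketchSettingsConnection;

static NMConnection *
sketch_settings_connection_get_connection(SketchSettingsConnection *self)
{
    /* callers receive a plain NMConnection; casting the settings
     * object itself to NMConnection is no longer possible */
    return self->connection;
}

This is why the code below consistently calls nm_settings_connection_get_connection() wherever an NMConnection is needed.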
|
|
|
if (!is_compatible_with_slave(cand_conn, connection))
|
2012-02-26 17:27:42 -06:00
|
|
|
continue;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-06-27 16:21:43 +02:00
|
|
|
if (nm_device_check_connection_available(
|
|
|
|
|
master_device,
|
2018-08-11 11:08:17 +02:00
|
|
|
cand_conn,
|
2018-06-27 16:21:43 +02:00
|
|
|
NM_DEVICE_CHECK_CON_AVAILABLE_FOR_USER_REQUEST,
|
|
|
|
|
NULL,
|
|
|
|
|
NULL)) {
|
2012-02-26 17:27:42 -06:00
|
|
|
master_ac = nm_manager_activate_connection(
|
|
|
|
|
self,
|
|
|
|
|
candidate,
|
2016-09-07 17:47:26 +02:00
|
|
|
NULL,
|
2020-09-28 16:03:33 +02:00
|
|
|
NULL,
|
2012-09-13 15:57:36 -05:00
|
|
|
master_device,
|
2013-07-29 13:11:47 -05:00
|
|
|
subject,
|
2017-03-07 11:04:36 +01:00
|
|
|
NM_ACTIVATION_TYPE_MANAGED,
|
2018-03-28 17:18:04 +02:00
|
|
|
activation_reason,
|
2018-11-21 13:30:16 +01:00
|
|
|
bind_lifetime_to_profile_visibility
|
|
|
|
|
? NM_ACTIVATION_STATE_FLAG_LIFETIME_BOUND_TO_PROFILE_VISIBILITY
|
|
|
|
|
: NM_ACTIVATION_STATE_FLAG_NONE,
|
2012-02-26 17:27:42 -06:00
|
|
|
error);
|
|
|
|
|
return master_ac;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
|
|
|
|
|
2012-02-26 17:27:42 -06:00
|
|
|
g_set_error(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_CONNECTION,
|
2016-03-30 09:00:06 +02:00
|
|
|
"No compatible connection found.");
|
2012-02-26 17:27:42 -06:00
|
|
|
return NULL;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2012-02-26 17:27:42 -06:00
|
|
|
/* Otherwise, the device is unmanaged, unavailable, or disconnecting */
|
|
|
|
|
g_set_error(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
2014-10-15 15:27:25 -04:00
|
|
|
NM_MANAGER_ERROR_DEPENDENCY_FAILED,
|
2016-03-30 09:00:06 +02:00
|
|
|
"Device unmanaged or not available for activation");
|
2012-02-26 17:27:42 -06:00
|
|
|
} else if (master_connection) {
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than a GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1), and you can unlink the
element without having access to the list's head
- contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
were perfect and only consumed 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we ever introduced multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
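The O(1) properties claimed above follow from the embedded, circular nature of the list; a generic sketch (not the actual c-list implementation):

/* Sketch of an embedded circular list node. The node lives inside
 * the tracked struct, so no separate link allocation is needed. */
typedef struct SketchList {
    struct SketchList *prev;
    struct SketchList *next;
} SketchList;

static gboolean
sketch_list_is_linked(const SketchList *node)
{
    /* O(1): an initialized but unlinked node points to itself */
    return node->next != node;
}

static void
sketch_list_unlink(SketchList *node)
{
    /* O(1) unlink, no access to the list head required */
    node->prev->next = node->next;
    node->next->prev = node->prev;
    node->next = node;
    node->prev = node;
}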
|
|
|
NMDevice *candidate;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-03-23 21:51:07 +01:00
|
|
|
/* Find a compatible device and activate it using this connection */
|
|
|
|
|
c_list_for_each_entry (candidate, &priv->devices_lst_head, devices_lst) {
|
2012-02-26 17:27:42 -06:00
|
|
|
if (candidate == device) {
|
|
|
|
|
/* A device obviously can't be its own master */
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-06-27 16:21:43 +02:00
|
|
|
if (!nm_device_check_connection_available(
|
|
|
|
|
candidate,
|
2018-08-11 11:08:17 +02:00
|
|
|
nm_settings_connection_get_connection(master_connection),
|
2018-06-27 16:21:43 +02:00
|
|
|
NM_DEVICE_CHECK_CON_AVAILABLE_FOR_USER_REQUEST,
|
|
|
|
|
NULL,
|
|
|
|
|
NULL))
|
2012-02-26 17:27:42 -06:00
|
|
|
continue;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
manager: don't bring up master connections on devices that are not disconnected
Otherwise we're likely interfering with an in-progress activation.
Consider the following connections, the first two being active:
id=bond0a type=bond interface-name=bond0, (Active)
id=dummy0a type=dummy interface-name=dummy0 master=bond0a, (Active)
id=bond0b type=bond interface-name=bond0
id=dummy0b type=dummy interface-name=dummy0 master=bond0b
Note there are two hierarchies with a bond0 bond having a dummy0 port,
the first one (bond0a, dummy0a) being active.
Suppose the user wants to bring the other one up (bond0b, dummy0b) and
does a "nmcli c up bond0b". This is what happens:
1.) bond0b starts activation due to user request
2.) bond0a starts deactivation due to new activation
3.) dummy0 loses its master, begins deactivation
4.) dummy0 finishes deactivation
5.) both dummy0 being deactivated and bond0b checking for slaves enqueue an
auto-activation check for dummy0
6.) auto-activation picks dummy0a for dummy0
7.) dummy0a begins activation
8.) dummy0a looks for a master connection, picks bond0a
9.) bond0a starts activating on bond0, kicks bond0b away
10.) bond0a and dummy0a end up finishing activation
11.) Everybody unhappy :(
NM's auto-activation logic only takes autoconnect priority into
account when figuring out a connection to activate and can't be expected
to bring up the most sensible combination of connections when there are
multiple ones for the same devices with complex dependencies.
Nevertheless, it shouldn't ever undo the activations if the user is
bringing up the connections manually.
This patch prevents bringing up master devices that are not
DISCONNECTED and therefore shouldn't be up for grabs. This was
previously done for hardware devices only, whereas I believe it should be
the case for *all* realized devices.
https://gitlab.freedesktop.org/NetworkManager/NetworkManager-ci/-/merge_requests/1172
https://gitlab.freedesktop.org/NetworkManager/NetworkManager/-/merge_requests/1364
2022-09-05 17:34:01 +02:00
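For reference, the competing hierarchies from the commit message could be set up roughly like this (one possible reproduction, assuming a recent nmcli with dummy-device support):

# two hierarchies competing for bond0/dummy0
nmcli connection add type bond con-name bond0a ifname bond0
nmcli connection add type dummy con-name dummy0a ifname dummy0 master bond0a slave-type bond
nmcli connection add type bond con-name bond0b ifname bond0
nmcli connection add type dummy con-name dummy0b ifname dummy0 master bond0b slave-type bond
nmcli connection up bond0a   # first hierarchy active
nmcli connection up bond0b   # must not be undone by auto-activation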
|
|
|
if (nm_device_is_real(candidate)
|
|
|
|
|
&& nm_device_get_state(candidate) != NM_DEVICE_STATE_DISCONNECTED)
|
|
|
|
|
continue;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-01-30 12:52:42 -06:00
|
|
|
master_ac = nm_manager_activate_connection(
|
|
|
|
|
self,
|
|
|
|
|
master_connection,
|
2016-09-07 17:47:26 +02:00
|
|
|
NULL,
|
2020-09-28 16:03:33 +02:00
|
|
|
NULL,
|
2012-09-13 15:57:36 -05:00
|
|
|
candidate,
|
2013-07-29 13:11:47 -05:00
|
|
|
subject,
|
2017-03-07 11:04:36 +01:00
|
|
|
NM_ACTIVATION_TYPE_MANAGED,
|
2018-03-28 17:18:04 +02:00
|
|
|
activation_reason,
|
2018-11-21 13:30:16 +01:00
|
|
|
bind_lifetime_to_profile_visibility
|
|
|
|
|
? NM_ACTIVATION_STATE_FLAG_LIFETIME_BOUND_TO_PROFILE_VISIBILITY
|
|
|
|
|
: NM_ACTIVATION_STATE_FLAG_NONE,
|
2013-01-30 12:52:42 -06:00
|
|
|
error);
|
|
|
|
|
return master_ac;
|
2012-02-26 17:27:42 -06:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2012-02-26 17:27:42 -06:00
|
|
|
g_set_error(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_DEVICE,
|
2016-03-30 09:00:06 +02:00
|
|
|
"No device available");
|
2012-02-26 17:27:42 -06:00
|
|
|
} else
|
|
|
|
|
g_assert_not_reached();
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2012-02-26 17:27:42 -06:00
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2017-02-24 09:32:05 +01:00
|
|
|
typedef struct {
|
|
|
|
|
NMSettingsConnection *connection;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device;
|
2017-02-24 09:32:05 +01:00
|
|
|
} SlaveConnectionInfo;
|
|
|
|
|
|
2015-05-06 14:38:55 +02:00
|
|
|
/**
|
|
|
|
|
* find_slaves:
|
|
|
|
|
* @manager: #NMManager object
|
2018-08-11 11:08:17 +02:00
|
|
|
* @sett_conn: the master #NMSettingsConnection to find slave connections for
|
|
|
|
|
* @device: the master #NMDevice for the @sett_conn
|
2017-02-24 09:32:05 +01:00
|
|
|
* @out_n_slaves: on return, the number of slaves found
|
2015-05-06 14:38:55 +02:00
|
|
|
*
|
2018-08-11 11:08:17 +02:00
|
|
|
* Given an #NMSettingsConnection, attempts to find its slaves. If @sett_conn is not
|
2015-05-06 14:38:55 +02:00
|
|
|
* a master, or has no slaves, this will return %NULL.
|
|
|
|
|
*
|
2018-08-11 11:08:17 +02:00
|
|
|
* Returns: an array of #SlaveConnectionInfo for the given master @sett_conn, or %NULL
|
2015-05-06 14:38:55 +02:00
|
|
|
**/
|
2017-02-24 09:32:05 +01:00
|
|
|
static SlaveConnectionInfo *
|
2021-11-09 13:28:54 +01:00
|
|
|
find_slaves(NMManager *manager,
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn,
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device,
|
|
|
|
|
guint *out_n_slaves,
|
2019-07-02 11:51:29 +02:00
|
|
|
gboolean for_user_request)
|
2015-05-06 14:38:55 +02:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(manager);
|
|
|
|
|
NMSettingsConnection *const *all_connections = NULL;
|
|
|
|
|
guint n_all_connections;
|
|
|
|
|
guint i;
|
|
|
|
|
SlaveConnectionInfo *slaves = NULL;
|
|
|
|
|
guint n_slaves = 0;
|
|
|
|
|
NMSettingConnection *s_con;
|
2017-02-24 09:32:05 +01:00
|
|
|
gs_unref_hashtable GHashTable *devices = NULL;
|
|
|
|
|
|
|
|
|
|
nm_assert(out_n_slaves);
|
2015-05-06 14:38:55 +02:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
s_con = nm_connection_get_setting_connection(nm_settings_connection_get_connection(sett_conn));
|
2017-02-24 09:32:05 +01:00
|
|
|
g_return_val_if_fail(s_con, NULL);
|
|
|
|
|
|
2017-11-15 16:06:43 +01:00
|
|
|
devices = g_hash_table_new(nm_direct_hash, NULL);
|
2015-05-06 14:38:55 +02:00
|
|
|
|
|
|
|
|
/* Search through all connections, not only inactive ones, because
|
|
|
|
|
* even if a slave was already active, it might be deactivated during
|
|
|
|
|
* master reactivation.
|
|
|
|
|
*/
|
2021-06-15 00:41:45 +02:00
|
|
|
all_connections =
|
|
|
|
|
nm_settings_get_connections_sorted_by_autoconnect_priority(priv->settings,
|
|
|
|
|
&n_all_connections);
|
2017-02-24 09:32:05 +01:00
|
|
|
for (i = 0; i < n_all_connections; i++) {
|
2015-07-14 16:53:24 +02:00
|
|
|
NMSettingsConnection *master_connection = NULL;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *master_device = NULL, *slave_device;
|
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *candidate = all_connections[i];
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
find_master(manager,
|
|
|
|
|
nm_settings_connection_get_connection(candidate),
|
|
|
|
|
NULL,
|
|
|
|
|
&master_connection,
|
|
|
|
|
&master_device,
|
|
|
|
|
NULL,
|
|
|
|
|
NULL);
|
|
|
|
|
if ((master_connection && master_connection == sett_conn)
|
2015-05-06 14:38:55 +02:00
|
|
|
|| (master_device && master_device == device)) {
|
2017-02-24 09:32:05 +01:00
|
|
|
slave_device = nm_manager_get_best_device_for_connection(manager,
|
|
|
|
|
candidate,
|
2018-08-11 11:08:17 +02:00
|
|
|
NULL,
|
2019-07-02 11:51:29 +02:00
|
|
|
for_user_request,
|
2018-07-10 11:50:21 +02:00
|
|
|
devices,
|
|
|
|
|
NULL);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-02-24 09:32:05 +01:00
|
|
|
if (!slaves) {
|
|
|
|
|
/* what we allocate is quite likely much too large. Don't bother, it is only
|
|
|
|
|
* a temporary buffer. */
|
|
|
|
|
slaves = g_new(SlaveConnectionInfo, n_all_connections);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nm_assert(n_slaves < n_all_connections);
|
2022-09-15 16:19:24 +02:00
|
|
|
slaves[n_slaves++] = (SlaveConnectionInfo){
|
|
|
|
|
.connection = candidate,
|
|
|
|
|
.device = slave_device,
|
|
|
|
|
};
|
2017-02-24 09:32:05 +01:00
|
|
|
|
|
|
|
|
if (slave_device)
|
|
|
|
|
g_hash_table_add(devices, slave_device);
|
2015-05-06 14:38:55 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-02-24 09:32:05 +01:00
|
|
|
*out_n_slaves = n_slaves;
|
|
|
|
|
|
|
|
|
|
/* Warning: returns NULL if n_slaves is zero. */
|
|
|
|
|
return slaves;
|
2015-05-06 14:38:55 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
should_connect_slaves(NMConnection *connection, NMDevice *device)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMSettingConnection *s_con;
|
2018-09-05 09:24:33 +02:00
|
|
|
NMSettingConnectionAutoconnectSlaves val;
|
2015-05-06 14:38:55 +02:00
|
|
|
|
|
|
|
|
s_con = nm_connection_get_setting_connection(connection);
|
|
|
|
|
g_assert(s_con);
|
|
|
|
|
|
2018-09-05 09:24:33 +02:00
|
|
|
val = nm_setting_connection_get_autoconnect_slaves(s_con);
|
|
|
|
|
if (val != NM_SETTING_CONNECTION_AUTOCONNECT_SLAVES_DEFAULT)
|
2015-05-06 14:38:55 +02:00
|
|
|
goto out;
|
|
|
|
|
|
2018-09-05 09:24:33 +02:00
|
|
|
val =
|
|
|
|
|
nm_config_data_get_connection_default_int64(NM_CONFIG_GET_DATA,
|
2018-11-14 14:52:21 +01:00
|
|
|
NM_CON_DEFAULT("connection.autoconnect-slaves"),
|
2018-09-05 09:24:33 +02:00
|
|
|
device,
|
|
|
|
|
0,
|
|
|
|
|
1,
|
|
|
|
|
-1);
|
2015-05-06 14:38:55 +02:00
|
|
|
|
|
|
|
|
out:
|
2018-09-05 09:24:33 +02:00
|
|
|
if (val == NM_SETTING_CONNECTION_AUTOCONNECT_SLAVES_NO)
|
2015-05-06 14:38:55 +02:00
|
|
|
return FALSE;
|
2018-09-05 09:24:33 +02:00
|
|
|
if (val == NM_SETTING_CONNECTION_AUTOCONNECT_SLAVES_YES)
|
2015-05-06 14:38:55 +02:00
|
|
|
return TRUE;
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
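The configuration fallback read above can be set globally; for example, a NetworkManager.conf drop-in that makes slaves autoconnect by default (an example snippet, the file path and values chosen for illustration):

# /etc/NetworkManager/conf.d/90-autoconnect-slaves.conf
[connection]
connection.autoconnect-slaves=1

[main]
# activation order of slaves: "name" (default) or "index";
# read further below via NM_CONFIG_KEYFILE_KEY_MAIN_SLAVES_ORDER
slaves-order=name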
|
|
|
|
|
|
all: don't use gchar/gshort/gint/glong but C types
We commonly don't use the glib typedefs for char/short/int/long,
but their C types directly.
$ git grep '\<g\(char\|short\|int\|long\|float\|double\)\>' | wc -l
587
$ git grep '\<\(char\|short\|int\|long\|float\|double\)\>' | wc -l
21114
One could argue that using the glib typedefs is preferable in
public API (of our glib based libnm library) or where it clearly
is related to glib, like during
g_object_set (obj, PROPERTY, (gint) value, NULL);
However, that argument does not seem strong, because in practice we don't
follow that argument today, and seldom use the glib typedefs.
Also, the style guide for this would be hard to formalize, because
"using them where clearly related to glib" is a very loose suggestion.
Also note that glib typedefs will always just be typedefs of the
underlying C types. There is no danger of glib changing the meaning
of these typedefs (because that would be a major API break of glib).
A simple style guide is instead: don't use these typedefs.
No manual actions, I only ran the bash script:
FILES=($(git ls-files '*.[hc]'))
sed -i \
-e 's/\<g\(char\|short\|int\|long\|float\|double\)\>\( [^ ]\)/\1\2/g' \
-e 's/\<g\(char\|short\|int\|long\|float\|double\)\> /\1 /g' \
-e 's/\<g\(char\|short\|int\|long\|float\|double\)\>/\1/g' \
"${FILES[@]}"
2018-07-11 07:40:19 +02:00
|
|
|
static int
|
2017-05-15 17:17:26 +02:00
|
|
|
compare_slaves(gconstpointer a, gconstpointer b, gpointer sort_by_name)
|
2017-03-02 16:14:18 +01:00
|
|
|
{
|
|
|
|
|
const SlaveConnectionInfo *a_info = a;
|
|
|
|
|
const SlaveConnectionInfo *b_info = b;
|
|
|
|
|
|
|
|
|
|
/* Slaves without a device at the end */
|
|
|
|
|
if (!a_info->device)
|
|
|
|
|
return 1;
|
|
|
|
|
if (!b_info->device)
|
|
|
|
|
return -1;
|
|
|
|
|
|
2017-05-15 17:17:26 +02:00
|
|
|
if (GPOINTER_TO_INT(sort_by_name)) {
|
2020-07-20 09:31:03 +02:00
|
|
|
return nm_strcmp0(nm_device_get_iface(a_info->device), nm_device_get_iface(b_info->device));
|
2017-05-15 17:17:26 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return nm_device_get_ifindex(a_info->device) - nm_device_get_ifindex(b_info->device);
|
2017-03-02 16:14:18 +01:00
|
|
|
}
|
|
|
|
|
|
2017-02-24 09:32:05 +01:00
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
autoconnect_slaves(NMManager *self,
|
2015-07-14 16:53:24 +02:00
|
|
|
NMSettingsConnection *master_connection,
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *master_device,
|
|
|
|
|
NMAuthSubject *subject,
|
2019-07-02 11:51:29 +02:00
|
|
|
gboolean for_user_request)
|
2015-05-06 14:38:55 +02:00
|
|
|
{
|
|
|
|
|
GError *local_err = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
if (should_connect_slaves(nm_settings_connection_get_connection(master_connection),
|
|
|
|
|
master_device)) {
|
2017-02-24 09:32:05 +01:00
|
|
|
gs_free SlaveConnectionInfo *slaves = NULL;
|
|
|
|
|
guint i, n_slaves = 0;
|
2018-11-21 13:30:16 +01:00
|
|
|
gboolean bind_lifetime_to_profile_visibility;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-07-02 11:51:29 +02:00
|
|
|
slaves = find_slaves(self, master_connection, master_device, &n_slaves, for_user_request);
|
2017-03-02 16:14:18 +01:00
|
|
|
if (n_slaves > 1) {
|
2017-10-24 08:35:42 +02:00
|
|
|
gs_free char *value = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-10-24 08:35:42 +02:00
|
|
|
value = nm_config_data_get_value(NM_CONFIG_GET_DATA,
|
|
|
|
|
NM_CONFIG_KEYFILE_GROUP_MAIN,
|
|
|
|
|
NM_CONFIG_KEYFILE_KEY_MAIN_SLAVES_ORDER,
|
|
|
|
|
NM_CONFIG_GET_VALUE_STRIP);
|
2017-03-02 16:14:18 +01:00
|
|
|
g_qsort_with_data(slaves,
|
|
|
|
|
n_slaves,
|
|
|
|
|
sizeof(slaves[0]),
|
2017-05-15 17:17:26 +02:00
|
|
|
compare_slaves,
|
|
|
|
|
GINT_TO_POINTER(!nm_streq0(value, "index")));
|
2017-03-02 16:14:18 +01:00
|
|
|
}
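For reference, the lookup above reads the "slaves-order" key from the
[main] section of NetworkManager.conf; assuming the documented values
("name" is the default, "index" preserves the order in which the slave
profiles were found), a configuration that skips the sort by name would
look like:

    [main]
    slaves-order=index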
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core: improve and fix keeping connection active based on "connection.permissions"
2018-11-21 13:30:16 +01:00
|
|
|
bind_lifetime_to_profile_visibility =
|
|
|
|
|
n_slaves > 0
|
|
|
|
|
&& NM_FLAGS_HAS(nm_device_get_activation_state_flags(master_device),
|
|
|
|
|
NM_ACTIVATION_STATE_FLAG_LIFETIME_BOUND_TO_PROFILE_VISIBILITY);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-02-24 09:32:05 +01:00
|
|
|
for (i = 0; i < n_slaves; i++) {
|
|
|
|
|
SlaveConnectionInfo *slave = &slaves[i];
|
2021-11-09 13:28:54 +01:00
|
|
|
const char *uuid;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-09-26 16:01:27 +02:00
|
|
|
/* To avoid loops when autoconnecting slaves, we propagate
|
|
|
|
|
* the UUID of the initial connection down to slaves until
|
|
|
|
|
* the same connection is found.
|
|
|
|
|
*/
|
|
|
|
|
uuid = g_object_get_qdata(G_OBJECT(master_connection), autoconnect_root_quark());
|
2017-02-24 09:32:05 +01:00
|
|
|
if (nm_streq0(nm_settings_connection_get_uuid(slave->connection), uuid)) {
|
2016-09-26 16:01:27 +02:00
|
|
|
_LOGI(LOGD_CORE,
|
|
|
|
|
"will NOT activate slave connection '%s' (%s) as a dependency for master "
|
|
|
|
|
"'%s' (%s): "
|
|
|
|
|
"circular dependency detected",
|
2017-02-24 09:32:05 +01:00
|
|
|
nm_settings_connection_get_id(slave->connection),
|
|
|
|
|
nm_settings_connection_get_uuid(slave->connection),
|
2016-09-26 16:01:27 +02:00
|
|
|
nm_settings_connection_get_id(master_connection),
|
|
|
|
|
nm_settings_connection_get_uuid(master_connection));
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-09-26 16:01:27 +02:00
|
|
|
if (!uuid)
|
|
|
|
|
uuid = nm_settings_connection_get_uuid(master_connection);
|
2017-02-24 09:32:05 +01:00
|
|
|
g_object_set_qdata_full(G_OBJECT(slave->connection),
|
2016-09-26 16:01:27 +02:00
|
|
|
autoconnect_root_quark(),
|
|
|
|
|
g_strdup(uuid),
|
|
|
|
|
g_free);
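A minimal sketch of what a cached-quark accessor like autoconnect_root_quark()
boils down to (the daemon presumably defines it via a helper macro; this is
plain GLib, and the quark string is illustrative):

    #include <glib.h>

    static GQuark
    autoconnect_root_quark(void)
    {
        static GQuark quark = 0;

        /* g_quark_from_static_string() is idempotent, so lazily caching
         * the result in a static is safe for repeated lookups. */
        if (G_UNLIKELY(quark == 0))
            quark = g_quark_from_static_string("autoconnect-root");
        return quark;
    }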
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-02-24 09:32:05 +01:00
|
|
|
if (!slave->device) {
|
2017-02-23 18:15:10 +01:00
|
|
|
_LOGD(LOGD_CORE,
|
|
|
|
|
"will NOT activate slave connection '%s' (%s) as a dependency for master "
|
|
|
|
|
"'%s' (%s): "
|
|
|
|
|
"no compatible device found",
|
2017-02-24 09:32:05 +01:00
|
|
|
nm_settings_connection_get_id(slave->connection),
|
|
|
|
|
nm_settings_connection_get_uuid(slave->connection),
|
2017-02-23 18:15:10 +01:00
|
|
|
nm_settings_connection_get_id(master_connection),
|
|
|
|
|
nm_settings_connection_get_uuid(master_connection));
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD(LOGD_CORE,
|
|
|
|
|
"will activate slave connection '%s' (%s) as a dependency for master '%s' (%s)",
|
2017-02-24 09:32:05 +01:00
|
|
|
nm_settings_connection_get_id(slave->connection),
|
|
|
|
|
nm_settings_connection_get_uuid(slave->connection),
|
2016-03-02 11:38:26 +01:00
|
|
|
nm_settings_connection_get_id(master_connection),
|
|
|
|
|
nm_settings_connection_get_uuid(master_connection));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2015-05-06 14:38:55 +02:00
|
|
|
/* Schedule slave activation */
|
2016-03-02 11:38:26 +01:00
|
|
|
nm_manager_activate_connection(
|
|
|
|
|
self,
|
2017-02-24 09:32:05 +01:00
|
|
|
slave->connection,
|
2016-09-07 17:47:26 +02:00
|
|
|
NULL,
|
2020-09-28 16:03:33 +02:00
|
|
|
NULL,
|
2017-02-24 09:32:05 +01:00
|
|
|
slave->device,
|
2015-05-06 14:38:55 +02:00
|
|
|
subject,
|
2017-03-07 11:04:36 +01:00
|
|
|
NM_ACTIVATION_TYPE_MANAGED,
|
2018-03-28 17:18:04 +02:00
|
|
|
NM_ACTIVATION_REASON_AUTOCONNECT_SLAVES,
|
core: improve and fix keeping connection active based on "connection.permissions"
2018-11-21 13:30:16 +01:00
|
|
|
bind_lifetime_to_profile_visibility
|
|
|
|
|
? NM_ACTIVATION_STATE_FLAG_LIFETIME_BOUND_TO_PROFILE_VISIBILITY
|
|
|
|
|
: NM_ACTIVATION_STATE_FLAG_NONE,
|
2015-05-06 14:38:55 +02:00
|
|
|
&local_err);
|
|
|
|
|
if (local_err) {
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGW(LOGD_CORE, "Slave connection activation failed: %s", local_err->message);
|
2017-02-24 12:31:20 +01:00
|
|
|
g_clear_error(&local_err);
|
2015-05-06 14:38:55 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
static gboolean
|
|
|
|
|
_internal_activate_vpn(NMManager *self, NMActiveConnection *active, GError **error)
|
2012-02-23 10:04:41 -06:00
|
|
|
{
|
2018-02-05 15:17:06 +01:00
|
|
|
nm_assert(NM_IS_VPN_CONNECTION(active));
|
2012-09-14 15:21:29 -05:00
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDBusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, we now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of the D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. The generated code would also hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means that changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without a strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
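To illustrate the freeze/thaw grouping mentioned above, a generic GObject
sketch (the property names are purely illustrative, not NetworkManager API):

    #include <glib-object.h>

    static void
    update_properties(GObject *obj)
    {
        /* Queue "notify" emissions instead of firing one per setter... */
        g_object_freeze_notify(obj);
        g_object_set(obj, "first-property", 1, NULL);
        g_object_set(obj, "second-property", 2, NULL);
        /* ...and emit the pending notifications together on thaw. */
        g_object_thaw_notify(obj);
    }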
2018-02-26 13:51:52 +01:00
|
|
|
nm_dbus_object_export(NM_DBUS_OBJECT(active));
|
2018-02-05 15:17:06 +01:00
|
|
|
if (!nm_vpn_manager_activate_connection(NM_MANAGER_GET_PRIVATE(self)->vpn_manager,
|
|
|
|
|
NM_VPN_CONNECTION(active),
|
|
|
|
|
error)) {
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
nm_dbus_object_unexport(NM_DBUS_OBJECT(active));
|
2018-02-05 15:17:06 +01:00
|
|
|
return FALSE;
|
|
|
|
|
}
|
manager: unexport VPN connections when the activation fails early
When a VPN connection can't be activated we have to unexport and
dispose it. Commit f2182fbf9b24 ("core: don't emit double
PropertiesChanged signal for new active connections") removed the call
to nm_exported_object_unexport() in case of failure because the active
connection already gets unreferenced on failure.
However, an exported object can't be disposed until it's explicitly
unexported because GDBus code keeps a reference to it. The result was
that the active connection was kept alive and exported, but without
explicit references to it. As soon as the connection was unexported,
it was also automatically disposed, causing issues like:
(src/nm-exported-object.c:1025):dispose: code should not be reached
#0 _g_log_abort () at /lib64/libglib-2.0.so.0
#1 g_logv () at /lib64/libglib-2.0.so.0
#2 g_log () at /lib64/libglib-2.0.so.0
#3 g_warn_message () at /lib64/libglib-2.0.so.0
#4 dispose (object=0xaaf110) at src/nm-exported-object.c:1025
#5 dispose (object=0xaaf110) at src/nm-active-connection.c:1246
#6 dispose (object=0xaaf110) at src/vpn/nm-vpn-connection.c:2642
#7 g_object_unref () at /lib64/libgobject-2.0.so.0
#8 registration_data_free () at /lib64/libgio-2.0.so.0
#9 g_hash_table_remove_internal () at /lib64/libglib-2.0.so.0
#10 g_dbus_object_manager_server_unexport_unlocked () at /lib64/libgio-2.0.so.0
#11 g_dbus_object_manager_server_unexport () at /lib64/libgio-2.0.so.0
#12 nm_bus_manager_unregister_object (self=0x9069e0, object=object@entry=0xaaf110) at src/nm-bus-manager.c:858
#13 nm_exported_object_unexport (self=0xaaf110) at src/nm-exported-object.c:714
#14 _settings_connection_removed (connection=<optimized out>, user_data=0xaaf110) at src/nm-active-connection.c:184
#15 g_closure_invoke () at /lib64/libgobject-2.0.so.0
#16 signal_emit_unlocked_R () at /lib64/libgobject-2.0.so.0
#17 g_signal_emit_valist () at /lib64/libgobject-2.0.so.0
#18 g_signal_emit_by_name () at /lib64/libgobject-2.0.so.0
#19 nm_settings_connection_signal_remove (self=self@entry=0x9e4a80, allow_reuse=allow_reuse@entry=0) at src/settings/nm-settings-connection.c:2085
#20 do_delete (self=0x9e4a80, callback=0x58106a <con_delete_cb>, user_data=0xa84fa0) at src/settings/nm-settings-connection.c:768
#21 do_delete (connection=0x9e4a80, callback=0x58106a <con_delete_cb>, user_data=0xa84fa0) at src/settings/plugins/keyfile/nms-keyfile-connection.c:127
#22 nm_settings_connection_delete (self=self@entry=0x9e4a80, callback=callback@entry=0x58106a <con_delete_cb>, user_data=0xa84fa0) at src/settings/nm-settings-connection.c:694
#23 delete_auth_cb (self=self@entry=0x9e4a80, context=context@entry=0x7fffd80131e0, subject=0x91fb40, error=<optimized out>, data=data@entry=0x0) at src/settings/nm-settings-connection.c:1879
#24 pk_auth_cb (chain=0x7fffd00024a0, chain_error=<optimized out>, context=0x7fffd80131e0, user_data=<optimized out>) at src/settings/nm-settings-connection.c:1351
#25 auth_chain_finish (user_data=0x7fffd00024a0) at src/nm-auth-utils.c:92
#26 g_idle_dispatch () at /lib64/libglib-2.0.so.0
Restore the unexport upon failure to fix this.
Fixes: f2182fbf9b2423bd8509b2f0cf218edd96dac32c
https://bugzilla.redhat.com/show_bug.cgi?id=1440077
(cherry picked from commit 69fd96118e9a5e6b613644c2cb61911d554e7f3b)
2017-04-08 09:43:42 +02:00
|
|
|
|
2018-02-05 15:17:06 +01:00
|
|
|
active_connection_add(self, active);
|
|
|
|
|
return TRUE;
|
2012-02-23 10:04:41 -06:00
|
|
|
}
|
|
|
|
|
|
2016-03-24 15:20:44 +01:00
|
|
|
/* Transition the device to the disconnected state. This means that the device is ready
|
|
|
|
|
* for connection and will proceed with activation if there's an activation request
|
|
|
|
|
* enqueued.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
unmanaged_to_disconnected(NMDevice *device)
|
|
|
|
|
{
|
|
|
|
|
/* when creating the software device, it can happen that the device is
|
|
|
|
|
* still unmanaged by NM_UNMANAGED_PLATFORM_INIT because we didn't yet
|
|
|
|
|
* get the udev event. At this point, we can no longer delay the activation,
|
|
|
|
|
* so we force the device to be managed. */
|
|
|
|
|
nm_device_set_unmanaged_by_flags(device,
|
|
|
|
|
NM_UNMANAGED_PLATFORM_INIT,
|
|
|
|
|
FALSE,
|
|
|
|
|
NM_DEVICE_STATE_REASON_USER_REQUESTED);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-03-24 15:20:44 +01:00
|
|
|
nm_device_set_unmanaged_by_flags(device,
|
|
|
|
|
NM_UNMANAGED_USER_EXPLICIT,
|
|
|
|
|
FALSE,
|
|
|
|
|
NM_DEVICE_STATE_REASON_USER_REQUESTED);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core: don't require manageable device in unmanaged_to_disconnected()
It seems the assert there is too strict. I don't really understand why
it fails, but I also don't see why the assert is supposed to hold.
Just return in case the device is unmanageable at this point.
The activation shall fail later.
Traceback from a test build of commit a7aca2ab08abcc5bee02f0f6f9ffe899919f4234:
#0 0x00007fdb28ffb643 in g_logv (log_domain=0x7fdb2b584cc9 "NetworkManager", log_level=G_LOG_LEVEL_CRITICAL, format=<optimized out>, args=args@entry=0x7fff10630200) at gmessages.c:1086
#1 0x00007fdb28ffb7bf in g_log (log_domain=log_domain@entry=0x7fdb2b584cc9 "NetworkManager", log_level=log_level@entry=G_LOG_LEVEL_CRITICAL, format=format@entry=0x7fdb29069190 "%s: assertion '%s' failed") at gmessages.c:1119
#2 0x00007fdb28ffb7f9 in g_return_if_fail_warning (log_domain=log_domain@entry=0x7fdb2b584cc9 "NetworkManager", pretty_function=pretty_function@entry=0x7fdb2b54fee0 <__func__.38922> "unmanaged_to_disconnected", expression=expression@entry=0x7fdb2b54d450 "nm_device_get_managed (device, FALSE)") at gmessages.c:1128
#3 0x00007fdb2b36e05b in unmanaged_to_disconnected (device=device@entry=0x7fdb2d2384f0 [NMDeviceVlan]) at src/nm-manager.c:3201
#4 0x00007fdb2b37eb3a in _internal_activate_generic (error=0x7fff106303d0, active=0x7fdb2d1d4550 [NMActRequest], self=0x0) at src/nm-manager.c:3430
#5 0x00007fdb2b37eb3a in _internal_activate_generic (self=self@entry=0x7fdb2d02b090 [NMManager], active=active@entry=0x7fdb2d1d4550 [NMActRequest], error=error@entry=0x7fff10630450) at src/nm-manager.c:3458
#6 0x00007fdb2b37fe90 in _activation_auth_done (active=0x7fdb2d1d4550 [NMActRequest], success=1, error_desc=0x0, user_data1=0x7fdb2d02b090, user_data2=0x7fdb0800bec0) at src/nm-manager.c:3866
#7 0x00007fdb2b4cc9d7 in auth_done (chain=0x7fdb2d17de30, error=0x0, unused=<optimized out>, user_data=<optimized out>) at src/nm-active-connection.c:929
#8 0x00007fdb2b4d6884 in auth_chain_finish (user_data=0x7fdb2d17de30) at src/nm-auth-utils.c:92
#9 0x00007fdb28ff4d7a in g_main_context_dispatch (context=0x7fdb2cff2e00) at gmain.c:3152
#10 0x00007fdb28ff4d7a in g_main_context_dispatch (context=context@entry=0x7fdb2cff2e00) at gmain.c:3767
#11 0x00007fdb28ff50b8 in g_main_context_iterate (context=0x7fdb2cff2e00, block=block@entry=1, dispatch=dispatch@entry=1, self=<optimized out>) at gmain.c:3838
#12 0x00007fdb28ff538a in g_main_loop_run (loop=0x7fdb2cff2ec0) at gmain.c:4032
#13 0x00007fdb2b349ed7 in main (argc=1, argv=0x7fff106307a8) at src/main.c:438
https://bugzilla.redhat.com/show_bug.cgi?id=1478911
2017-09-04 13:11:08 +02:00
|
|
|
if (!nm_device_get_managed(device, FALSE)) {
|
|
|
|
|
/* the device is still marked as unmanaged. Nothing to do. */
|
|
|
|
|
return;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-03-24 15:20:44 +01:00
|
|
|
if (nm_device_get_state(device) == NM_DEVICE_STATE_UNMANAGED) {
|
|
|
|
|
nm_device_state_changed(device,
|
2016-10-25 15:27:57 +02:00
|
|
|
NM_DEVICE_STATE_UNAVAILABLE,
|
|
|
|
|
NM_DEVICE_STATE_REASON_USER_REQUESTED);
|
2016-03-24 15:20:44 +01:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-02-05 15:01:48 +01:00
|
|
|
if (nm_device_get_state(device) == NM_DEVICE_STATE_UNAVAILABLE
|
|
|
|
|
&& nm_device_is_available(device, NM_DEVICE_CHECK_DEV_AVAILABLE_FOR_USER_REQUEST)) {
|
2016-03-24 15:20:44 +01:00
|
|
|
nm_device_state_changed(device,
|
2016-10-25 15:27:57 +02:00
|
|
|
NM_DEVICE_STATE_DISCONNECTED,
|
|
|
|
|
NM_DEVICE_STATE_REASON_USER_REQUESTED);
|
2016-03-24 15:20:44 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
core: improve and fix keeping connection active based on "connection.permissions"
2018-11-21 13:30:16 +01:00
|
|
|
static NMActivationStateFlags
|
|
|
|
|
_activation_bind_lifetime_to_profile_visibility(NMAuthSubject *subject)
|
|
|
|
|
{
|
2019-12-17 20:36:18 +01:00
|
|
|
if (nm_auth_subject_get_subject_type(subject) == NM_AUTH_SUBJECT_TYPE_INTERNAL
|
core: improve and fix keeping connection active based on "connection.permissions"
2018-11-21 13:30:16 +01:00
|
|
|
|| nm_auth_subject_get_unix_process_uid(subject) == 0) {
|
|
|
|
|
/* internal requests and requests from root are always unbound. */
|
|
|
|
|
return NM_ACTIVATION_STATE_FLAG_NONE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* if the activation was not done by internal decision nor root, there
|
|
|
|
|
* are the following cases:
|
|
|
|
|
*
|
|
|
|
|
* - the connection has "connection.permissions" unset and the profile
|
|
|
|
|
* is not restricted to a user and is commonly always visible. It does
|
|
|
|
|
* not hurt to bind the lifetime, because we expect the profile to be
|
|
|
|
|
* visible at the moment. If the profile changes (while still being active),
|
|
|
|
|
* we want to pick up changes to the visibility and possibly disconnect.
|
|
|
|
|
*
|
|
|
|
|
* - the connection has "connection.permissions" set, and the current user
|
|
|
|
|
* is the owner:
|
|
|
|
|
*
|
|
|
|
|
* - Usually, we would expect that the profile is visible at the moment,
|
|
|
|
|
* and of course we want to bind the lifetime. The moment the user
|
|
|
|
|
* logs out, the connection becomes invisible and disconnects.
|
|
|
|
|
*
|
|
|
|
|
* - the profile at this time could already be invisible (e.g. if the
|
2019-01-11 17:07:03 -02:00
|
|
|
* user didn't create a proper session (sudo) and manually activates
|
core: improve and fix keeping connection active based on "connection.permissions"
2018-11-21 13:30:16 +01:00
|
|
|
* an invisible profile. In this case, we still want to bind the
|
|
|
|
|
* lifetime, and it will disconnect after the user logs in and logs
|
|
|
|
|
* out again. NMKeepAlive takes care of that.
|
|
|
|
|
*/
|
|
|
|
|
return NM_ACTIVATION_STATE_FLAG_LIFETIME_BOUND_TO_PROFILE_VISIBILITY;
|
|
|
|
|
}
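A hypothetical sketch of a call site (the actual callers live elsewhere in
this file; only the function above is taken from the source):

    NMActivationStateFlags flags;

    flags = _activation_bind_lifetime_to_profile_visibility(subject);
    /* flags is then passed on to the newly created active connection, so
     * that NMKeepAlive can tear the activation down once the profile's
     * visibility is lost. */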
|
|
|
|
|
|
2016-03-24 15:20:44 +01:00
|
|
|
/* The parent connection is ready; we can proceed realizing the device and
|
|
|
|
|
* progressing the device to the disconnected state.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
active_connection_parent_active(NMActiveConnection *active,
|
|
|
|
|
NMActiveConnection *parent_ac,
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self)
|
2016-03-24 15:20:44 +01:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device = nm_active_connection_get_device(active);
|
|
|
|
|
GError *error = NULL;
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *parent;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2021-08-05 09:06:12 +02:00
|
|
|
g_signal_handlers_disconnect_by_func(active, G_CALLBACK(active_connection_parent_active), self);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-02-05 15:51:33 +01:00
|
|
|
if (!parent_ac) {
|
2016-04-27 18:26:39 +02:00
|
|
|
_LOGW(LOGD_CORE,
|
|
|
|
|
"The parent connection device '%s' depended on disappeared.",
|
|
|
|
|
nm_device_get_iface(device));
|
2018-02-05 16:45:33 +01:00
|
|
|
nm_active_connection_set_state_fail(active,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_REASON_DEVICE_REMOVED,
|
|
|
|
|
"parent device disappeared");
|
2018-02-05 15:51:33 +01:00
|
|
|
return;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn = nm_active_connection_get_settings_connection(active);
|
2018-02-05 15:51:33 +01:00
|
|
|
parent = nm_active_connection_get_device(parent_ac);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
if (!nm_device_create_and_realize(device,
|
|
|
|
|
nm_settings_connection_get_connection(sett_conn),
|
|
|
|
|
parent,
|
|
|
|
|
&error)) {
|
2018-02-05 15:51:33 +01:00
|
|
|
_LOGW(LOGD_CORE,
|
|
|
|
|
"Could not realize device '%s': %s",
|
|
|
|
|
nm_device_get_iface(device),
|
|
|
|
|
error->message);
|
2018-02-05 16:45:33 +01:00
|
|
|
nm_active_connection_set_state_fail(active,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_REASON_DEVICE_REALIZE_FAILED,
|
|
|
|
|
"failure to realize device");
|
2018-02-05 15:51:33 +01:00
|
|
|
return;
|
2016-03-24 15:20:44 +01:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-02-05 15:51:33 +01:00
|
|
|
/* We can now move to the disconnected state so that activation proceeds. */
|
|
|
|
|
unmanaged_to_disconnected(device);
|
2016-03-24 15:20:44 +01:00
|
|
|
}
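For completeness, the handler above is presumably attached earlier with
something like the following (the "parent-active" signal name is an
assumption; consult nm-active-connection.h for the actual definition):

    g_signal_connect(active,
                     "parent-active",
                     G_CALLBACK(active_connection_parent_active),
                     self);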
|
|
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
static gboolean
|
|
|
|
|
_internal_activate_device(NMManager *self, NMActiveConnection *active, GError **error)
|
2008-03-26 13:43:01 +00:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device, *master_device = NULL;
|
|
|
|
|
NMConnection *applied;
|
|
|
|
|
NMSettingsConnection *sett_conn;
|
|
|
|
|
NMSettingsConnection *master_connection = NULL;
|
|
|
|
|
NMConnection *existing_connection = NULL;
|
|
|
|
|
NMActiveConnection *master_ac = NULL;
|
|
|
|
|
NMAuthSubject *subject;
|
|
|
|
|
GError *local = NULL;
|
2018-04-18 11:08:05 +02:00
|
|
|
NMConnectionMultiConnect multi_connect;
|
2021-11-09 13:28:54 +01:00
|
|
|
const char *parent_spec;
|
2008-03-26 13:43:01 +00:00
|
|
|
|
2013-10-31 14:13:33 +01:00
|
|
|
g_return_val_if_fail(NM_IS_MANAGER(self), FALSE);
|
|
|
|
|
g_return_val_if_fail(NM_IS_ACTIVE_CONNECTION(active), FALSE);
|
|
|
|
|
g_return_val_if_fail(error == NULL || *error == NULL, FALSE);
|
|
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
g_assert(NM_IS_VPN_CONNECTION(active) == FALSE);
|
2008-03-26 13:43:01 +00:00
|
|
|
|
2018-04-12 09:48:16 +02:00
|
|
|
device = nm_active_connection_get_device(active);
|
|
|
|
|
g_return_val_if_fail(device != NULL, FALSE);
|
|
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn = nm_active_connection_get_settings_connection(active);
|
|
|
|
|
nm_assert(sett_conn);
|
2009-10-16 11:52:27 -07:00
|
|
|
|
2015-07-14 16:53:24 +02:00
|
|
|
applied = nm_active_connection_get_applied_connection(active);
|
|
|
|
|
|
2014-10-15 21:17:45 -05:00
|
|
|
/* If the device is active and its connection is not visible to the
|
|
|
|
|
* user that's requesting this new activation, fail, since other users
|
|
|
|
|
* should not be allowed to implicitly deactivate private connections
|
|
|
|
|
* by activating a connection of their own.
|
|
|
|
|
*/
|
|
|
|
|
existing_connection = nm_device_get_applied_connection(device);
|
|
|
|
|
subject = nm_active_connection_get_subject(active);
|
2018-04-12 09:48:16 +02:00
|
|
|
if (existing_connection
|
|
|
|
|
&& !nm_auth_is_subject_in_acl_set_error(existing_connection,
|
|
|
|
|
subject,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
error)) {
|
|
|
|
|
g_prefix_error(error, "Private connection already active on the device: ");
|
2014-10-15 21:17:45 -05:00
|
|
|
return FALSE;
|
2012-02-26 17:27:42 -06:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-11-04 19:51:28 -06:00
|
|
|
/* Final connection must be available on device */
|
2018-07-10 11:17:05 +02:00
|
|
|
if (!nm_device_check_connection_available(device,
|
|
|
|
|
applied,
|
|
|
|
|
NM_DEVICE_CHECK_CON_AVAILABLE_FOR_USER_REQUEST,
|
|
|
|
|
NULL,
|
|
|
|
|
&local)) {
|
2013-11-04 19:51:28 -06:00
|
|
|
g_set_error(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_CONNECTION,
|
2018-07-10 11:17:05 +02:00
|
|
|
"Connection '%s' is not available on device %s because %s",
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
nm_settings_connection_get_id(sett_conn),
|
2018-07-10 11:17:05 +02:00
|
|
|
nm_device_get_iface(device),
|
|
|
|
|
local->message);
|
|
|
|
|
g_error_free(local);
|
2013-08-28 16:19:20 -05:00
|
|
|
return FALSE;
|
2012-02-26 17:27:42 -06:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-17 07:31:13 +02:00
|
|
|
if (nm_active_connection_get_activation_type(active) == NM_ACTIVATION_TYPE_MANAGED)
|
|
|
|
|
nm_device_sys_iface_state_set(device, NM_DEVICE_SYS_IFACE_STATE_MANAGED);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-01-28 18:45:12 +01:00
|
|
|
/* Try to find the master connection/device if the connection has a dependency */
|
|
|
|
|
if (!find_master(self,
|
|
|
|
|
applied,
|
|
|
|
|
device,
|
|
|
|
|
&master_connection,
|
|
|
|
|
&master_device,
|
|
|
|
|
&master_ac,
|
|
|
|
|
error)) {
|
|
|
|
|
g_prefix_error(error,
|
|
|
|
|
"Can not find a master for %s: ",
|
|
|
|
|
nm_settings_connection_get_id(sett_conn));
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-09-24 16:58:07 -05:00
|
|
|
/* Create any backing resources the device needs */
|
|
|
|
|
if (!nm_device_is_real(device)) {
|
|
|
|
|
NMDevice *parent;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
parent = find_parent_device_for_connection(self,
|
|
|
|
|
nm_settings_connection_get_connection(sett_conn),
|
2019-11-21 18:05:11 +01:00
|
|
|
NULL,
|
|
|
|
|
&parent_spec);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-11-21 18:05:11 +01:00
|
|
|
if (parent_spec && !parent) {
|
|
|
|
|
g_set_error(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_DEPENDENCY_FAILED,
|
|
|
|
|
"parent device '%s' not found",
|
|
|
|
|
parent_spec);
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-03-24 15:20:44 +01:00
|
|
|
if (parent && !nm_device_is_real(parent)) {
|
|
|
|
|
NMSettingsConnection *parent_con;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMActiveConnection *parent_ac;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-03-24 15:20:44 +01:00
|
|
|
parent_con = nm_device_get_best_connection(parent, NULL, error);
|
|
|
|
|
if (!parent_con) {
|
|
|
|
|
g_prefix_error(error, "%s failed to create parent: ", nm_device_get_iface(device));
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-12-03 16:29:15 +01:00
|
|
|
if (nm_active_connection_get_activation_reason(active)
|
|
|
|
|
== NM_ACTIVATION_REASON_AUTOCONNECT
|
2020-06-03 18:35:07 +02:00
|
|
|
&& NM_FLAGS_HAS(nm_settings_connection_autoconnect_blocked_reason_get(parent_con),
|
|
|
|
|
NM_SETTINGS_AUTO_CONNECT_BLOCKED_REASON_USER_REQUEST)) {
|
2019-12-03 16:29:15 +01:00
|
|
|
g_set_error(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_DEPENDENCY_FAILED,
|
|
|
|
|
"the parent connection of %s cannot autoactivate because it is blocked "
|
|
|
|
|
"due to user request",
|
|
|
|
|
nm_device_get_iface(device));
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
parent_ac = nm_manager_activate_connection(
|
|
|
|
|
self,
|
|
|
|
|
parent_con,
|
|
|
|
|
NULL,
|
2020-09-28 16:03:33 +02:00
|
|
|
NULL,
|
2018-08-11 11:08:17 +02:00
|
|
|
parent,
|
2018-03-28 17:18:04 +02:00
|
|
|
subject,
|
|
|
|
|
NM_ACTIVATION_TYPE_MANAGED,
|
|
|
|
|
nm_active_connection_get_activation_reason(active),
|
core: improve and fix keeping connection active based on "connection.permissions"
By setting "connection.permissions", a profile is restricted to a
particular user.
That means, for example, that another user cannot see, modify, delete,
activate or deactivate the profile. It also means that the profile
will only autoconnect when the user is logged in (has a session).
Note that root is always able to activate the profile. Likewise, the
user is also allowed to manually activate their own profile, even if no
session currently exists (which can easily happen with `sudo`).
When the user logs out (the session goes away), we want to disconnect
the profile; however, there are conflicting goals here:
1) if the profile was activated by the root user, then logging the user
out should not disconnect the profile. The patch fixes that by not
binding the activation to the connection, if the activation is done
by the root user.
2) if the profile was activated by the owner when it had no session,
then it should stay alive until the user logs in (once) and logs
out again. This is already handled by the previous commit.
Yes, this point is odd. If you first do
$ sudo -u $OTHER_USER nmcli connection up $PROFILE
the profile activates despite not having a session. If you then
$ ssh guest@localhost nmcli device
you'll still see the profile active. However, the moment the SSH session
ends, a session closes and the profile disconnects. It's unclear how to
solve that any better. I think a user who cares about this should not
activate the profile without having a session in the first place.
There are quite a few special cases, in particular with internal
activations. In those cases we need to decide whether to bind the
activation to the profile's visibility.
Also, expose the "bind" setting in the D-Bus API. Note that in the future
this flag may be modified via the D-Bus API. Likewise, we may add related API
that allows tweaking the lifetime of the activation.
Also, I think we broke handling of connection visibility with 37e8c53eeed
"core: Introduce helper class to track connection keep alive". This
should be fixed now too, with improved behavior.
Fixes: 37e8c53eeed579fe34a68819cd12f3295d581394
https://bugzilla.redhat.com/show_bug.cgi?id=1530977
2018-11-21 13:30:16 +01:00
|
|
|
nm_active_connection_get_state_flags(active)
|
|
|
|
|
& NM_ACTIVATION_STATE_FLAG_LIFETIME_BOUND_TO_PROFILE_VISIBILITY,
|
2018-03-28 17:18:04 +02:00
|
|
|
error);
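The session-binding rules from the "connection.permissions" commit message above can be condensed into one decision. The following is a hypothetical simplification for illustration; the function and its boolean inputs are assumptions, not NetworkManager API:

#include <stdbool.h>
#include <stdio.h>

/* Should a manual activation be bound to the profile owner's session,
 * i.e. torn down when the session goes away? A simplified reading of
 * the two rules described above. */
static bool
bind_activation_to_owner_session(bool activated_by_root, bool owner_has_session)
{
    if (activated_by_root)
        return false; /* rule 1: root activations outlive the owner's session */
    if (!owner_has_session)
        return false; /* rule 2: bind only once the owner logs in (and out) again */
    return true;
}

int
main(void)
{
    printf("root, no session    -> bind=%d\n", bind_activation_to_owner_session(true, false));
    printf("owner, no session   -> bind=%d\n", bind_activation_to_owner_session(false, false));
    printf("owner, with session -> bind=%d\n", bind_activation_to_owner_session(false, true));
    return 0;
}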
|
2016-03-24 15:20:44 +01:00
|
|
|
if (!parent_ac) {
|
|
|
|
|
g_prefix_error(error,
|
|
|
|
|
"%s failed to activate parent: ",
|
|
|
|
|
nm_device_get_iface(device));
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-03-24 15:20:44 +01:00
|
|
|
/* We can't realize now; defer until the parent device is ready. */
|
|
|
|
|
g_signal_connect(active,
|
|
|
|
|
NM_ACTIVE_CONNECTION_PARENT_ACTIVE,
|
2021-08-05 09:06:12 +02:00
|
|
|
G_CALLBACK(active_connection_parent_active),
|
2016-03-24 15:20:44 +01:00
|
|
|
self);
|
|
|
|
|
nm_active_connection_set_parent(active, parent_ac);
|
|
|
|
|
} else {
|
|
|
|
|
/* We can realize now; no need to wait for a parent device. */
|
2018-08-11 11:08:17 +02:00
|
|
|
if (!nm_device_create_and_realize(device,
|
|
|
|
|
nm_settings_connection_get_connection(sett_conn),
|
|
|
|
|
parent,
|
|
|
|
|
error)) {
|
2016-03-24 15:20:44 +01:00
|
|
|
g_prefix_error(error,
|
|
|
|
|
"%s failed to create resources: ",
|
|
|
|
|
nm_device_get_iface(device));
|
|
|
|
|
return FALSE;
|
2014-09-24 16:58:07 -05:00
|
|
|
}
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
|
|
|
|
|
2012-02-26 17:27:42 -06:00
|
|
|
/* Ensure there's a master active connection that the new connection we're
|
|
|
|
|
* activating can depend on.
|
|
|
|
|
*/
|
|
|
|
|
if (master_connection || master_device) {
|
|
|
|
|
if (master_connection) {
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD(LOGD_CORE,
|
|
|
|
|
"Activation of '%s' requires master connection '%s'",
|
2018-08-11 11:08:17 +02:00
|
|
|
nm_settings_connection_get_id(sett_conn),
|
2016-03-02 11:38:26 +01:00
|
|
|
nm_settings_connection_get_id(master_connection));
|
2012-02-26 17:27:42 -06:00
|
|
|
}
|
|
|
|
|
if (master_device) {
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD(LOGD_CORE,
|
|
|
|
|
"Activation of '%s' requires master device '%s'",
|
2018-08-11 11:08:17 +02:00
|
|
|
nm_settings_connection_get_id(sett_conn),
|
2016-03-02 11:38:26 +01:00
|
|
|
nm_device_get_ip_iface(master_device));
|
2012-02-26 17:27:42 -06:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
/* Ensure that e.g. the slave is a bond slave and the candidate master is a bond master */
|
2018-08-11 11:08:17 +02:00
|
|
|
if (master_connection
|
|
|
|
|
&& !is_compatible_with_slave(nm_settings_connection_get_connection(master_connection),
|
|
|
|
|
applied)) {
|
2016-03-30 09:00:06 +02:00
|
|
|
g_set_error(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_DEPENDENCY_FAILED,
|
|
|
|
|
"The master connection '%s' is not compatible with '%s'",
|
|
|
|
|
nm_settings_connection_get_id(master_connection),
|
2018-08-11 11:08:17 +02:00
|
|
|
nm_settings_connection_get_id(sett_conn));
|
2013-08-28 16:19:20 -05:00
|
|
|
return FALSE;
|
2012-02-26 17:27:42 -06:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2012-02-26 17:27:42 -06:00
|
|
|
if (!master_ac) {
|
2014-01-28 17:24:26 -05:00
|
|
|
master_ac =
|
|
|
|
|
ensure_master_active_connection(self,
|
|
|
|
|
nm_active_connection_get_subject(active),
|
2015-07-14 16:53:24 +02:00
|
|
|
applied,
|
2014-01-28 17:24:26 -05:00
|
|
|
device,
|
|
|
|
|
master_connection,
|
|
|
|
|
master_device,
|
2018-03-28 17:18:04 +02:00
|
|
|
nm_active_connection_get_activation_reason(active),
|
2014-01-28 17:24:26 -05:00
|
|
|
error);
|
|
|
|
|
if (!master_ac) {
|
2016-03-30 09:00:06 +02:00
|
|
|
if (master_device) {
|
|
|
|
|
g_prefix_error(error,
|
|
|
|
|
"Master device '%s' can't be activated: ",
|
2021-01-25 14:56:46 +01:00
|
|
|
nm_device_get_ip_iface(master_device));
|
2016-03-30 09:00:06 +02:00
|
|
|
} else {
|
|
|
|
|
g_prefix_error(error,
|
|
|
|
|
"Master connection '%s' can't be activated: ",
|
2021-01-25 14:56:46 +01:00
|
|
|
nm_settings_connection_get_id(master_connection));
|
2016-03-30 09:00:06 +02:00
|
|
|
}
|
2014-01-28 17:24:26 -05:00
|
|
|
return FALSE;
|
|
|
|
|
}
|
2008-04-07 Dan Williams <dcbw@redhat.com>
* include/NetworkManager.h
- Remove the DOWN and CANCELLED device states
- Add UNMANAGED and UNAVAILABLE device states
- Document the device states
* introspection/nm-device.xml
src/nm-device-interface.c
src/nm-device-interface.h
- Add the 'managed' property
* test/nm-tool.c
- (detail_device): print out device state
* src/NetworkManagerSystem.h
src/backends/NetworkManagerArch.c
src/backends/NetworkManagerDebian.c
src/backends/NetworkManagerFrugalware.c
src/backends/NetworkManagerGentoo.c
src/backends/NetworkManagerMandriva.c
src/backends/NetworkManagerPaldo.c
src/backends/NetworkManagerRedHat.c
src/backends/NetworkManagerSlackware.c
src/backends/NetworkManagerSuSE.c
- (nm_system_device_get_system_config, nm_system_device_get_disabled
nm_system_device_free_system_config): remove; they were unused and
their functionality should be re-implemented in each distro's
system settings service plugin
* src/nm-gsm-device.c
src/nm-gsm-device.h
src/nm-cdma-device.c
src/nm-cdma-device.h
- (*_new): take the 'managed' argument
* src/nm-device.c
- (nm_device_set_address): remove, fold into nm_device_bring_up()
- (nm_device_init): start in unmanaged state, not disconnected
- (constructor): don't start device until the system settings service
has had a chance to figure out if the device is managed or not
- (nm_device_deactivate, nm_device_bring_up, nm_device_bring_down):
don't set device state here, let callers handle that as appropriate
- (nm_device_dispose): don't touch the device if it's not managed
- (set_property, get_property, nm_device_class_init): implement the
'managed' property
- (nm_device_state_changed): bring the device up if it's now managed,
and deactivate it if it used to be active
- (nm_device_get_managed, nm_device_set_managed): do the right thing
with the managed state
* src/nm-hal-manager.c
- (wired_device_creator, wireless_device_creator, modem_device_creator):
take initial managed state and pass it along to device constructors
- (create_device_and_add_to_list): get managed state and pass to
type creators
* src/nm-device-802-11-wireless.c
- (real_can_activate): fold in most of
nm_device_802_11_wireless_can_activate()
- (can_scan): can't scan in UNAVAILABLE or UNMANAGED
- (link_timeout_cb): instead of deactivating, change device state and
let the device state handler do it
- (real_update_hw_address): clean up
- (state_changed_cb): when entering UNAVAILABLE state, schedule an idle
handler to transition to DISCONNECTED if the device isn't rfkilled
* src/nm-device-802-3-ethernet.c
- (set_carrier): move above callers and get rid of prototype
- (device_state_changed): when entering UNAVAILABLE state, schedule an
idle handler to transition to DISCONNECTED if the device has a
carrier
- (real_update_hw_address): clean up
- (link_timeout_cb, ppp_state_changed): change state instead of calling
deactivation directly as deactivation doesn't change state anymore
* src/NetworkManagerPolicy.c
- (schedule_activate_check): yay, remove wireless_enabled hack since
the NMManager and wireless devices work that out themselves now
- (device_state_changed): change to a switch and update for new device
states
- (device_carrier_changed): remove; device handles this now through
state changes
- (device_added): don't care about carrier any more; the initial
activation check will happen when the device transitions to
DISCONNECTED
* src/nm-manager.c
- (dispose): clear unmanaged devices
- (handle_unmanaged_devices): update unmanaged device list and toggle
the managed property on each device when needed
- (system_settings_properties_changed_cb): handle signals from the
system settings service
- (system_settings_get_unmanaged_devices_cb): handle callback from
getting the unmanaged device list method call
- (query_unmanaged_devices): ask the system settings service for its
list of unmanaged devices
- (nm_manager_name_owner_changed, initial_get_connections): get unmanaged
devices
- (manager_set_wireless_enabled): push rfkill state down to wireless
devices directly and let them handle the necessary state transitions
- (manager_device_state_changed): update for new device states
- (nm_manager_add_device): set initial rfkill state on wireless devices
- (nm_manager_remove_device): don't touch the device if it's unmanaged
- (nm_manager_activate_connection): return error if the device is
unmanaged
- (nm_manager_sleep): handle new device states correctly; don't change
the state of unavailable/unmanaged devices
* libnm-glib/nm-device-802-11-wireless.c
- (state_changed_cb): update for new device states
git-svn-id: http://svn-archive.gnome.org/svn/NetworkManager/trunk@3540 4912f4e0-d625-0410-9fb7-b9a5a253dbdc
2008-04-08 02:58:02 +00:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-01-25 13:27:16 +01:00
|
|
|
/* Now that we're activating a slave for that master, make sure the master doesn't just
|
|
|
|
|
* decide to go unmanaged while we're activating (perhaps because other slaves
|
|
|
|
|
* go away, leaving it with no slaves).
|
|
|
|
|
*/
|
2017-01-26 14:18:20 +01:00
|
|
|
if (master_device) {
|
|
|
|
|
nm_device_set_unmanaged_by_flags(master_device,
|
|
|
|
|
NM_UNMANAGED_EXTERNAL_DOWN,
|
|
|
|
|
NM_UNMAN_FLAG_OP_FORGET,
|
|
|
|
|
NM_DEVICE_STATE_REASON_USER_REQUESTED);
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
nm_active_connection_set_master(active, master_ac);
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD(LOGD_CORE,
|
|
|
|
|
"Activation of '%s' depends on active connection %p %s",
|
2018-08-11 11:08:17 +02:00
|
|
|
nm_settings_connection_get_id(sett_conn),
|
2016-03-02 11:38:26 +01:00
|
|
|
master_ac,
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDBusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, we now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of the D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined, and signals
are guaranteed to be emitted for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without a strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
|
|
|
nm_dbus_object_get_path(NM_DBUS_OBJECT(master_ac)) ?: "");
|
2008-03-26 13:43:01 +00:00
|
|
|
}
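The lower-layer approach described in the "core/dbus: rework" commit message above amounts to registering a static interface descriptor plus a vtable directly on a GDBusConnection. Here is a minimal, self-contained GIO sketch; org.example.Demo and its Ping method are made up, and this is not NetworkManager code:

/* build: gcc demo.c $(pkg-config --cflags --libs gio-2.0) */
#include <gio/gio.h>

/* A static interface description, analogous to the GDBusInterfaceInfo
 * descriptors that replace the generated skeletons. */
static const char introspection_xml[] = "<node>"
                                        "  <interface name='org.example.Demo'>"
                                        "    <method name='Ping'>"
                                        "      <arg type='s' name='reply' direction='out'/>"
                                        "    </method>"
                                        "  </interface>"
                                        "</node>";

static void
handle_method_call(GDBusConnection       *connection,
                   const char            *sender,
                   const char            *object_path,
                   const char            *interface_name,
                   const char            *method_name,
                   GVariant              *parameters,
                   GDBusMethodInvocation *invocation,
                   gpointer               user_data)
{
    if (g_strcmp0(method_name, "Ping") == 0)
        g_dbus_method_invocation_return_value(invocation, g_variant_new("(s)", "pong"));
}

static const GDBusInterfaceVTable vtable = {
    .method_call = handle_method_call,
};

int
main(void)
{
    GDBusConnection *bus  = g_bus_get_sync(G_BUS_TYPE_SESSION, NULL, NULL);
    GDBusNodeInfo   *node = g_dbus_node_info_new_for_xml(introspection_xml, NULL);
    GMainLoop       *loop;

    /* One descriptor, one vtable, no generated glue in between. */
    g_dbus_connection_register_object(bus,
                                      "/org/example/Demo",
                                      node->interfaces[0],
                                      &vtable,
                                      NULL,
                                      NULL,
                                      NULL);

    loop = g_main_loop_new(NULL, FALSE);
    g_main_loop_run(loop);
    return 0;
}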
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2015-05-06 14:38:55 +02:00
|
|
|
/* Check slaves for master connection and possibly activate them */
|
2019-07-02 11:51:29 +02:00
|
|
|
autoconnect_slaves(self,
|
|
|
|
|
sett_conn,
|
|
|
|
|
device,
|
|
|
|
|
nm_active_connection_get_subject(active),
|
|
|
|
|
nm_active_connection_get_activation_reason(active)
|
|
|
|
|
== NM_ACTIVATION_REASON_USER_REQUEST);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
multi_connect =
|
|
|
|
|
_nm_connection_get_multi_connect(nm_settings_connection_get_connection(sett_conn));
|
2018-04-18 11:08:05 +02:00
|
|
|
if (multi_connect == NM_CONNECTION_MULTI_CONNECT_MULTIPLE
|
|
|
|
|
|| (multi_connect == NM_CONNECTION_MULTI_CONNECT_MANUAL_MULTIPLE
|
|
|
|
|
&& NM_IN_SET(nm_active_connection_get_activation_reason(active),
|
|
|
|
|
NM_ACTIVATION_REASON_ASSUME,
|
|
|
|
|
NM_ACTIVATION_REASON_AUTOCONNECT_SLAVES,
|
|
|
|
|
NM_ACTIVATION_REASON_USER_REQUEST))) {
|
|
|
|
|
/* the profile can be activated multiple times. Proceed. */
|
|
|
|
|
} else {
|
2018-04-22 14:45:40 +02:00
|
|
|
gs_unref_ptrarray GPtrArray *all_ac_arr = NULL;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMActiveConnection *ac;
|
2018-04-22 14:45:40 +02:00
|
|
|
guint i, n_all;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-22 14:45:40 +02:00
|
|
|
/* Disconnect the connection if already connected or queued for activation.
|
|
|
|
|
* The connection cannot be active multiple times (at the same time). */
|
2018-08-11 11:08:17 +02:00
|
|
|
ac = active_connection_find(self,
|
|
|
|
|
sett_conn,
|
|
|
|
|
NULL,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_ACTIVATED,
|
2021-05-13 10:49:39 +02:00
|
|
|
FALSE,
|
2018-04-22 14:45:40 +02:00
|
|
|
&all_ac_arr);
|
|
|
|
|
if (ac) {
|
|
|
|
|
n_all = all_ac_arr ? all_ac_arr->len : ((guint) 1);
|
|
|
|
|
for (i = 0; i < n_all; i++) {
|
|
|
|
|
nm_device_disconnect_active_connection(all_ac_arr ? all_ac_arr->pdata[i] : ac,
|
2018-11-20 14:25:42 +01:00
|
|
|
NM_DEVICE_STATE_REASON_NEW_ACTIVATION,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_REASON_UNKNOWN);
|
2018-04-22 14:45:40 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
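In short, the multi-connect handling above allows parallel activations only in the listed cases; everything else disconnects the existing active connection first. The following is a hypothetical condensation with illustrative enum names, not the real NM_CONNECTION_MULTI_CONNECT_* handling:

#include <stdbool.h>
#include <stdio.h>

typedef enum {
    MULTI_CONNECT_SINGLE,
    MULTI_CONNECT_MANUAL_MULTIPLE,
    MULTI_CONNECT_MULTIPLE,
} MultiConnect;

typedef enum {
    REASON_ASSUME,
    REASON_AUTOCONNECT,
    REASON_AUTOCONNECT_SLAVES,
    REASON_USER_REQUEST,
} ActivationReason;

/* May this profile be activated while another activation of it is alive? */
static bool
may_activate_in_parallel(MultiConnect multi_connect, ActivationReason reason)
{
    if (multi_connect == MULTI_CONNECT_MULTIPLE)
        return true;
    if (multi_connect == MULTI_CONNECT_MANUAL_MULTIPLE
        && (reason == REASON_ASSUME || reason == REASON_AUTOCONNECT_SLAVES
            || reason == REASON_USER_REQUEST))
        return true;
    return false; /* otherwise, disconnect the already-active instance first */
}

int
main(void)
{
    printf("multiple/autoconnect        -> %d\n",
           may_activate_in_parallel(MULTI_CONNECT_MULTIPLE, REASON_AUTOCONNECT));
    printf("manual-multiple/autoconnect -> %d\n",
           may_activate_in_parallel(MULTI_CONNECT_MANUAL_MULTIPLE, REASON_AUTOCONNECT));
    return 0;
}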
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-03-24 15:20:44 +01:00
|
|
|
/* If the device is there, we can ready it for the activation. */
|
2018-02-05 15:06:24 +01:00
|
|
|
if (nm_device_is_real(device)) {
|
2016-03-24 15:20:44 +01:00
|
|
|
unmanaged_to_disconnected(device);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-02-05 15:06:24 +01:00
|
|
|
if (!nm_device_get_managed(device, FALSE)) {
|
|
|
|
|
/* Unexpectedly, the device is still unmanaged. That can happen, for example,
|
|
|
|
|
* if the device is forcibly unmanaged due to NM_UNMANAGED_USER_SETTINGS. */
|
|
|
|
|
g_set_error_literal(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_DEPENDENCY_FAILED,
|
|
|
|
|
"Activation failed because the device is unmanaged");
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-03-20 11:22:19 -05:00
|
|
|
/* Export the new ActiveConnection to clients and start it on the device */
|
2018-02-05 15:17:06 +01:00
|
|
|
active_connection_add(self, active);
|
core: queue re-activations to allow DEACTIVATING state
If a device is already activated, queue the new activation to allow
the transition through the DEACTIVATING state.
---
Also remove the "HACK" bits in nm_device_deactivate(). This hack was
added on 2007-09-25 in commit 9c2848d. At the time, with user settings
services, if a client created a connection and requested that NM
activate it, NM may not have read the connection from the client over
D-Bus yet. So NM created a "deferred" activation request which waited
until the connection was read from the client, and then began activation.
The Policy watched for device state changes and other events (like
it does now) and activated a new device if the old one was no longer
valid. It specifically checked for deferred activations and then
did nothing. However, when the client's connection was read, then
nm-device.c cleared the deferred activation bit, leading to a short
period of time where the device was in DISCONNECTED state but there
was no deferred activation, because the device only changes state to
PREPARE from the idle handler for stage1. If other events happened
during this time, the policy would tear down the device that was
about to be activated. This early state transition to PREPARE
worked around that.
We need to remove it now though, because (a) the reason for its
existence is no longer valid, and (b) _device_activate() may now
be called from inside nm_device_state_changed() and thus it cannot
change to a new state inside the function.
2014-02-17 17:16:08 -06:00
|
|
|
nm_device_queue_activation(device, NM_ACT_REQUEST(active));
|
2013-08-28 16:19:20 -05:00
|
|
|
return TRUE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
_internal_activate_generic(NMManager *self, NMActiveConnection *active, GError **error)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
gboolean success = FALSE;
|
|
|
|
|
|
2014-02-26 13:18:16 -06:00
|
|
|
/* Ensure the activation request is still valid, e.g. that its device hasn't gone
|
|
|
|
|
* away or that some other dependency has not failed.
|
|
|
|
|
*/
|
|
|
|
|
if (nm_active_connection_get_state(active) >= NM_ACTIVE_CONNECTION_STATE_DEACTIVATING) {
|
|
|
|
|
g_set_error_literal(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_DEPENDENCY_FAILED,
|
|
|
|
|
"Activation failed because dependencies failed.");
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
if (NM_IS_VPN_CONNECTION(active))
|
|
|
|
|
success = _internal_activate_vpn(self, active, error);
|
|
|
|
|
else
|
|
|
|
|
success = _internal_activate_device(self, active, error);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
if (success) {
|
|
|
|
|
/* Force an update of the Manager's activating-connection property.
|
|
|
|
|
* The device changes state before the AC gets exported, which causes
|
|
|
|
|
* the manager's 'activating-connection' property to be NULL since the
|
|
|
|
|
* AC only gets a D-Bus path when it's exported. So now that the AC
|
|
|
|
|
* is exported, make sure the manager's activating-connection property
|
|
|
|
|
* is up-to-date.
|
|
|
|
|
*/
|
2018-06-28 18:05:05 +02:00
|
|
|
policy_activating_ac_changed(G_OBJECT(priv->policy), NULL, self);
|
2013-04-22 13:29:36 -05:00
|
|
|
}
|
2012-08-22 17:11:31 -05:00
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
return success;
|
2008-03-26 13:43:01 +00:00
|
|
|
}
|
|
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
static NMActiveConnection *
|
2021-11-09 13:28:54 +01:00
|
|
|
_new_active_connection(NMManager *self,
|
2018-04-12 11:32:18 +02:00
|
|
|
gboolean is_vpn,
|
2021-11-09 13:28:54 +01:00
|
|
|
NMSettingsConnection *sett_conn,
|
|
|
|
|
NMConnection *incompl_conn,
|
|
|
|
|
NMConnection *applied,
|
|
|
|
|
const char *specific_object,
|
|
|
|
|
NMDevice *device,
|
|
|
|
|
NMAuthSubject *subject,
|
2017-03-07 11:04:36 +01:00
|
|
|
NMActivationType activation_type,
|
2018-03-28 17:18:04 +02:00
|
|
|
NMActivationReason activation_reason,
|
2018-11-21 13:30:16 +01:00
|
|
|
NMActivationStateFlags initial_state_flags,
|
2021-11-09 13:28:54 +01:00
|
|
|
GError **error)
|
2013-08-28 16:19:20 -05:00
|
|
|
{
|
2018-04-12 13:37:40 +02:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *parent_device;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
nm_assert(!sett_conn || NM_IS_SETTINGS_CONNECTION(sett_conn));
|
|
|
|
|
nm_assert(!incompl_conn || NM_IS_CONNECTION(incompl_conn));
|
|
|
|
|
nm_assert((!incompl_conn) ^ (!sett_conn));
|
|
|
|
|
nm_assert(NM_IS_AUTH_SUBJECT(subject));
|
|
|
|
|
nm_assert(is_vpn
|
|
|
|
|
== _connection_is_vpn(sett_conn ? nm_settings_connection_get_connection(sett_conn)
|
|
|
|
|
: incompl_conn));
|
2018-04-19 15:30:31 +02:00
|
|
|
nm_assert(is_vpn || NM_IS_DEVICE(device));
|
2018-04-12 14:02:39 +02:00
|
|
|
nm_assert(!nm_streq0(specific_object, "/"));
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
nm_assert(!applied || NM_IS_CONNECTION(applied));
|
|
|
|
|
nm_assert(!is_vpn || !applied);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-12 11:32:18 +02:00
|
|
|
if (is_vpn) {
|
2018-04-12 14:02:39 +02:00
|
|
|
NMActiveConnection *parent;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-12 11:32:18 +02:00
|
|
|
/* FIXME: for VPN connections, we don't allow re-activating an
|
|
|
|
|
* already active connection. It's a bug, and should be fixed together
|
|
|
|
|
* when reworking VPN handling. */
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
if (active_connection_find_by_connection(self,
|
|
|
|
|
sett_conn,
|
|
|
|
|
incompl_conn,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_ACTIVATED,
|
|
|
|
|
NULL)) {
|
2018-04-12 11:32:18 +02:00
|
|
|
g_set_error(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_CONNECTION_ALREADY_ACTIVE,
|
|
|
|
|
"Connection '%s' is already active",
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn ? nm_settings_connection_get_id(sett_conn)
|
|
|
|
|
: nm_connection_get_id(incompl_conn));
|
2018-04-12 11:32:18 +02:00
|
|
|
return NULL;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-03-07 11:04:36 +01:00
|
|
|
if (activation_type != NM_ACTIVATION_TYPE_MANAGED)
|
|
|
|
|
g_return_val_if_reached(NULL);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-12 13:37:40 +02:00
|
|
|
if (specific_object) {
|
|
|
|
|
/* Find the specific connection the client requested we use */
|
|
|
|
|
parent = active_connection_get_by_path(self, specific_object);
|
|
|
|
|
if (!parent) {
|
|
|
|
|
g_set_error_literal(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_CONNECTION_NOT_ACTIVE,
|
|
|
|
|
"Base connection for VPN connection not active.");
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
} else
|
|
|
|
|
parent = priv->primary_connection;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-12 13:37:40 +02:00
|
|
|
if (!parent) {
|
|
|
|
|
g_set_error_literal(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_CONNECTION,
|
|
|
|
|
"Could not find source connection.");
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-19 15:30:31 +02:00
|
|
|
parent_device = nm_active_connection_get_device(parent);
|
|
|
|
|
if (!parent_device) {
|
2018-04-12 13:37:40 +02:00
|
|
|
g_set_error_literal(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_DEVICE,
|
|
|
|
|
"Source connection had no active device");
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-19 15:30:31 +02:00
|
|
|
if (device && device != parent_device) {
|
|
|
|
|
g_set_error_literal(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_DEVICE,
|
|
|
|
|
"The device doesn't match the active connection.");
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
return (NMActiveConnection *) nm_vpn_connection_new(
|
|
|
|
|
sett_conn,
|
2018-04-19 15:30:31 +02:00
|
|
|
parent_device,
|
2018-04-12 13:37:40 +02:00
|
|
|
nm_dbus_object_get_path(NM_DBUS_OBJECT(parent)),
|
|
|
|
|
activation_reason,
|
core: improve and fix keeping connection active based on "connection.permissions"
By setting "connection.permissions", a profile is restricted to a
particular user.
That means for example, that another user cannot see, modify, delete,
activate or deactivate the profile. It also means, that the profile
will only autoconnect when the user is logged in (has a session).
Note that root is always able to activate the profile. Likewise, the
user is also allowed to manually activate the own profile, even if no
session currently exists (which can easily happen with `sudo`).
When the user logs out (the session goes away), we want do disconnect
the profile, however there are conflicting goals here:
1) if the profile was activate by root user, then logging out the user
should not disconnect the profile. The patch fixes that by not
binding the activation to the connection, if the activation is done
by the root user.
2) if the profile was activated by the owner when it had no session,
then it should stay alive until the user logs in (once) and logs
out again. This is already handled by the previous commit.
Yes, this point is odd. If you first do
$ sudo -u $OTHER_USER nmcli connection up $PROFILE
the profile activates despite not having a session. If you then
$ ssh guest@localhost nmcli device
you'll still see the profile active. However, the moment the SSH session
ends, a session closes and the profile disconnects. It's unclear, how to
solve that any better. I think, a user who cares about this, should not
activate the profile without having a session in the first place.
There are quite some special cases, in particular with internal
activations. In those cases we need to decide whether to bind the
activation to the profile's visibility.
Also, expose the "bind" setting in the D-Bus API. Note, that in the future
this flag may be modified via D-Bus API. Like we may also add related API
that allows to tweak the lifetime of the activation.
Also, I think we broke handling of connection visiblity with 37e8c53eeed
"core: Introduce helper class to track connection keep alive". This
should be fixed now too, with improved behavior.
Fixes: 37e8c53eeed579fe34a68819cd12f3295d581394
https://bugzilla.redhat.com/show_bug.cgi?id=1530977
2018-11-21 13:30:16 +01:00
|
|
|
initial_state_flags,
|
2018-04-12 13:37:40 +02:00
|
|
|
subject);
|
2013-08-28 16:19:20 -05:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
return (NMActiveConnection *) nm_act_request_new(sett_conn,
|
2016-09-07 17:47:26 +02:00
|
|
|
applied,
|
2013-08-28 16:19:20 -05:00
|
|
|
specific_object,
|
|
|
|
|
subject,
|
2017-03-07 11:04:36 +01:00
|
|
|
activation_type,
|
2018-03-28 17:18:04 +02:00
|
|
|
activation_reason,
|
core: improve and fix keeping connection active based on "connection.permissions"
By setting "connection.permissions", a profile is restricted to a
particular user.
That means for example, that another user cannot see, modify, delete,
activate or deactivate the profile. It also means, that the profile
will only autoconnect when the user is logged in (has a session).
Note that root is always able to activate the profile. Likewise, the
user is also allowed to manually activate the own profile, even if no
session currently exists (which can easily happen with `sudo`).
When the user logs out (the session goes away), we want do disconnect
the profile, however there are conflicting goals here:
1) if the profile was activate by root user, then logging out the user
should not disconnect the profile. The patch fixes that by not
binding the activation to the connection, if the activation is done
by the root user.
2) if the profile was activated by the owner when it had no session,
then it should stay alive until the user logs in (once) and logs
out again. This is already handled by the previous commit.
Yes, this point is odd. If you first do
$ sudo -u $OTHER_USER nmcli connection up $PROFILE
the profile activates despite not having a session. If you then
$ ssh guest@localhost nmcli device
you'll still see the profile active. However, the moment the SSH session
ends, a session closes and the profile disconnects. It's unclear, how to
solve that any better. I think, a user who cares about this, should not
activate the profile without having a session in the first place.
There are quite some special cases, in particular with internal
activations. In those cases we need to decide whether to bind the
activation to the profile's visibility.
Also, expose the "bind" setting in the D-Bus API. Note, that in the future
this flag may be modified via D-Bus API. Like we may also add related API
that allows to tweak the lifetime of the activation.
Also, I think we broke handling of connection visiblity with 37e8c53eeed
"core: Introduce helper class to track connection keep alive". This
should be fixed now too, with improved behavior.
Fixes: 37e8c53eeed579fe34a68819cd12f3295d581394
https://bugzilla.redhat.com/show_bug.cgi?id=1530977
2018-11-21 13:30:16 +01:00
|
|
|
initial_state_flags,
|
2013-08-28 16:19:20 -05:00
|
|
|
device);
|
|
|
|
|
}
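
/* Illustrative sketch (not part of the original source): how an internal
 * caller might construct a managed, non-VPN activation with
 * _new_active_connection(). The wrapper name and the particular
 * reason/flags values chosen here are hypothetical examples; only the
 * callee and the enum constants appear elsewhere in this file or in
 * NetworkManager's headers. Guarded with #if 0 so it is never compiled. */
#if 0
static NMActiveConnection *
_example_internal_activation(NMManager *self,
                             NMSettingsConnection *sett_conn,
                             NMDevice *device,
                             NMAuthSubject *subject,
                             GError **error)
{
    /* Exactly one of sett_conn/incompl_conn must be set (see the asserts above). */
    return _new_active_connection(self,
                                  FALSE, /* is_vpn */
                                  sett_conn,
                                  NULL, /* incompl_conn */
                                  NULL, /* applied */
                                  NULL, /* specific_object */
                                  device,
                                  subject,
                                  NM_ACTIVATION_TYPE_MANAGED,
                                  NM_ACTIVATION_REASON_AUTOCONNECT,
                                  NM_ACTIVATION_STATE_FLAG_NONE,
                                  error);
}
#endif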

static void
_internal_activation_auth_done(NMManager *self,
                               NMActiveConnection *active,
                               gboolean success,
                               const char *error_desc)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
    NMActiveConnection *ac;
    gs_free_error GError *error = NULL;

    nm_assert(NM_IS_ACTIVE_CONNECTION(active));

    if (!success)
        goto fail;

    /* Don't continue with an autoconnect-activation if a more important activation
     * already exists.
     * We also check this earlier, but there we may fail to detect a duplicate
     * if the existing active connection was undergoing authorization.
     */
    if (NM_IN_SET(nm_active_connection_get_activation_reason(active),
                  NM_ACTIVATION_REASON_EXTERNAL,
                  NM_ACTIVATION_REASON_ASSUME,
                  NM_ACTIVATION_REASON_AUTOCONNECT)) {
        c_list_for_each_entry (ac, &priv->active_connections_lst_head, active_connections_lst) {
            if (nm_active_connection_get_device(ac) == nm_active_connection_get_device(active)
                && nm_active_connection_get_settings_connection(ac)
                       == nm_active_connection_get_settings_connection(active)
                && nm_active_connection_get_state(ac) <= NM_ACTIVE_CONNECTION_STATE_ACTIVATED) {
                g_set_error(&error,
                            NM_MANAGER_ERROR,
                            NM_MANAGER_ERROR_CONNECTION_ALREADY_ACTIVE,
                            "Connection '%s' is already active",
                            nm_active_connection_get_settings_connection_id(active));
                goto fail;
            }
        }
    }

    if (_internal_activate_generic(self, active, &error))
        return;

fail:
    _delete_volatile_connection_do(self, nm_active_connection_get_settings_connection(active));
    nm_assert(error_desc || error);
    nm_active_connection_set_state_fail(active,
                                        NM_ACTIVE_CONNECTION_STATE_REASON_UNKNOWN,
                                        error_desc ?: error->message);
}
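
/* Aside (not part of the original source): NM_IN_SET(x, a, b, ...) is
 * NetworkManager's helper macro that tests whether x equals any of the
 * listed values, roughly expanding to (x == a || x == b || ...), and
 * c_list_for_each_entry() walks an embedded circular list by its member
 * name. A minimal usage sketch in the style of the loop above, with all
 * identifiers taken from this file:
 *
 *   guint n = 0;
 *   NMActiveConnection *ac;
 *
 *   c_list_for_each_entry (ac, &priv->active_connections_lst_head, active_connections_lst) {
 *       if (NM_IN_SET(nm_active_connection_get_activation_reason(ac),
 *                     NM_ACTIVATION_REASON_ASSUME,
 *                     NM_ACTIVATION_REASON_EXTERNAL))
 *           n++;
 *   }
 */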

/**
 * nm_manager_activate_connection():
 * @self: the #NMManager
 * @sett_conn: the #NMSettingsConnection to activate on @device
 * @applied: (allow-none): the applied connection to activate on @device
 * @specific_object: the specific object path, if any, for the activation
 * @device: the #NMDevice to activate @sett_conn on. Can be %NULL for VPNs.
 * @subject: the subject which requested activation
 * @activation_type: whether to assume the connection. That is, take over gracefully,
 *   non-destructively.
 * @activation_reason: the reason for activation
 * @initial_state_flags: the initial state flags for the activation.
 * @error: return location for an error
 *
 * Begins a new internally-initiated activation of @sett_conn on @device.
 * @subject should be the subject of the activation that triggered this
 * one, or if this is an autoconnect request, a new internal subject.
 * The returned #NMActiveConnection is owned by the Manager and should be
 * referenced by the caller if the caller continues to use it. If @applied
 * is supplied, it shall not be modified by the caller afterwards.
 *
 * Returns: (transfer none): the new #NMActiveConnection that tracks
 * activation of @sett_conn on @device
 */
NMActiveConnection *
nm_manager_activate_connection(NMManager *self,
                               NMSettingsConnection *sett_conn,
                               NMConnection *applied,
                               const char *specific_object,
                               NMDevice *device,
                               NMAuthSubject *subject,
                               NMActivationType activation_type,
                               NMActivationReason activation_reason,
                               NMActivationStateFlags initial_state_flags,
                               GError **error)
{
    NMManagerPrivate *priv;
    NMActiveConnection *active;
    AsyncOpData *async_op_data;
    gboolean is_vpn;

    g_return_val_if_fail(NM_IS_MANAGER(self), NULL);
    g_return_val_if_fail(NM_IS_SETTINGS_CONNECTION(sett_conn), NULL);

    is_vpn = _connection_is_vpn(nm_settings_connection_get_connection(sett_conn));
    g_return_val_if_fail(is_vpn || NM_IS_DEVICE(device), NULL);
    g_return_val_if_fail(!error || !*error, NULL);
    nm_assert(!nm_streq0(specific_object, "/"));

    priv = NM_MANAGER_GET_PRIVATE(self);

    if (!nm_auth_is_subject_in_acl_set_error(nm_settings_connection_get_connection(sett_conn),
                                             subject,
                                             NM_MANAGER_ERROR,
                                             NM_MANAGER_ERROR_PERMISSION_DENIED,
                                             error))
        return NULL;

    /* Look for an active connection that's equivalent and is already pending authorization
     * and eventual activation. This is used to de-duplicate concurrent activations which would
     * otherwise race and cause the device to disconnect and reconnect repeatedly.
     * In particular, this allows the master and multiple slaves to concurrently auto-activate
     * while all the slaves would use the same active-connection. */
    c_list_for_each_entry (async_op_data, &priv->async_op_lst_head, async_op_lst) {
        if (async_op_data->async_op_type != ASYNC_OP_TYPE_AC_AUTH_ACTIVATE_INTERNAL)
            continue;

        active = async_op_data->ac_auth.active;
        if (sett_conn == nm_active_connection_get_settings_connection(active)
            && nm_streq0(nm_active_connection_get_specific_object(active), specific_object)
            && (!device || nm_active_connection_get_device(active) == device)
            && nm_auth_subject_get_subject_type(nm_active_connection_get_subject(active))
                   == NM_AUTH_SUBJECT_TYPE_INTERNAL
            && nm_auth_subject_get_subject_type(subject) == NM_AUTH_SUBJECT_TYPE_INTERNAL
            && nm_active_connection_get_activation_reason(active) == activation_reason)
            return active;
    }

    active = _new_active_connection(self,
                                    is_vpn,
                                    sett_conn,
                                    NULL,
                                    applied,
                                    specific_object,
                                    device,
                                    subject,
                                    activation_type,
                                    activation_reason,
|
core: improve and fix keeping connection active based on "connection.permissions"
By setting "connection.permissions", a profile is restricted to a
particular user.
That means, for example, that another user cannot see, modify, delete,
activate or deactivate the profile. It also means that the profile
will only autoconnect when the user is logged in (has a session).
Note that root is always able to activate the profile. Likewise, the
user is also allowed to manually activate their own profile, even if no
session currently exists (which can easily happen with `sudo`).
When the user logs out (the session goes away), we want to disconnect
the profile. However, there are conflicting goals here:
1) if the profile was activated by the root user, then logging out the
user should not disconnect the profile. The patch fixes that by not
binding the activation to the connection if the activation is done
by the root user.
2) if the profile was activated by the owner when it had no session,
then it should stay alive until the user logs in (once) and logs
out again. This is already handled by the previous commit.
Yes, this point is odd. If you first do
$ sudo -u $OTHER_USER nmcli connection up $PROFILE
the profile activates despite not having a session. If you then
$ ssh guest@localhost nmcli device
you'll still see the profile active. However, the moment the SSH session
ends, a session closes and the profile disconnects. It's unclear how to
solve that any better. I think a user who cares about this should not
activate the profile without having a session in the first place.
There are quite a few special cases, in particular with internal
activations. In those cases we need to decide whether to bind the
activation to the profile's visibility.
Also, expose the "bind" setting in the D-Bus API. Note that in the future
this flag may be modified via the D-Bus API. We may also add related API
that allows tweaking the lifetime of the activation.
Also, I think we broke handling of connection visibility with 37e8c53eeed
"core: Introduce helper class to track connection keep alive". This
should be fixed now too, with improved behavior.
Fixes: 37e8c53eeed579fe34a68819cd12f3295d581394
https://bugzilla.redhat.com/show_bug.cgi?id=1530977
2018-11-21 13:30:16 +01:00
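The policy above reduces to a small helper that picks the initial state flags.
A plausible sketch of _activation_bind_lifetime_to_profile_visibility(), whose
call sites appear later in this section (the flag and accessor names are real,
but the exact body shown here is an assumption):

static NMActivationStateFlags
_activation_bind_lifetime_to_profile_visibility(NMAuthSubject *subject)
{
    /* assumed body: internal activations and requests by root are not
     * bound to the profile's visibility, so they survive the owner
     * having no session. Everybody else gets bound. */
    if (nm_auth_subject_get_subject_type(subject) == NM_AUTH_SUBJECT_TYPE_INTERNAL
        || nm_auth_subject_get_unix_process_uid(subject) == 0)
        return NM_ACTIVATION_STATE_FLAG_NONE;

    return NM_ACTIVATION_STATE_FLAG_LIFETIME_BOUND_TO_PROFILE_VISIBILITY;
}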
|
|
|
initial_state_flags,
|
2013-08-28 16:19:20 -05:00
|
|
|
error);
|
2018-04-12 15:40:32 +02:00
|
|
|
if (!active)
|
|
|
|
|
return NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-18 09:06:54 +02:00
|
|
|
nm_active_connection_authorize(active,
|
|
|
|
|
NULL,
|
|
|
|
|
_async_op_complete_ac_auth_cb,
|
|
|
|
|
_async_op_data_new_authorize_activate_internal(self, active));
|
2013-08-28 16:19:20 -05:00
|
|
|
return active;
|
2007-10-01 15:38:39 +00:00
|
|
|
}
|
|
|
|
|
|
2014-10-15 21:17:45 -05:00
|
|
|
/**
|
|
|
|
|
* validate_activation_request:
|
|
|
|
|
* @self: the #NMManager
|
|
|
|
|
* @context: the D-Bus context of the requestor
|
2018-08-11 11:08:17 +02:00
|
|
|
* @sett_conn: the #NMSettingsConnection to be activated, or %NULL if there
|
|
|
|
|
* is only a partial activation.
|
|
|
|
|
* @connection: the partial #NMConnection to be activated (if @sett_conn is unspecified)
|
2018-04-11 17:05:00 +02:00
|
|
|
* @device_path: the object path of the device to be activated, or %NULL
|
2018-09-15 07:20:54 -04:00
|
|
|
* @out_device: on successful return, the #NMDevice to be activated with @connection
|
2018-04-12 10:34:11 +02:00
|
|
|
* The caller may pass in a device which shortcuts the lookup by path.
|
|
|
|
|
* In this case, the passed in device must have the matching @device_path
|
|
|
|
|
* already.
|
2018-04-12 11:15:42 +02:00
|
|
|
* @out_is_vpn: on successful return, %TRUE if @connection is a VPN connection
|
2014-10-15 21:17:45 -05:00
|
|
|
* @error: location to store an error on failure
|
|
|
|
|
*
|
|
|
|
|
* Performs basic validation on an activation request, including ensuring that
|
|
|
|
|
* the requestor is a valid Unix process, is not disallowed by @connection's
|
|
|
|
|
* permissions, and that a device exists that can activate @connection.
|
|
|
|
|
*
|
|
|
|
|
* Returns: on success, the #NMAuthSubject representing the requestor, or
|
|
|
|
|
* %NULL on error
|
|
|
|
|
*/
|
2013-07-29 12:42:16 -05:00
|
|
|
static NMAuthSubject *
|
2021-11-09 13:28:54 +01:00
|
|
|
validate_activation_request(NMManager *self,
|
2015-04-15 14:53:30 -04:00
|
|
|
GDBusMethodInvocation *context,
|
2021-11-09 13:28:54 +01:00
|
|
|
NMSettingsConnection *sett_conn,
|
|
|
|
|
NMConnection *connection,
|
|
|
|
|
const char *device_path,
|
|
|
|
|
NMDevice **out_device,
|
|
|
|
|
gboolean *out_is_vpn,
|
|
|
|
|
GError **error)
|
|
|
|
|
{
|
|
|
|
|
NMDevice *device = NULL;
|
|
|
|
|
gboolean is_vpn = FALSE;
|
2018-04-19 11:52:19 +02:00
|
|
|
gs_unref_object NMAuthSubject *subject = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
nm_assert(!sett_conn || NM_IS_SETTINGS_CONNECTION(sett_conn));
|
|
|
|
|
nm_assert(!connection || NM_IS_CONNECTION(connection));
|
|
|
|
|
nm_assert(sett_conn || connection);
|
|
|
|
|
nm_assert(!connection || !sett_conn
|
|
|
|
|
|| connection == nm_settings_connection_get_connection(sett_conn));
|
2018-04-11 17:35:23 +02:00
|
|
|
nm_assert(out_device);
|
2018-04-12 11:15:42 +02:00
|
|
|
nm_assert(out_is_vpn);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
if (!connection)
|
|
|
|
|
connection = nm_settings_connection_get_connection(sett_conn);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-07-29 12:42:16 -05:00
|
|
|
/* Validate the caller */
|
2019-12-19 11:30:38 +01:00
|
|
|
subject = nm_dbus_manager_new_auth_subject_from_context(context);
|
2013-07-29 12:42:16 -05:00
|
|
|
if (!subject) {
|
2012-09-13 16:51:58 -05:00
|
|
|
g_set_error_literal(error,
|
2013-07-29 12:42:16 -05:00
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
2019-09-04 10:18:56 +02:00
|
|
|
NM_UTILS_ERROR_MSG_REQ_UID_UKNOWN);
|
2013-07-29 12:42:16 -05:00
|
|
|
return NULL;
|
2012-09-13 16:51:58 -05:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-12 09:48:16 +02:00
|
|
|
if (!nm_auth_is_subject_in_acl_set_error(connection,
|
|
|
|
|
subject,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
error))
|
2018-04-12 10:34:11 +02:00
|
|
|
return NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-12 11:19:06 +02:00
|
|
|
is_vpn = _connection_is_vpn(connection);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-12 10:34:11 +02:00
|
|
|
if (*out_device) {
|
|
|
|
|
device = *out_device;
|
|
|
|
|
nm_assert(NM_IS_DEVICE(device));
|
|
|
|
|
nm_assert(device_path);
|
|
|
|
|
nm_assert(nm_streq0(device_path, nm_dbus_object_get_path(NM_DBUS_OBJECT(device))));
|
|
|
|
|
nm_assert(device == nm_manager_get_device_by_path(self, device_path));
|
|
|
|
|
} else if (device_path) {
|
2012-09-13 13:17:46 -05:00
|
|
|
device = nm_manager_get_device_by_path(self, device_path);
|
2013-08-28 16:19:20 -05:00
|
|
|
if (!device) {
|
|
|
|
|
g_set_error_literal(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_DEVICE,
|
|
|
|
|
"Device not found");
|
2018-04-12 10:34:11 +02:00
|
|
|
return NULL;
|
2013-08-28 16:19:20 -05:00
|
|
|
}
|
2018-04-12 13:37:40 +02:00
|
|
|
} else if (!is_vpn) {
|
2018-07-10 11:17:05 +02:00
|
|
|
gs_free_error GError *local = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
device = nm_manager_get_best_device_for_connection(self,
|
|
|
|
|
sett_conn,
|
|
|
|
|
connection,
|
|
|
|
|
TRUE,
|
|
|
|
|
NULL,
|
|
|
|
|
&local);
|
2018-04-12 13:37:40 +02:00
|
|
|
if (!device) {
|
2018-04-12 10:34:11 +02:00
|
|
|
gs_free char *iface = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-12 10:34:11 +02:00
|
|
|
/* VPN and software-device connections don't need a device yet,
|
|
|
|
|
* but non-virtual connections do ... */
|
|
|
|
|
if (!nm_connection_is_virtual(connection)) {
|
2018-07-10 11:17:05 +02:00
|
|
|
g_set_error(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_DEVICE,
|
|
|
|
|
"No suitable device found for this connection (%s).",
|
|
|
|
|
local->message);
|
2018-04-12 10:34:11 +02:00
|
|
|
return NULL;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-12 10:34:11 +02:00
|
|
|
/* Look for an existing device with the connection's interface name */
|
2019-11-21 18:05:11 +01:00
|
|
|
iface = nm_manager_get_connection_iface(self, connection, NULL, NULL, error);
|
2018-04-12 10:34:11 +02:00
|
|
|
if (!iface)
|
|
|
|
|
return NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-12 10:34:11 +02:00
|
|
|
device = find_device_by_iface(self, iface, connection, NULL);
|
|
|
|
|
if (!device) {
|
|
|
|
|
g_set_error_literal(error,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_DEVICE,
|
|
|
|
|
"Failed to find a compatible device for this connection");
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2013-08-28 16:19:20 -05:00
|
|
|
}
|
2014-10-15 21:17:45 -05:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-07-06 15:54:16 +02:00
|
|
|
nm_assert(is_vpn || NM_IS_DEVICE(device));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
*out_device = device;
|
2018-04-12 11:15:42 +02:00
|
|
|
*out_is_vpn = is_vpn;
|
2018-04-12 10:34:11 +02:00
|
|
|
return g_steal_pointer(&subject);
|
2012-09-13 13:17:46 -05:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2012-09-13 16:51:58 -05:00
|
|
|
|
2010-06-03 23:20:11 -07:00
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
_activation_auth_done(NMManager *self,
|
|
|
|
|
NMActiveConnection *active,
|
2018-04-18 09:06:54 +02:00
|
|
|
GDBusMethodInvocation *invocation,
|
2014-02-26 16:04:45 -06:00
|
|
|
gboolean success,
|
2021-11-09 13:28:54 +01:00
|
|
|
const char *error_desc)
|
2013-08-28 16:19:20 -05:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
GError *error = NULL;
|
|
|
|
|
NMAuthSubject *subject;
|
2015-07-14 16:53:24 +02:00
|
|
|
NMSettingsConnection *connection;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2015-07-14 10:26:54 +02:00
|
|
|
subject = nm_active_connection_get_subject(active);
|
2015-07-14 16:53:24 +02:00
|
|
|
connection = nm_active_connection_get_settings_connection(active);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-12 15:40:32 +02:00
|
|
|
if (!success) {
|
2013-08-28 16:19:20 -05:00
|
|
|
error =
|
|
|
|
|
g_error_new_literal(NM_MANAGER_ERROR, NM_MANAGER_ERROR_PERMISSION_DENIED, error_desc);
|
2018-04-12 15:40:32 +02:00
|
|
|
goto fail;
|
2012-09-13 16:51:58 -05:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-12 15:40:32 +02:00
|
|
|
if (!_internal_activate_generic(self, active, &error))
|
|
|
|
|
goto fail;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-12 15:40:32 +02:00
|
|
|
nm_settings_connection_autoconnect_blocked_reason_set(
|
|
|
|
|
connection,
|
|
|
|
|
NM_SETTINGS_AUTO_CONNECT_BLOCKED_REASON_USER_REQUEST,
|
|
|
|
|
FALSE);
|
2018-04-18 09:06:54 +02:00
|
|
|
g_dbus_method_invocation_return_value(
|
|
|
|
|
invocation,
|
2018-04-12 15:40:32 +02:00
|
|
|
g_variant_new("(o)", nm_dbus_object_get_path(NM_DBUS_OBJECT(active))));
|
|
|
|
|
nm_audit_log_connection_op(NM_AUDIT_OP_CONN_ACTIVATE, connection, TRUE, NULL, subject, NULL);
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
fail:
|
2020-11-04 09:46:41 +01:00
|
|
|
_delete_volatile_connection_do(self, connection);
|
|
|
|
|
|
2016-04-20 12:10:55 +02:00
|
|
|
nm_audit_log_connection_op(NM_AUDIT_OP_CONN_ACTIVATE,
|
|
|
|
|
connection,
|
|
|
|
|
FALSE,
|
|
|
|
|
NULL,
|
2015-07-14 10:26:54 +02:00
|
|
|
subject,
|
|
|
|
|
error->message);
|
2018-02-05 16:41:10 +01:00
|
|
|
nm_active_connection_set_state_fail(active,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_REASON_UNKNOWN,
|
|
|
|
|
error->message);
|
2015-07-14 10:26:54 +02:00
|
|
|
|
2018-04-18 09:06:54 +02:00
|
|
|
g_dbus_method_invocation_take_error(invocation, error);
|
2008-11-21 18:11:15 +00:00
|
|
|
}
|
|
|
|
|
|
2007-10-01 15:38:39 +00:00
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
impl_manager_activate_connection(NMDBusObject *obj,
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDBusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, we now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry; either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of the D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. The generated code would also hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means that changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without a strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
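For context on the lower-layer API described above, here is a minimal, generic
GIO sketch of exporting an object through a static GDBusInterfaceInfo and a
vtable on a GDBusConnection (plain GIO usage, not NetworkManager's actual
descriptor tables; the object path and function names are made up):

#include <gio/gio.h>

static void
handle_method_call(GDBusConnection       *connection,
                   const char            *sender,
                   const char            *object_path,
                   const char            *interface_name,
                   const char            *method_name,
                   GVariant              *parameters,
                   GDBusMethodInvocation *invocation,
                   gpointer               user_data)
{
    /* dispatch on method_name; every invocation must be completed */
    g_dbus_method_invocation_return_value(invocation, NULL);
}

static const GDBusInterfaceVTable vtable = {
    .method_call = handle_method_call,
};

/* interface_info is a static GDBusInterfaceInfo describing the methods,
 * signals and properties; registering binds it to one object path. */
static guint
export_example_object(GDBusConnection *bus, GDBusInterfaceInfo *interface_info, GError **error)
{
    return g_dbus_connection_register_object(bus,
                                             "/org/example/Object",
                                             interface_info,
                                             &vtable,
                                             NULL, /* user_data */
                                             NULL, /* user_data_free_func */
                                             error);
}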
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
2021-11-09 13:28:54 +01:00
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *dbus_connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER(obj);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
gs_unref_object NMActiveConnection *active = NULL;
|
|
|
|
|
gs_unref_object NMAuthSubject *subject = NULL;
|
|
|
|
|
NMSettingsConnection *sett_conn = NULL;
|
|
|
|
|
NMDevice *device = NULL;
|
|
|
|
|
gboolean is_vpn = FALSE;
|
|
|
|
|
GError *error = NULL;
|
|
|
|
|
const char *connection_path;
|
|
|
|
|
const char *device_path;
|
|
|
|
|
const char *specific_object_path;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
g_variant_get(parameters, "(&o&o&o)", &connection_path, &device_path, &specific_object_path);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-01-29 15:55:38 +01:00
|
|
|
connection_path = nm_dbus_path_not_empty(connection_path);
|
|
|
|
|
specific_object_path = nm_dbus_path_not_empty(specific_object_path);
|
|
|
|
|
device_path = nm_dbus_path_not_empty(device_path);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-07-24 10:41:39 -05:00
|
|
|
/* If the connection path is given and valid, that connection is activated.
|
2020-07-01 17:20:40 -04:00
|
|
|
* Otherwise, the "best" connection for the device is chosen and activated,
|
2013-07-24 10:41:39 -05:00
|
|
|
* regardless of whether that connection is autoconnect-enabled or not
|
|
|
|
|
* (since this is an explicit request, not an auto-activation request).
|
|
|
|
|
*/
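To make that contract concrete, a hedged client-side sketch of calling
ActivateConnection with GIO follows; "/" denotes "unspecified" (matching the
nm_dbus_path_not_empty() normalization below), and the device path is a
made-up example:

static char *
example_activate_best_connection(GDBusConnection *bus, GError **error)
{
    GVariant *ret;
    char     *active_path;

    ret = g_dbus_connection_call_sync(bus,
                                      "org.freedesktop.NetworkManager",
                                      "/org/freedesktop/NetworkManager",
                                      "org.freedesktop.NetworkManager",
                                      "ActivateConnection",
                                      g_variant_new("(ooo)",
                                                    "/", /* connection: let NM choose the best */
                                                    "/org/freedesktop/NetworkManager/Devices/2",
                                                    "/"), /* specific object: none */
                                      G_VARIANT_TYPE("(o)"),
                                      G_DBUS_CALL_FLAGS_NONE,
                                      -1,
                                      NULL,
                                      error);
    if (!ret)
        return NULL;
    g_variant_get(ret, "(o)", &active_path);
    g_variant_unref(ret);
    return active_path; /* D-Bus path of the new active connection */
}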
|
2016-03-23 10:35:55 +01:00
|
|
|
if (connection_path) {
|
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn = nm_settings_get_connection_by_path(priv->settings, connection_path);
|
|
|
|
|
if (!sett_conn) {
|
2016-03-23 10:35:55 +01:00
|
|
|
error = g_error_new_literal(NM_MANAGER_ERROR,
|
2017-06-01 17:03:46 +02:00
|
|
|
NM_MANAGER_ERROR_UNKNOWN_CONNECTION,
|
|
|
|
|
"Connection could not be found.");
|
2016-03-23 10:35:55 +01:00
|
|
|
goto error;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
2013-07-24 10:41:39 -05:00
|
|
|
/* If no connection is given, find a suitable connection for the given device path */
|
|
|
|
|
if (!device_path) {
|
|
|
|
|
error = g_error_new_literal(
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_DEVICE,
|
|
|
|
|
"Only devices may be activated without a specifying a connection");
|
|
|
|
|
goto error;
|
|
|
|
|
}
|
|
|
|
|
device = nm_manager_get_device_by_path(self, device_path);
|
|
|
|
|
if (!device) {
|
|
|
|
|
error = g_error_new(NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_UNKNOWN_DEVICE,
|
2016-03-30 09:00:06 +02:00
|
|
|
"Can not activate an unknown device '%s'",
|
|
|
|
|
device_path);
|
2013-07-24 10:41:39 -05:00
|
|
|
goto error;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn = nm_device_get_best_connection(device, specific_object_path, &error);
|
|
|
|
|
if (!sett_conn)
|
2013-07-24 10:41:39 -05:00
|
|
|
goto error;
|
2012-09-13 13:17:46 -05:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-07-29 12:42:16 -05:00
|
|
|
subject = validate_activation_request(self,
|
2018-02-26 13:51:52 +01:00
|
|
|
invocation,
|
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn,
|
|
|
|
|
NULL,
|
2013-07-29 12:42:16 -05:00
|
|
|
device_path,
|
|
|
|
|
&device,
|
2013-08-28 16:19:20 -05:00
|
|
|
&is_vpn,
|
2013-07-29 12:42:16 -05:00
|
|
|
&error);
|
|
|
|
|
if (!subject)
|
2012-09-13 13:17:46 -05:00
|
|
|
goto error;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
active = _new_active_connection(self,
|
2018-04-12 11:32:18 +02:00
|
|
|
is_vpn,
|
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn,
|
2016-09-07 17:47:26 +02:00
|
|
|
NULL,
|
2020-09-28 16:03:33 +02:00
|
|
|
NULL,
|
2013-08-28 16:19:20 -05:00
|
|
|
specific_object_path,
|
|
|
|
|
device,
|
|
|
|
|
subject,
|
2017-03-07 11:04:36 +01:00
|
|
|
NM_ACTIVATION_TYPE_MANAGED,
|
2018-03-28 17:18:04 +02:00
|
|
|
NM_ACTIVATION_REASON_USER_REQUEST,
|
2018-11-21 13:30:16 +01:00
|
|
|
_activation_bind_lifetime_to_profile_visibility(subject),
|
2013-08-28 16:19:20 -05:00
|
|
|
&error);
|
|
|
|
|
if (!active)
|
2013-07-29 12:42:16 -05:00
|
|
|
goto error;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-06-04 10:36:52 +02:00
|
|
|
nm_active_connection_authorize(
|
|
|
|
|
active,
|
2018-02-05 14:51:27 +01:00
|
|
|
NULL,
|
2018-04-18 09:06:54 +02:00
|
|
|
_async_op_complete_ac_auth_cb,
|
|
|
|
|
_async_op_data_new_ac_auth_activate_user(self, active, invocation));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-06-04 10:36:52 +02:00
|
|
|
/* we passed the pointer on to _async_op_data_new_ac_auth_activate_user() */
|
|
|
|
|
g_steal_pointer(&active);
|
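A short illustration (not from this file) of the steal pattern used above:
g_steal_pointer() NULLs the auto-cleanup variable, so ownership moves to the
consumer and the gs_unref_object cleanup at scope exit becomes a no-op.

static void
example_consume(GObject *obj) /* takes ownership of one reference */
{
    g_object_unref(obj);
}

static void
example_steal(void)
{
    gs_unref_object GObject *obj = g_object_new(G_TYPE_OBJECT, NULL);

    example_consume(g_steal_pointer(&obj));
    /* obj is NULL here; the automatic unref does nothing. */
}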
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-07-29 12:42:16 -05:00
|
|
|
return;
|
|
|
|
|
|
2012-09-13 13:17:46 -05:00
|
|
|
error:
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements the NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates for keeping the actual profile (see the
accessor sketch after the block below).
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
an NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle an NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we could not treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as the applied-connection, all it can do is clone the entire
NMSettingsConnection as an NMSimpleConnection.
Likewise, when we get an NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who else references and modifies the instance.
By separating NMSettingsConnection, we could in the future make
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
if (sett_conn) {
|
|
|
|
|
nm_audit_log_connection_op(NM_AUDIT_OP_CONN_ACTIVATE,
|
|
|
|
|
sett_conn,
|
|
|
|
|
FALSE,
|
|
|
|
|
NULL,
|
2015-07-14 10:26:54 +02:00
|
|
|
subject,
|
|
|
|
|
error->message);
|
|
|
|
|
}
|
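Delegation in practice: a hedged illustration of the pattern the
"settings: use delegation" message above describes.
nm_settings_connection_get_connection() is the real accessor; the wrapper
name here is hypothetical.

static NMConnection *
example_get_profile(NMSettingsConnection *sett_conn)
{
    /* the returned delegated NMConnection is owned by sett_conn; callers
     * that keep it must take their own reference and must not modify it. */
    return nm_settings_connection_get_connection(sett_conn);
}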
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDBusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo (see the
registration sketch after the function below).
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, we now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of the D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. The generated code would also hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means changing
a property in quick succession will no longer be combined, and a signal
is guaranteed to be emitted for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without a strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
|
|
|
g_dbus_method_invocation_take_error(invocation, error);
|
2011-01-10 23:39:12 -06:00
|
|
|
}
|
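A hedged sketch (hypothetical names, plain GIO API) of the registration
pattern the "core/dbus: rework" message above describes: glue an object
directly to GDBusConnection via a static GDBusInterfaceInfo descriptor and a
vtable, instead of generated GDBusInterfaceSkeleton glue.

static void
example_method_call(GDBusConnection       *connection,
                    const char            *sender,
                    const char            *object_path,
                    const char            *interface_name,
                    const char            *method_name,
                    GVariant              *parameters,
                    GDBusMethodInvocation *invocation,
                    gpointer               user_data)
{
    /* dispatch on method_name and complete the invocation. */
}

static const GDBusInterfaceVTable example_vtable = {
    .method_call = example_method_call,
};

static guint
example_register(GDBusConnection *connection, GDBusInterfaceInfo *info, GError **error)
{
    /* "info" would be a statically allocated interface descriptor. */
    return g_dbus_connection_register_object(connection,
                                             "/org/freedesktop/NetworkManager",
                                             info,
                                             &example_vtable,
                                             NULL,
                                             NULL,
                                             error);
}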
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2012-09-13 16:51:58 -05:00
|
|
|
|
2011-01-10 23:39:12 -06:00
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
activation_add_done(NMSettings *settings,
|
|
|
|
|
NMSettingsConnection *new_connection,
|
|
|
|
|
GError *error,
|
2015-04-15 14:53:30 -04:00
|
|
|
GDBusMethodInvocation *context,
|
2021-11-09 13:28:54 +01:00
|
|
|
NMAuthSubject *subject,
|
2011-01-10 23:39:12 -06:00
|
|
|
gpointer user_data)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self;
|
2015-07-14 16:53:24 +02:00
|
|
|
gs_unref_object NMActiveConnection *active = NULL;
|
2021-11-09 13:28:54 +01:00
|
|
|
gs_free_error GError *local = NULL;
|
|
|
|
|
gpointer async_op_type_ptr;
|
|
|
|
|
AsyncOpType async_op_type;
|
|
|
|
|
GVariant *result_floating;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: rework tracking settings connections and settings plugins
Completely rework how settings plugins handle connections and how
NMSettings tracks the list of connections.
Previously, settings plugins would return objects of (a subtype of) type
NMSettingsConnection. The NMSettingsConnection was tightly coupled with
the settings plugin. That has a lot of downsides.
Change that. When changing this basic relation of how settings connections
are tracked, everything falls apart. That's why this is a huge change.
Also, since I have to largely rewrite the settings plugins, I also
added support for multiple keyfile directories, handle in-memory
connections only in the keyfile plugin and (partly) use copy-on-write NMConnection
instances. I don't want to spend effort rewriting large parts while
preserving the old way, which should change anyway. E.g. while rewriting ifcfg-rh,
I don't want to let it handle in-memory connections because that's not right
long-term.
--
If the settings plugins themselves create subtypes of NMSettingsConnection
instances, then a lot of knowledge about tracking connections moves
to the plugins.
Just try to follow the code for what happens during nm_settings_add_connection().
Note how the logic is spread out:
- nm_settings_add_connection() calls the plugin's add_connection()
- add_connection() creates a NMSettingsConnection subtype
- the plugin has to know that it's called during add-connection and
not emit the NM_SETTINGS_PLUGIN_CONNECTION_ADDED signal
- NMSettings calls claim_connection() which hooks up the new
NMSettingsConnection instance and configures the instance
(like calling nm_settings_connection_added()).
This summary does not sound like a lot, but try to follow that code. The logic
is all over the place.
Instead, settings plugins should have a very simple API for adding, modifying,
deleting, loading and reloading connections. All the plugin does is return a
NMSettingsStorage handle. The storage instance is a handle to identify a profile
in storage (e.g. a particular file). The settings plugin is free to subtype
NMSettingsStorage, but it's not necessary.
There are no more events raised, and the settings plugin implements the small
API in a straightforward manner.
NMSettings now drives all of this. Even NMSettingsConnection now has
very little concern about how it's tracked and delegates only to NMSettings.
This should make settings plugins simpler. Currently settings plugins
are so cumbersome to implement that we avoid having them. It should not be
like that; it should be easy, beneficial and lightweight to create a new
settings plugin.
Note also how the settings plugins no longer care about duplicate UUIDs.
Duplicated UUIDs are a fact of life and NMSettings must handle them. No
need to overly concern settings plugins with that.
--
NMSettingsConnection is exposed directly on D-Bus (being a subtype of
NMDBusObject) but it was also a GObject type provided by the settings
plugin. Hence, it was not possible to migrate a profile from one plugin to
another.
However, that would be useful when one plugin does not support a
connection type (like ifcfg-rh not supporting VPN). Currently such
migration is not implemented, except for migrating profiles to/from keyfile's
run directory. The problem is that migrating profiles in general is
complicated, but in some cases it is important to do.
For example, checkpoint rollback should recreate the profile in the right
settings plugin, not just add it to persistent storage. This is not yet
properly implemented.
--
Previously, both the keyfile and ifcfg-rh plugins implemented in-memory (unsaved)
profiles, while the ifupdown plugin could not handle them. That meant duplication of code,
and an ifupdown profile could not be modified or made unsaved.
This is now unified and only the keyfile plugin handles in-memory profiles (bgo #744711).
Also, NMSettings is aware of such profiles and treats them specially.
In particular, NMSettings drives the migration between persistent and non-persistent
storage.
Note that a settings plugin may create truly generated, in-memory profiles.
The settings plugin is free to generate and persist the profiles in any way it
wishes. But the concept of "unsaved" profiles is now something explicitly handled
by the keyfile plugin. Also, these "unsaved" keyfile profiles are persisted to the
file system too, to the /run directory. This is great for two reasons: first of all, all
profiles from keyfile storage in fact have a backing file -- even the
unsaved ones. It also means you can create "unsaved" profiles in /run
and load them with `nmcli connection load`, meaning there is a file-based
API for creating unsaved profiles.
The other advantage is that these profiles now survive restarting
NetworkManager. It's paramount that restarting the daemon is as
non-disruptive as possible. Persisting unsaved files to /run improves
things significantly here.
--
In the past, NMSettingsConnection also implemented the NMConnection interface.
That was already changed a while ago; instead, users now call
nm_settings_connection_get_connection() to delegate to a
NMSimpleConnection. What still happened, however, was that the NMConnection
instance never got swapped; instead, the instance was modified with
nm_connection_replace_settings_from_connection(), clear-secrets, etc.
Change that and treat the NMConnection instance as immutable. Instead of modifying
it, reference/clone a new instance. Previously, when somebody
wanted to keep a reference to an NMConnection, the profile would be cloned.
Now, it is supposed to be safe to reference the instance directly, and everybody
must ensure not to modify the instance. nmtst_connection_assert_unchanging()
should help with that.
The point is that the settings plugins may keep references to the
NMConnection instance, and so does the NMSettingsConnection. We want
to avoid cloning the instances as long as they are the same.
Likewise, the device's applied connection can now also be referenced
instead of cloning it. This is not yet done, and possibly there are
further improvements possible.
--
Also implement multiple keyfile directories /usr/lib, /etc, /run (rh #1674545,
bgo #772414).
It was always the case that multiple files could provide the same UUID
(both in the case of keyfile and ifcfg-rh). For the keyfile plugin, if a profile in
read-only storage in /usr/lib gets modified, then it actually gets stored in
/etc (or /run, if the profile is unsaved).
--
While at it, make /etc/network/interfaces profiles for the ifupdown plugin reloadable.
--
https://bugzilla.gnome.org/show_bug.cgi?id=772414
https://bugzilla.gnome.org/show_bug.cgi?id=744711
https://bugzilla.redhat.com/show_bug.cgi?id=1674545
2019-06-13 17:12:20 +02:00
|
|
|
nm_utils_user_data_unpack(user_data, &self, &active, &async_op_type_ptr);
|
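For context, a plausible caller-side pairing for the unpack above (the actual
caller is elsewhere in this file; the exact arguments here are an assumption):
nm_utils_user_data_pack() bundles several pointers into a single gpointer for
the async callback, and nm_utils_user_data_unpack() unbundles and frees it.

static gpointer
example_pack(NMManager *self, NMActiveConnection *active, AsyncOpType async_op_type)
{
    /* the callback takes ownership of the reference on "active". */
    return nm_utils_user_data_pack(self,
                                   g_object_ref(active),
                                   GINT_TO_POINTER(async_op_type));
}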
all: return output dictionary from "AddAndActivate2"
Add an "a{sv}" output argument to the "AddAndActivate2" D-Bus API.
"AddAndActivate2" replaces "AddAndActivate" with more options.
It also has a dictionary argument to be forward compatible, so that we
hopefully won't need an "AddAndActivate3". However, it lacked a similar
output dictionary. Add it for future extensibility (a client-side sketch
of the resulting reply tuple follows the return_value call below). I think
this is really a workaround for a shortcoming of D-Bus, which does provide
strong typing and type information about its API, but does not allow
extending an existing API in a backward compatible manner. So we either
resort to Method(), Method2(), Method3() variants, or a catch-all variant
with a generic "a{sv}" input/output argument.
In libnm, rename "nm_client_add_and_activate_connection_options()" to
"nm_client_add_and_activate_connection2()". I think the libnm API should have
an obvious correspondence with the D-Bus API. Or, stated differently, if
"AddAndActivateOptions" were a better name, then the D-Bus API should
be renamed. We should prefer one name over the other, but regardless
of which is preferred, the naming for the D-Bus and libnm API should
correspond.
In this case, I do think that AddAndActivate2() is a better name than
AddAndActivateOptions(). Hence I rename the libnm API.
Also, unless necessary, let libnm still call "AddAndActivate" instead of
"AddAndActivate2". Our backward compatibility works such that libnm
requires a server version at least as new as itself. As such, libnm
theoretically could assume that the server version is new enough to support
"AddAndActivate2" and could always use the more powerful variant.
However, we don't need to break compatibility intentionally and for
little gain. Here, it's easy to let libnm also handle the old server API, by
continuing to use "AddAndActivate" for nm_client_add_and_activate_connection().
Note that during a package update, we don't restart the currently running
NetworkManager instance. In such a scenario, it can easily happen that
nmcli/libnm is newer than the server version. Let's try a bit harder
to not break that.
Changes as discussed in [1].
[1] https://gitlab.freedesktop.org/NetworkManager/NetworkManager/merge_requests/37#note_79876
2018-12-20 07:48:31 +01:00
|
|
|
async_op_type = GPOINTER_TO_INT(async_op_type_ptr);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-12-19 17:35:17 +01:00
|
|
|
if (error)
|
|
|
|
|
goto fail;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-12-19 17:35:17 +01:00
|
|
|
nm_active_connection_set_settings_connection(active, new_connection);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-06-13 17:12:20 +02:00
|
|
|
if (!_internal_activate_generic(self, active, &local))
|
2018-12-19 17:35:17 +01:00
|
|
|
goto fail;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-12-20 07:48:31 +01:00
|
|
|
if (async_op_type == ASYNC_OP_TYPE_AC_AUTH_ADD_AND_ACTIVATE) {
|
|
|
|
|
result_floating = g_variant_new("(oo)",
|
|
|
|
|
nm_dbus_object_get_path(NM_DBUS_OBJECT(new_connection)),
|
|
|
|
|
nm_dbus_object_get_path(NM_DBUS_OBJECT(active)));
|
|
|
|
|
} else {
|
2019-01-29 13:24:43 +01:00
|
|
|
result_floating = g_variant_new("(oo@a{sv})",
|
2018-12-20 07:48:31 +01:00
|
|
|
nm_dbus_object_get_path(NM_DBUS_OBJECT(new_connection)),
|
|
|
|
|
nm_dbus_object_get_path(NM_DBUS_OBJECT(active)),
|
2021-04-14 21:12:02 +02:00
|
|
|
nm_g_variant_singleton_aLsvI());
|
2018-12-20 07:48:31 +01:00
|
|
|
}
|
|
|
|
|
g_dbus_method_invocation_return_value(context, result_floating);
|
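A client-side sketch (not part of this file) of how a caller could unpack the
AddAndActivate2 reply built above: the "(oo@a{sv})" tuple carries the new
connection path, the active-connection path, and the output dictionary.
"reply" stands for the GVariant returned by the D-Bus call; the function name
is hypothetical.

static void
example_parse_reply(GVariant *reply)
{
    const char *connection_path;
    const char *active_path;
    GVariant   *result_dict;

    g_variant_get(reply, "(&o&o@a{sv})", &connection_path, &active_path, &result_dict);
    /* connection_path and active_path borrow from "reply"; result_dict is
     * a new reference and must be released. */
    g_variant_unref(result_dict);
}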
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-12-19 17:35:17 +01:00
|
|
|
nm_audit_log_connection_op(NM_AUDIT_OP_CONN_ADD_ACTIVATE,
|
|
|
|
|
nm_active_connection_get_settings_connection(active),
|
|
|
|
|
TRUE,
|
|
|
|
|
NULL,
|
|
|
|
|
nm_active_connection_get_subject(active),
|
|
|
|
|
NULL);
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
fail:
|
2019-06-13 17:12:20 +02:00
|
|
|
if (local) {
|
|
|
|
|
nm_assert(!error);
|
|
|
|
|
error = local;
|
|
|
|
|
} else
|
|
|
|
|
nm_assert(error);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-02-05 16:41:10 +01:00
|
|
|
nm_active_connection_set_state_fail(active,
|
|
|
|
|
NM_ACTIVE_CONNECTION_STATE_REASON_UNKNOWN,
|
|
|
|
|
error->message);
|
2016-12-23 14:02:58 +01:00
|
|
|
if (new_connection)
|
2019-06-13 17:12:20 +02:00
|
|
|
nm_settings_connection_delete(new_connection, FALSE);
|
2015-04-15 14:53:30 -04:00
|
|
|
g_dbus_method_invocation_return_gerror(context, error);
|
2015-07-14 10:26:54 +02:00
|
|
|
nm_audit_log_connection_op(NM_AUDIT_OP_CONN_ADD_ACTIVATE,
|
2015-08-10 12:33:01 +02:00
|
|
|
NULL,
|
2015-07-14 10:26:54 +02:00
|
|
|
FALSE,
|
2016-04-20 12:10:55 +02:00
|
|
|
NULL,
|
2015-07-14 16:53:24 +02:00
|
|
|
nm_active_connection_get_subject(active),
|
2015-07-14 10:26:54 +02:00
|
|
|
error->message);
|
2011-01-10 23:39:12 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
_add_and_activate_auth_done(NMManager *self,
|
2018-12-20 07:48:31 +01:00
|
|
|
AsyncOpType async_op_type,
|
2021-11-09 13:28:54 +01:00
|
|
|
NMActiveConnection *active,
|
|
|
|
|
NMConnection *connection,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
2019-06-13 17:12:20 +02:00
|
|
|
NMSettingsConnectionPersistMode persist_mode,
|
|
|
|
|
gboolean is_volatile,
|
2014-02-26 16:04:45 -06:00
|
|
|
gboolean success,
|
2021-11-09 13:28:54 +01:00
|
|
|
const char *error_desc)
|
2011-01-10 23:39:12 -06:00
|
|
|
{
|
2018-04-17 14:59:27 +02:00
|
|
|
NMManagerPrivate *priv;
|
2021-11-09 13:28:54 +01:00
|
|
|
GError *error = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-12 12:01:25 +02:00
|
|
|
if (!success) {
|
2013-08-28 16:19:20 -05:00
|
|
|
error =
|
|
|
|
|
g_error_new_literal(NM_MANAGER_ERROR, NM_MANAGER_ERROR_PERMISSION_DENIED, error_desc);
|
2015-07-14 10:26:54 +02:00
|
|
|
nm_audit_log_connection_op(NM_AUDIT_OP_CONN_ADD_ACTIVATE,
|
2015-08-10 12:33:01 +02:00
|
|
|
NULL,
|
2015-07-14 10:26:54 +02:00
|
|
|
FALSE,
|
2016-04-20 12:10:55 +02:00
|
|
|
NULL,
|
2015-07-14 10:26:54 +02:00
|
|
|
nm_active_connection_get_subject(active),
|
|
|
|
|
error->message);
|
2018-04-18 09:06:54 +02:00
|
|
|
g_dbus_method_invocation_take_error(invocation, error);
|
2018-04-12 12:01:25 +02:00
|
|
|
return;
|
2011-01-10 23:39:12 -06:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-17 14:59:27 +02:00
|
|
|
priv = NM_MANAGER_GET_PRIVATE(self);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-18 10:23:22 +02:00
|
|
|
/* FIXME(shutdown): nm_settings_add_connection_dbus() cannot be cancelled. It should be made
|
|
|
|
|
* cancellable and tracked via AsyncOpData to be able to do a clean
|
|
|
|
|
* shutdown. */
|
2018-04-12 12:01:25 +02:00
|
|
|
nm_settings_add_connection_dbus(
|
|
|
|
|
priv->settings,
|
2022-03-14 10:20:11 +01:00
|
|
|
NULL,
|
2018-04-12 12:01:25 +02:00
|
|
|
connection,
|
settings: rework tracking settings connections and settings plugins
2019-06-13 17:12:20 +02:00
|
|
|
persist_mode,
|
core,libnm: add AddConnection2() D-Bus API to block autoconnect from the start
It should be possible to add a profile with autoconnect blocked from the
start. Update2() has a %NM_SETTINGS_UPDATE2_FLAG_BLOCK_AUTOCONNECT flag to
block autoconnect, and so we need something similar when adding a connection.
As the existing AddConnection() and AddConnectionUnsaved() API is not
extensible, add AddConnection2() that has flags and room for additional
arguments.
Then add and implement the new flag %NM_SETTINGS_ADD_CONNECTION2_FLAG_BLOCK_AUTOCONNECT
for AddConnection2().
Note that libnm's nm_client_add_connection2() API can completely replace
the existing nm_client_add_connection_async() call. In particular, it
will automatically prefer to call the D-Bus methods AddConnection() and
AddConnectionUnsaved(), in order to work with server versions older than
1.20. The purpose of this is that when upgrading the package, the
running NetworkManager might still be older than the installed libnm.
Anyway, since nm_client_add_connection2_finish() also has a result
output, the caller needs to decide whether it cares about that result.
Hence it has an argument ignore_out_result, which allows falling back to
the old API. One might argue that a caller who doesn't care about the
output result while still wanting to be backward compatible should
itself choose to call nm_client_add_connection_async() or
nm_client_add_connection2(). But it is more convenient if the
new function can fully replace the old one, so that the caller does not
need to switch which start/finish method to call.
https://bugzilla.redhat.com/show_bug.cgi?id=1677068
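A minimal sketch of calling the new method over plain GDBus (not from the commit; the profile dictionary is a made-up stub, and the flag values 0x1 (to-disk) and 0x20 (block-autoconnect) are taken from NMSettingsAddConnection2Flags):

#include <gio/gio.h>

static void
add_connection_blocked(GDBusConnection *bus)
{
    GVariantBuilder conn;
    g_autoptr(GVariant) ret = NULL;
    g_autoptr(GError) error = NULL;

    /* Build a stub a{sa{sv}} profile; a real one needs more settings. */
    g_variant_builder_init(&conn, G_VARIANT_TYPE("a{sa{sv}}"));
    g_variant_builder_open(&conn, G_VARIANT_TYPE("{sa{sv}}"));
    g_variant_builder_add(&conn, "s", "connection");
    g_variant_builder_open(&conn, G_VARIANT_TYPE("a{sv}"));
    g_variant_builder_add(&conn, "{sv}", "id", g_variant_new_string("example"));
    g_variant_builder_add(&conn, "{sv}", "type", g_variant_new_string("802-3-ethernet"));
    g_variant_builder_close(&conn);
    g_variant_builder_close(&conn);

    ret = g_dbus_connection_call_sync(bus,
                                      "org.freedesktop.NetworkManager",
                                      "/org/freedesktop/NetworkManager/Settings",
                                      "org.freedesktop.NetworkManager.Settings",
                                      "AddConnection2",
                                      g_variant_new("(a{sa{sv}}u@a{sv})",
                                                    &conn,
                                                    (guint32) (0x1 | 0x20),
                                                    g_variant_new_array(G_VARIANT_TYPE("{sv}"), NULL, 0)),
                                      G_VARIANT_TYPE("(oa{sv})"),
                                      G_DBUS_CALL_FLAGS_NONE,
                                      -1,
                                      NULL,
                                      &error);
    if (!ret)
        g_printerr("AddConnection2 failed: %s\n", error->message);
}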
2019-07-09 15:22:01 +02:00
|
|
|
NM_SETTINGS_CONNECTION_ADD_REASON_NONE,
|
settings: rework tracking settings connections and settings plugins
2019-06-13 17:12:20 +02:00
|
|
|
(is_volatile ? NM_SETTINGS_CONNECTION_INT_FLAGS_VOLATILE
|
|
|
|
|
: NM_SETTINGS_CONNECTION_INT_FLAGS_NONE),
|
2018-04-18 10:10:30 +02:00
|
|
|
nm_active_connection_get_subject(active),
|
2018-04-18 09:06:54 +02:00
|
|
|
invocation,
|
2018-04-12 12:01:25 +02:00
|
|
|
activation_add_done,
|
all: return output dictionary from "AddAndActivate2"
Add a "a{sv}" output argument to "AddAndActivate2" D-Bus API.
"AddAndActivate2" replaces "AddAndActivate" with more options.
It also has a dictionary argument to be forward compatible so that we
hopefully won't need an "AddAndActivate3". However, it lacked a similar
output dictionary. Add it for future extensibility. I think this is
really to work around a shortcoming of D-Bus, which does provide strong
typing and type information about its API, but does not allow extending
an existing API in a backward-compatible manner. So we either resort to
Method(), Method2(), Method3() variants, or a catch-all variant with a
generic "a{sv}" input/output argument.
In libnm, rename "nm_client_add_and_activate_connection_options()" to
"nm_client_add_and_activate_connection2()". I think libnm API should have
an obvious correspondence with D-Bus API. Or stated differently, if
"AddAndActivateOptions" would be a better name, then the D-Bus API should
be renamed. We should prefer one name over the other, but regardless
of which is preferred, the naming for D-Bus and libnm API should
correspond.
In this case, I do think that AddAndActivate2() is a better name than
AddAndActivateOptions(). Hence I rename the libnm API.
Also, unless necessary, let libnm still call "AddAndActivate" instead of
"AddAndActivate2". Our backward compatibility works the way that libnm
requires a server version at least as new as itself. As such, libnm
theoretically could assume that server version is new enough to support
"AddAndActivate2" and could always use the more powerful variant.
However, we don't need to break compatibility intentionally and for
little gain. Here, it's easy to let libnm also handle old server API, by
continuing to use "AddAndActivate" for nm_client_add_and_activate_connection().
Note that during package update, we don't restart the currently running
NetworkManager instance. In such a scenario, it can easily happen that
nmcli/libnm is newer than the server version. Let's try a bit harder
to not break that.
Changes as discussed in [1].
[1] https://gitlab.freedesktop.org/NetworkManager/NetworkManager/merge_requests/37#note_79876
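For illustration, a client could call the new method and unpack the output dictionary roughly like this (a sketch, not part of the commit; "/" stands for "no specific device/object", and a real call would usually name a device so that the empty settings can be completed):

#include <gio/gio.h>

static void
add_and_activate2(GDBusConnection *bus)
{
    g_autoptr(GVariant) ret = NULL;
    g_autoptr(GVariant) result = NULL;
    g_autoptr(GError) error = NULL;
    const char *path_connection;
    const char *path_active;

    ret = g_dbus_connection_call_sync(
        bus,
        "org.freedesktop.NetworkManager",
        "/org/freedesktop/NetworkManager",
        "org.freedesktop.NetworkManager",
        "AddAndActivateConnection2",
        g_variant_new("(@a{sa{sv}}oo@a{sv})",
                      g_variant_new_array(G_VARIANT_TYPE("{sa{sv}}"), NULL, 0),
                      "/",
                      "/",
                      g_variant_new_array(G_VARIANT_TYPE("{sv}"), NULL, 0)),
        G_VARIANT_TYPE("(ooa{sv})"),
        G_DBUS_CALL_FLAGS_NONE,
        -1,
        NULL,
        &error);
    if (!ret) {
        g_printerr("AddAndActivateConnection2 failed: %s\n", error->message);
        return;
    }
    /* The trailing a{sv} is the new, extensible output dictionary. */
    g_variant_get(ret, "(&o&o@a{sv})", &path_connection, &path_active, &result);
    g_print("connection=%s active=%s\n", path_connection, path_active);
}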
2018-12-20 07:48:31 +01:00
|
|
|
nm_utils_user_data_pack(self, g_object_ref(active), GINT_TO_POINTER(async_op_type)));
|
2011-01-10 23:39:12 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
impl_manager_add_and_activate_connection(NMDBusObject *obj,
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDBusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
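A condensed sketch of that pattern with plain GIO (illustrative only; the daemon builds its GDBusInterfaceInfo from static descriptor structs rather than from XML, and org.example.Demo is a made-up interface):

#include <gio/gio.h>

static const char introspection_xml[] =
    "<node>"
    "  <interface name='org.example.Demo'>"
    "    <method name='Ping'/>"
    "  </interface>"
    "</node>";

static void
handle_method_call(GDBusConnection *connection,
                   const char *sender,
                   const char *object_path,
                   const char *interface_name,
                   const char *method_name,
                   GVariant *parameters,
                   GDBusMethodInvocation *invocation,
                   gpointer user_data)
{
    /* Dispatch on method_name here, much like
     * impl_manager_add_and_activate_connection() below. */
    g_dbus_method_invocation_return_value(invocation, NULL);
}

static const GDBusInterfaceVTable demo_vtable = {
    .method_call = handle_method_call,
};

/* Export one object directly on the GDBusConnection, without
 * GDBusInterfaceSkeleton or GDBusObjectManagerServer in between. */
static guint
export_demo_object(GDBusConnection *bus)
{
    g_autoptr(GDBusNodeInfo) node = g_dbus_node_info_new_for_xml(introspection_xml, NULL);

    return g_dbus_connection_register_object(bus,
                                             "/org/example/Demo",
                                             node->interfaces[0],
                                             &demo_vtable,
                                             NULL,
                                             NULL,
                                             NULL);
}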
2018-02-26 13:51:52 +01:00
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
2021-11-09 13:28:54 +01:00
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *dbus_connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER(obj);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
gs_unref_object NMConnection *incompl_conn = NULL;
|
|
|
|
|
gs_unref_object NMActiveConnection *active = NULL;
|
|
|
|
|
gs_unref_object NMAuthSubject *subject = NULL;
|
|
|
|
|
GError *error = NULL;
|
|
|
|
|
NMDevice *device = NULL;
|
|
|
|
|
gboolean is_vpn = FALSE;
|
|
|
|
|
gs_unref_variant GVariant *settings = NULL;
|
|
|
|
|
gs_unref_variant GVariant *options = NULL;
|
|
|
|
|
const char *device_path;
|
|
|
|
|
const char *specific_object_path;
|
|
|
|
|
gs_free NMConnection **conns = NULL;
|
|
|
|
|
NMSettingsConnectionPersistMode persist_mode = NM_SETTINGS_CONNECTION_PERSIST_MODE_TO_DISK;
|
|
|
|
|
gboolean is_volatile = FALSE;
|
|
|
|
|
gboolean bind_dbus_client = FALSE;
|
|
|
|
|
AsyncOpType async_op_type;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
all: return output dictionary from "AddAndActivate2"
Add a "a{sv}" output argument to "AddAndActivate2" D-Bus API.
"AddAndActivate2" replaces "AddAndActivate" with more options.
It also has a dictionary argument to be forward compatible so that we
hopefully won't need an "AddAndActivate3". However, it lacked a similar
output dictionary. Add it for future extensibility. I think this is
really to workaround a shortcoming of D-Bus, which does provide strong
typing and type information about its API, but does not allow to extend
an existing API in a backward compatible manner. So we either resort to
Method(), Method2(), Method3() variants, or a catch-all variant with a
generic "a{sv}" input/output argument.
In libnm, rename "nm_client_add_and_activate_connection_options()" to
"nm_client_add_and_activate_connection2()". I think libnm API should have
an obvious correspondence with D-Bus API. Or stated differently, if
"AddAndActivateOptions" would be a better name, then the D-Bus API should
be renamed. We should prefer one name over the other, but regardless
of which is preferred, the naming for D-Bus and libnm API should
correspond.
In this case, I do think that AddAndActivate2() is a better name than
AddAndActivateOptions(). Hence I rename the libnm API.
Also, unless necessary, let libnm still call "AddAndActivate" instead of
"AddAndActivate2". Our backward compatibility works the way that libnm
requires a server version at least as new as itself. As such, libnm
theoretically could assume that server version is new enough to support
"AddAndActivate2" and could always use the more powerful variant.
However, we don't need to break compatibility intentionally and for
little gain. Here, it's easy to let libnm also handle old server API, by
continuing to use "AddAndActivate" for nm_client_add_and_activate_connection().
Note that during package update, we don't restart the currently running
NetworkManager instance. In such a scenario, it can easily happen that
nmcli/libnm is newer than the server version. Let's try a bit harder
to not break that.
Changes as discussed in [1].
[1] https://gitlab.freedesktop.org/NetworkManager/NetworkManager/merge_requests/37#note_79876
2018-12-20 07:48:31 +01:00
|
|
|
if (nm_streq(method_info->parent.name, "AddAndActivateConnection2")) {
|
|
|
|
|
async_op_type = ASYNC_OP_TYPE_AC_AUTH_ADD_AND_ACTIVATE2;
|
2018-10-30 16:40:40 +01:00
|
|
|
g_variant_get(parameters,
|
|
|
|
|
"(@a{sa{sv}}&o&o@a{sv})",
|
|
|
|
|
&settings,
|
|
|
|
|
&device_path,
|
|
|
|
|
&specific_object_path,
|
|
|
|
|
&options);
|
all: return output dictionary from "AddAndActivate2"
Add a "a{sv}" output argument to "AddAndActivate2" D-Bus API.
"AddAndActivate2" replaces "AddAndActivate" with more options.
It also has a dictionary argument to be forward compatible so that we
hopefully won't need an "AddAndActivate3". However, it lacked a similar
output dictionary. Add it for future extensibility. I think this is
really to workaround a shortcoming of D-Bus, which does provide strong
typing and type information about its API, but does not allow to extend
an existing API in a backward compatible manner. So we either resort to
Method(), Method2(), Method3() variants, or a catch-all variant with a
generic "a{sv}" input/output argument.
In libnm, rename "nm_client_add_and_activate_connection_options()" to
"nm_client_add_and_activate_connection2()". I think libnm API should have
an obvious correspondence with D-Bus API. Or stated differently, if
"AddAndActivateOptions" would be a better name, then the D-Bus API should
be renamed. We should prefer one name over the other, but regardless
of which is preferred, the naming for D-Bus and libnm API should
correspond.
In this case, I do think that AddAndActivate2() is a better name than
AddAndActivateOptions(). Hence I rename the libnm API.
Also, unless necessary, let libnm still call "AddAndActivate" instead of
"AddAndActivate2". Our backward compatibility works the way that libnm
requires a server version at least as new as itself. As such, libnm
theoretically could assume that server version is new enough to support
"AddAndActivate2" and could always use the more powerful variant.
However, we don't need to break compatibility intentionally and for
little gain. Here, it's easy to let libnm also handle old server API, by
continuing to use "AddAndActivate" for nm_client_add_and_activate_connection().
Note that during package update, we don't restart the currently running
NetworkManager instance. In such a scenario, it can easily happen that
nmcli/libnm is newer than the server version. Let's try a bit harder
to not break that.
Changes as discussed in [1].
[1] https://gitlab.freedesktop.org/NetworkManager/NetworkManager/merge_requests/37#note_79876
2018-12-20 07:48:31 +01:00
|
|
|
} else {
|
|
|
|
|
nm_assert(nm_streq(method_info->parent.name, "AddAndActivateConnection"));
|
|
|
|
|
async_op_type = ASYNC_OP_TYPE_AC_AUTH_ADD_AND_ACTIVATE;
|
2018-10-30 16:40:40 +01:00
|
|
|
g_variant_get(parameters,
|
|
|
|
|
"(@a{sa{sv}}&o&o)",
|
|
|
|
|
&settings,
|
|
|
|
|
&device_path,
|
|
|
|
|
&specific_object_path);
|
all: return output dictionary from "AddAndActivate2"
Add a "a{sv}" output argument to "AddAndActivate2" D-Bus API.
"AddAndActivate2" replaces "AddAndActivate" with more options.
It also has a dictionary argument to be forward compatible so that we
hopefully won't need an "AddAndActivate3". However, it lacked a similar
output dictionary. Add it for future extensibility. I think this is
really to workaround a shortcoming of D-Bus, which does provide strong
typing and type information about its API, but does not allow to extend
an existing API in a backward compatible manner. So we either resort to
Method(), Method2(), Method3() variants, or a catch-all variant with a
generic "a{sv}" input/output argument.
In libnm, rename "nm_client_add_and_activate_connection_options()" to
"nm_client_add_and_activate_connection2()". I think libnm API should have
an obvious correspondence with D-Bus API. Or stated differently, if
"AddAndActivateOptions" would be a better name, then the D-Bus API should
be renamed. We should prefer one name over the other, but regardless
of which is preferred, the naming for D-Bus and libnm API should
correspond.
In this case, I do think that AddAndActivate2() is a better name than
AddAndActivateOptions(). Hence I rename the libnm API.
Also, unless necessary, let libnm still call "AddAndActivate" instead of
"AddAndActivate2". Our backward compatibility works the way that libnm
requires a server version at least as new as itself. As such, libnm
theoretically could assume that server version is new enough to support
"AddAndActivate2" and could always use the more powerful variant.
However, we don't need to break compatibility intentionally and for
little gain. Here, it's easy to let libnm also handle old server API, by
continuing to use "AddAndActivate" for nm_client_add_and_activate_connection().
Note that during package update, we don't restart the currently running
NetworkManager instance. In such a scenario, it can easily happen that
nmcli/libnm is newer than the server version. Let's try a bit harder
to not break that.
Changes as discussed in [1].
[1] https://gitlab.freedesktop.org/NetworkManager/NetworkManager/merge_requests/37#note_79876
2018-12-20 07:48:31 +01:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-10-30 16:40:40 +01:00
|
|
|
if (options) {
|
|
|
|
|
GVariantIter iter;
|
2021-11-09 13:28:54 +01:00
|
|
|
const char *option_name;
|
|
|
|
|
GVariant *option_value;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-10-30 16:40:40 +01:00
|
|
|
g_variant_iter_init(&iter, options);
|
|
|
|
|
while (g_variant_iter_next(&iter, "{&sv}", &option_name, &option_value)) {
|
|
|
|
|
gs_unref_variant GVariant *option_value_free = NULL;
|
2021-11-09 13:28:54 +01:00
|
|
|
const char *s;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-10-30 16:40:40 +01:00
|
|
|
option_value_free = option_value;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-11-17 15:54:49 +01:00
|
|
|
if (nm_streq(option_name, "persist")
|
|
|
|
|
&& g_variant_is_of_type(option_value, G_VARIANT_TYPE_STRING)) {
|
2018-10-25 16:54:37 +02:00
|
|
|
s = g_variant_get_string(option_value, NULL);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: rework tracking settings connections and settings plugins
2019-06-13 17:12:20 +02:00
|
|
|
is_volatile = FALSE;
|
2019-07-22 21:56:05 +02:00
|
|
|
persist_mode = NM_SETTINGS_CONNECTION_PERSIST_MODE_TO_DISK;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: rework tracking settings connections and settings plugins
2019-06-13 17:12:20 +02:00
|
|
|
if (nm_streq(s, "volatile")) {
|
|
|
|
|
persist_mode = NM_SETTINGS_CONNECTION_PERSIST_MODE_IN_MEMORY_ONLY;
|
|
|
|
|
is_volatile = TRUE;
|
|
|
|
|
} else if (nm_streq(s, "memory"))
|
|
|
|
|
persist_mode = NM_SETTINGS_CONNECTION_PERSIST_MODE_IN_MEMORY_ONLY;
|
|
|
|
|
else if (nm_streq(s, "disk")) {
|
|
|
|
|
/* pass */
|
|
|
|
|
} else {
|
2018-10-25 16:54:37 +02:00
|
|
|
error = g_error_new_literal(
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_INVALID_ARGUMENTS,
|
2018-11-17 15:54:49 +01:00
|
|
|
"Option \"persist\" must be one of \"volatile\", \"memory\" or \"disk\"");
|
2018-10-25 16:54:37 +02:00
|
|
|
goto error;
|
|
|
|
|
}
|
2018-11-19 11:16:44 +01:00
|
|
|
} else if (nm_streq(option_name, "bind-activation")
|
2018-11-17 15:54:49 +01:00
|
|
|
&& g_variant_is_of_type(option_value, G_VARIANT_TYPE_STRING)) {
|
2018-10-26 13:17:31 +02:00
|
|
|
s = g_variant_get_string(option_value, NULL);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-11-18 11:52:13 +01:00
|
|
|
if (nm_streq(s, "dbus-client"))
|
|
|
|
|
bind_dbus_client = TRUE;
|
|
|
|
|
else if (nm_streq(s, "none"))
|
|
|
|
|
bind_dbus_client = FALSE;
|
|
|
|
|
else {
|
2018-10-26 13:17:31 +02:00
|
|
|
error = g_error_new_literal(
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_INVALID_ARGUMENTS,
|
2018-11-19 11:16:44 +01:00
|
|
|
"Option \"bind-activation\" must be one of \"dbus-client\" or \"none\"");
|
2018-10-26 13:17:31 +02:00
|
|
|
goto error;
|
|
|
|
|
}
|
2018-10-25 16:54:37 +02:00
|
|
|
} else {
|
|
|
|
|
error = g_error_new_literal(NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_INVALID_ARGUMENTS,
|
2018-11-17 15:54:49 +01:00
|
|
|
"Unknown extra option passed");
|
2018-10-25 16:54:37 +02:00
|
|
|
goto error;
|
2018-10-30 16:40:40 +01:00
|
|
|
}
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
|
|
|
|
|
2020-01-29 15:55:38 +01:00
|
|
|
specific_object_path = nm_dbus_path_not_empty(specific_object_path);
|
|
|
|
|
device_path = nm_dbus_path_not_empty(device_path);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-11-01 13:59:57 +01:00
|
|
|
/* Try to create a new connection with the given settings.
|
|
|
|
|
* We allow empty settings for AddAndActivateConnection(). In that case,
|
2014-02-10 11:13:55 +01:00
|
|
|
* the connection will be completed in nm_utils_complete_generic() or
|
2013-11-01 13:59:57 +01:00
|
|
|
* nm_device_complete_connection() below. Just make sure we don't expect
|
|
|
|
|
* specific data to be in the connection until then (especially in
|
|
|
|
|
* validate_activation_request()).
|
|
|
|
|
*/
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we could not treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needed a snapshot of the activated
profile as applied-connection, all it could do was clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we got a NMConnection instance and wanted to keep
a reference to it, we could not do that, because we never knew
who else referenced and modified the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
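In sketch form, the resulting "has-a" relation looks like this (type and function names are illustrative; the real getter is nm_settings_connection_get_connection()):

#include <NetworkManager.h>

typedef struct {
    /* ... D-Bus export state, settings tracking, etc. ... */
    NMConnection *connection; /* delegate: a NMSimpleConnection instance */
} ExampleSettingsConnection;

static NMConnection *
example_settings_connection_get_connection(ExampleSettingsConnection *self)
{
    return self->connection;
}

/* Because the NMConnection is treated as immutable, "modifying" the
 * profile means swapping in a new instance, not mutating the old one. */
static void
example_settings_connection_set_connection(ExampleSettingsConnection *self,
                                           NMConnection *new_connection)
{
    g_object_ref(new_connection);
    g_object_unref(self->connection);
    self->connection = new_connection;
}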
2018-08-11 11:08:17 +02:00
|
|
|
incompl_conn = nm_simple_connection_new();
|
2015-04-15 14:53:30 -04:00
|
|
|
if (settings && g_variant_n_children(settings))
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
_nm_connection_replace_settings(incompl_conn,
|
|
|
|
|
settings,
|
|
|
|
|
NM_SETTING_PARSE_FLAGS_STRICT,
|
|
|
|
|
NULL);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-07-29 12:42:16 -05:00
|
|
|
subject = validate_activation_request(self,
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
invocation,
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
NULL,
|
|
|
|
|
incompl_conn,
|
2013-07-29 12:42:16 -05:00
|
|
|
device_path,
|
|
|
|
|
&device,
|
2018-04-12 11:15:42 +02:00
|
|
|
&is_vpn,
|
2013-07-29 12:42:16 -05:00
|
|
|
&error);
|
|
|
|
|
if (!subject)
|
2012-09-13 13:17:46 -05:00
|
|
|
goto error;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-12 11:15:42 +02:00
|
|
|
if (is_vpn) {
|
2012-09-13 13:17:46 -05:00
|
|
|
/* Try to fill the VPN's connection setting and name at least */
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
if (!nm_connection_get_setting_vpn(incompl_conn)) {
|
2014-10-15 15:27:25 -04:00
|
|
|
error = g_error_new_literal(NM_CONNECTION_ERROR,
|
|
|
|
|
NM_CONNECTION_ERROR_MISSING_SETTING,
|
2012-09-13 13:17:46 -05:00
|
|
|
"VPN connections require a 'vpn' setting");
|
2014-10-15 15:27:25 -04:00
|
|
|
g_prefix_error(&error, "%s: ", NM_SETTING_VPN_SETTING_NAME);
|
2012-09-13 13:17:46 -05:00
|
|
|
goto error;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
conns = nm_settings_connections_array_to_connections(
|
|
|
|
|
nm_settings_get_connections(priv->settings, NULL),
|
|
|
|
|
-1);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2017-09-29 15:11:33 +02:00
|
|
|
nm_utils_complete_generic(priv->platform,
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
incompl_conn,
|
2012-09-13 13:17:46 -05:00
|
|
|
NM_SETTING_VPN_SETTING_NAME,
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
conns,
|
2012-09-13 13:17:46 -05:00
|
|
|
NULL,
|
2014-08-25 16:21:59 +02:00
|
|
|
_("VPN connection"),
|
2019-05-07 10:15:10 +02:00
|
|
|
NULL,
|
2020-09-28 16:03:33 +02:00
|
|
|
NULL,
|
2012-09-13 13:17:46 -05:00
|
|
|
FALSE); /* No IPv6 by default for now */
|
|
|
|
|
} else {
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
conns = nm_settings_connections_array_to_connections(
|
|
|
|
|
nm_settings_get_connections(priv->settings, NULL),
|
|
|
|
|
-1);
|
2012-09-13 13:17:46 -05:00
|
|
|
/* Let each device subclass complete the connection */
|
|
|
|
|
if (!nm_device_complete_connection(device,
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
incompl_conn,
|
2012-09-13 13:17:46 -05:00
|
|
|
specific_object_path,
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implemente the NMConnection
interface. Instead, a settings-connection references now a NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
2018-08-11 11:08:17 +02:00
|
|
|
conns,
|
2012-09-13 13:17:46 -05:00
|
|
|
&error))
|
|
|
|
|
goto error;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-06-20 15:45:34 +02:00
|
|
|
nm_assert(_nm_connection_verify(incompl_conn, NULL) == NM_SETTING_VERIFY_SUCCESS);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-08-28 16:19:20 -05:00
|
|
|
core: improve and fix keeping connection active based on "connection.permissions"
By setting "connection.permissions", a profile is restricted to a
particular user.
That means, for example, that another user cannot see, modify, delete,
activate or deactivate the profile. It also means that the profile
will only autoconnect while the user is logged in (has a session).
Note that root is always able to activate the profile. Likewise, the
user is also allowed to manually activate their own profile, even if no
session currently exists (which can easily happen with `sudo`).
When the user logs out (the session goes away), we want to disconnect
the profile. However, there are conflicting goals here:
1) if the profile was activated by the root user, then logging out the
   user should not disconnect the profile. The patch fixes that by not
   binding the activation to the connection if the activation is done
   by the root user.
2) if the profile was activated by the owner while they had no session,
   then it should stay alive until the user logs in (once) and logs
   out again. This is already handled by the previous commit.
   Yes, this point is odd. If you first do
   $ sudo -u $OTHER_USER nmcli connection up $PROFILE
   the profile activates despite there being no session. If you then do
   $ ssh guest@localhost nmcli device
   you'll still see the profile active. However, the moment the SSH session
   ends, a session closes and the profile disconnects. It's unclear how to
   solve this any better. I think a user who cares about this should not
   activate the profile without having a session in the first place.
There are quite a few special cases, in particular with internal
activations. In those cases we need to decide whether to bind the
activation to the profile's visibility.
Also, expose the "bind" setting in the D-Bus API. Note that in the future
this flag may become modifiable via the D-Bus API. Likewise, we may add
related API that allows tweaking the lifetime of the activation.
Also, I think we broke handling of connection visibility with 37e8c53eeed
"core: Introduce helper class to track connection keep alive". This
should be fixed now too, with improved behavior.
Fixes: 37e8c53eeed579fe34a68819cd12f3295d581394
https://bugzilla.redhat.com/show_bug.cgi?id=1530977
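
As an illustration of that binding rule, a minimal hypothetical sketch (the
helper name is made up; the NM_ACTIVATION_STATE_FLAG_* names are from the
public nm-dbus-interface.h, and the real nm-manager.c helper may differ):

/* hypothetical sketch: root's activations are not bound to the
 * profile's visibility; everyone else's are. */
static NMActivationStateFlags
sketch_bind_lifetime_to_profile_visibility(NMAuthSubject *subject)
{
    if (nm_auth_subject_is_unix_process(subject)
        && nm_auth_subject_get_unix_process_uid(subject) == 0)
        return NM_ACTIVATION_STATE_FLAG_NONE;
    return NM_ACTIVATION_STATE_FLAG_LIFETIME_BOUND_TO_PROFILE_VISIBILITY;
}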

    active = _new_active_connection(self,
                                    is_vpn,
                                    NULL,
                                    incompl_conn,
                                    NULL,
                                    specific_object_path,
                                    device,
                                    subject,
                                    NM_ACTIVATION_TYPE_MANAGED,
                                    NM_ACTIVATION_REASON_USER_REQUEST,
                                    _activation_bind_lifetime_to_profile_visibility(subject),
                                    &error);
    if (!active)
        goto error;

    if (bind_dbus_client) {
        NMKeepAlive *keep_alive;

        keep_alive = nm_active_connection_get_keep_alive(active);
        nm_keep_alive_set_dbus_client_watch(keep_alive, dbus_connection, sender);
        nm_keep_alive_arm(keep_alive);
    }

all: return output dictionary from "AddAndActivate2"
Add an "a{sv}" output argument to the "AddAndActivate2" D-Bus API.
"AddAndActivate2" replaces "AddAndActivate" with more options.
It also has a dictionary input argument for forward compatibility, so that we
hopefully won't need an "AddAndActivate3". However, it lacked a similar
output dictionary. Add it for future extensibility. I think this is
really a workaround for a shortcoming of D-Bus, which does provide strong
typing and type information about its API, but does not allow extending
an existing API in a backward-compatible manner. So we either resort to
Method(), Method2(), Method3() variants, or a catch-all variant with a
generic "a{sv}" input/output argument.
In libnm, rename "nm_client_add_and_activate_connection_options()" to
"nm_client_add_and_activate_connection2()". I think libnm API should have
an obvious correspondence with the D-Bus API. Stated differently, if
"AddAndActivateOptions" were the better name, then the D-Bus API should
be renamed. We should prefer one name over the other, but regardless
of which is preferred, the naming of the D-Bus and libnm API should
correspond.
In this case, I do think that AddAndActivate2() is a better name than
AddAndActivateOptions(). Hence I rename the libnm API.
Also, unless necessary, let libnm still call "AddAndActivate" instead of
"AddAndActivate2". Our backward compatibility works such that libnm
requires a server version at least as new as itself. As such, libnm
could theoretically assume that the server version is new enough to support
"AddAndActivate2" and always use the more powerful variant.
However, we don't need to break compatibility intentionally and for
little gain. Here, it's easy to let libnm also handle the old server API, by
continuing to use "AddAndActivate" for nm_client_add_and_activate_connection().
Note that during a package update, we don't restart the currently running
NetworkManager instance. In such a scenario, it can easily happen that
nmcli/libnm is newer than the server version. Let's try a bit harder
not to break that.
Changes as discussed in [1].
[1] https://gitlab.freedesktop.org/NetworkManager/NetworkManager/merge_requests/37#note_79876
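
As a hedged client-side sketch of the renamed libnm API (this assumes the
nm_client_add_and_activate_connection2_finish() signature as documented for
libnm; verify against the installed headers before relying on it):

/* sketch of a libnm async callback consuming the new "a{sv}" output
 * dictionary returned by AddAndActivate2. */
static void
add_and_activate2_done_cb(GObject *source, GAsyncResult *result, gpointer user_data)
{
    GVariant           *out_result = NULL;
    GError             *error      = NULL;
    NMActiveConnection *ac;

    ac = nm_client_add_and_activate_connection2_finish(NM_CLIENT(source),
                                                       result,
                                                       &out_result,
                                                       &error);
    if (!ac) {
        g_printerr("AddAndActivate2 failed: %s\n", error->message);
        g_clear_error(&error);
        return;
    }
    /* "out_result" is the output dictionary; it may be empty today,
     * but it is where future extensions would surface. */
    g_clear_pointer(&out_result, (GDestroyNotify) g_variant_unref);
    g_object_unref(ac);
}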
settings: rework tracking settings connections and settings plugins
Completely rework how settings plugins handle connections and how
NMSettings tracks the list of connections.
Previously, settings plugins would return objects of (a subtype of) type
NMSettingsConnection. The NMSettingsConnection was tightly coupled with
the settings plugin. That has a lot of downsides.
Change that. When changing this basic relation of how settings connections
are tracked, everything falls apart. That's why this is a huge change.
Also, since I have to largely rewrite the settings plugins, I also
added support for multiple keyfile directories, handle in-memory
connections only in the keyfile plugin, and (partly) use copy-on-write
NMConnection instances. I don't want to spend effort rewriting large parts
while preserving the old way, which should change anyway. E.g. while
rewriting ifcfg-rh, I don't want to let it handle in-memory connections
because that's not right long-term.
--
If the settings plugins themselves create subtypes of NMSettingsConnection
instances, then a lot of knowledge about tracking connections moves
to the plugins.
Just try to follow what happens during nm_settings_add_connection().
Note how the logic is spread out:
- nm_settings_add_connection() calls the plugin's add_connection()
- add_connection() creates an NMSettingsConnection subtype
- the plugin has to know that it's called during add-connection and
  must not emit the NM_SETTINGS_PLUGIN_CONNECTION_ADDED signal
- NMSettings calls claim_connection(), which hooks up the new
  NMSettingsConnection instance and configures the instance
  (like calling nm_settings_connection_added()).
This summary does not sound like a lot, but try to follow that code. The logic
is all over the place.
Instead, settings plugins should have a very simple API for adding, modifying,
deleting, loading and reloading connections. All the plugin does is return an
NMSettingsStorage handle. The storage instance is a handle that identifies a
profile in storage (e.g. a particular file). The settings plugin is free to
subtype NMSettingsStorage, but it's not necessary.
No more events are raised, and the settings plugin implements the small
API in a straightforward manner.
NMSettings now drives all of this. Even NMSettingsConnection now has
very little concern about how it's tracked and delegates only to NMSettings.
This should make settings plugins simpler. Currently settings plugins
are so cumbersome to implement that we avoid having them. It should not be
like that; it should be easy, beneficial and lightweight to create a new
settings plugin.
Note also how the settings plugins no longer care about duplicate UUIDs.
Duplicated UUIDs are a fact of life and NMSettings must handle them. No
need to overly concern settings plugins with that.
--
NMSettingsConnection is exposed directly on D-Bus (being a subtype of
NMDBusObject) but it was also a GObject type provided by the settings
plugin. Hence, it was not possible to migrate a profile from one plugin to
another.
However, that would be useful when one plugin does not support a
connection type (like ifcfg-rh not supporting VPN). Currently such
migration is not implemented, except for migrating profiles to/from keyfile's
run directory. The problem is that migrating profiles in general is
complicated, but in some cases it is important to do.
For example, checkpoint rollback should recreate the profile in the right
settings plugin, not just add it to persistent storage. This is not yet
properly implemented.
--
Previously, both the keyfile and ifcfg-rh plugins implemented in-memory
(unsaved) profiles, while the ifupdown plugin cannot handle them. That meant
duplicated code, and an ifupdown profile could not be modified or made unsaved.
This is now unified, and only the keyfile plugin handles in-memory profiles
(bgo #744711).
Also, NMSettings is aware of such profiles and treats them specially.
In particular, NMSettings drives the migration between persistent and
non-persistent storage.
Note that a settings plugin may create truly generated, in-memory profiles.
The settings plugin is free to generate and persist the profiles in any way it
wishes. But the concept of "unsaved" profiles is now something explicitly
handled by the keyfile plugin. Also, these "unsaved" keyfile profiles are
persisted to the file system too, in the /run directory. This is great for two
reasons: first of all, all profiles from keyfile storage in fact have a backing
file -- even the unsaved ones. It also means you can create "unsaved" profiles
in /run and load them with `nmcli connection load`, meaning there is a
file-based API for creating unsaved profiles.
The other advantage is that these profiles now survive restarting
NetworkManager. It's paramount that restarting the daemon is as
non-disruptive as possible. Persisting unsaved files to /run improves
this significantly.
--
In the past, NMSettingsConnection also implemented the NMConnection interface.
That was already changed a while ago; instead, users now call
nm_settings_connection_get_connection() to delegate to an
NMSimpleConnection. What still happened, however, was that the NMConnection
instance was never swapped out; instead, the instance was modified with
nm_connection_replace_settings_from_connection(), clear-secrets, etc.
Change that and treat the NMConnection instance as immutable. Instead of
modifying it, reference/clone a new instance. Previously, when somebody
wanted to keep a reference to an NMConnection, the profile would be cloned.
Now, it is supposed to be safe to reference the instance directly, and
everybody must ensure not to modify the instance.
nmtst_connection_assert_unchanging() should help with that.
The point is that the settings plugins may keep references to the
NMConnection instance, and so does the NMSettingsConnection. We want
to avoid cloning the instances as long as they are the same.
Likewise, the device's applied connection can now also be referenced
instead of cloned. This is not yet done, and further improvements of
this kind are possible.
--
Also implement multiple keyfile directories: /usr/lib, /etc, /run (rh #1674545,
bgo #772414).
It was always the case that multiple files could provide the same UUID
(for both keyfile and ifcfg-rh). For the keyfile plugin, if a profile in
read-only storage in /usr/lib gets modified, then it actually gets stored in
/etc (or /run, if the profile is unsaved).
--
While at it, make /etc/network/interfaces profiles for the ifupdown plugin
reloadable.
--
https://bugzilla.gnome.org/show_bug.cgi?id=772414
https://bugzilla.gnome.org/show_bug.cgi?id=744711
https://bugzilla.redhat.com/show_bug.cgi?id=1674545
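
As a minimal sketch of the clone-then-swap discipline this implies
(nm_simple_connection_new_clone() and nm_connection_add_setting() are existing
libnm-core API; the helper itself is hypothetical):

/* hypothetical sketch: never mutate a shared NMConnection in place;
 * clone it, modify the clone, then swap the reference. */
static void
sketch_replace_setting(NMConnection **p_connection, NMSetting *setting)
{
    NMConnection *clone;

    clone = nm_simple_connection_new_clone(*p_connection);
    nm_connection_add_setting(clone, setting); /* takes ownership of @setting */
    g_object_unref(*p_connection);
    *p_connection = clone;
}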

    nm_active_connection_authorize(active,
                                   incompl_conn,
                                   _async_op_complete_ac_auth_cb,
                                   _async_op_data_new_ac_auth_add_and_activate(self,
                                                                               async_op_type,
                                                                               active,
                                                                               invocation,
                                                                               incompl_conn,
                                                                               persist_mode,
                                                                               is_volatile));

    /* we passed the pointers on to _async_op_data_new_ac_auth_add_and_activate() */
    g_steal_pointer(&incompl_conn);
    g_steal_pointer(&active);
    return;

error:
    nm_audit_log_connection_op(NM_AUDIT_OP_CONN_ADD_ACTIVATE,
                               NULL,
                               FALSE,
                               NULL,
                               subject,
                               error->message);
    g_dbus_method_invocation_take_error(invocation, error);
}

/*****************************************************************************/

gboolean
nm_manager_deactivate_connection(NMManager          *manager,
                                 NMActiveConnection *active,
                                 NMDeviceStateReason reason,
                                 GError            **error)
{
    if (NM_IS_VPN_CONNECTION(active)) {
        NMActiveConnectionStateReason vpn_reason =
            NM_ACTIVE_CONNECTION_STATE_REASON_USER_DISCONNECTED;

        if (nm_device_state_reason_check(reason) == NM_DEVICE_STATE_REASON_CONNECTION_REMOVED)
            vpn_reason = NM_ACTIVE_CONNECTION_STATE_REASON_CONNECTION_REMOVED;

        if (!nm_vpn_connection_deactivate(NM_VPN_CONNECTION(active), vpn_reason, FALSE)) {
            g_set_error_literal(error,
                                NM_MANAGER_ERROR,
                                NM_MANAGER_ERROR_CONNECTION_NOT_ACTIVE,
                                "The VPN connection was not active.");
            return FALSE;
        }
    } else {
        nm_assert(NM_IS_ACT_REQUEST(active));
        nm_device_disconnect_active_connection(active,
                                               reason,
                                               NM_ACTIVE_CONNECTION_STATE_REASON_UNKNOWN);
    }

    _notify(manager, PROP_ACTIVE_CONNECTIONS);
    return TRUE;
}

static void
deactivate_net_auth_done_cb(NMAuthChain *chain, GDBusMethodInvocation *context, gpointer user_data)
{
    NMManager          *self  = NM_MANAGER(user_data);
    GError             *error = NULL;
    NMAuthCallResult    result;
    NMActiveConnection *active;
    char               *path;

    nm_assert(G_IS_DBUS_METHOD_INVOCATION(context));

    c_list_unlink(nm_auth_chain_parent_lst_list(chain));

    path   = nm_auth_chain_get_data(chain, "path");
    result = nm_auth_chain_get_result(chain, NM_AUTH_PERMISSION_NETWORK_CONTROL);
    active = active_connection_get_by_path(self, path);

    if (result != NM_AUTH_CALL_RESULT_YES) {
        error = g_error_new_literal(NM_MANAGER_ERROR,
                                    NM_MANAGER_ERROR_PERMISSION_DENIED,
                                    "Not authorized to deactivate connections");
    } else if (!active) {
        error = g_error_new_literal(NM_MANAGER_ERROR,
                                    NM_MANAGER_ERROR_CONNECTION_NOT_ACTIVE,
                                    "The connection was not active.");
    } else {
        /* success; deactivation allowed */
        if (!nm_manager_deactivate_connection(self,
                                              active,
                                              NM_DEVICE_STATE_REASON_USER_REQUESTED,
                                              &error))
            nm_assert(error);
    }

    if (active) {
        nm_audit_log_connection_op(NM_AUDIT_OP_CONN_DEACTIVATE,
                                   nm_active_connection_get_settings_connection(active),
                                   !error,
                                   NULL,
                                   nm_auth_chain_get_subject(chain),
                                   error ? error->message : NULL);
    }

    if (error)
        g_dbus_method_invocation_take_error(context, error);
    else
        g_dbus_method_invocation_return_value(context, NULL);
}
|
|
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
impl_manager_deactivate_connection(NMDBusObject *obj,
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
2021-11-09 13:28:54 +01:00
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *dbus_connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER(obj);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
NMActiveConnection *ac;
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates for keeping the actual profiles.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we cannot treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
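A minimal sketch of the delegation described above, with partly
hypothetical names (the private-struct layout is illustrative;
only nm_settings_connection_get_connection() is the real accessor):
the settings-connection now "has-a" NMConnection that it hands out,
instead of "is-a" NMConnection.

    typedef struct {
        /* an NMSimpleConnection instance holding the actual profile */
        NMConnection *connection;
    } NMSettingsConnectionPrivate;

    NMConnection *
    nm_settings_connection_get_connection(NMSettingsConnection *self)
    {
        g_return_val_if_fail(NM_IS_SETTINGS_CONNECTION(self), NULL);

        /* delegate: callers operate on the trivial NMConnection,
         * not on the NMSettingsConnection itself */
        return NM_SETTINGS_CONNECTION_GET_PRIVATE(self)->connection;
    }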
2018-08-11 11:08:17 +02:00
|
|
|
NMSettingsConnection *sett_conn = NULL;
|
2021-11-09 13:28:54 +01:00
|
|
|
GError *error = NULL;
|
|
|
|
|
NMAuthSubject *subject = NULL;
|
|
|
|
|
NMAuthChain *chain;
|
|
|
|
|
const char *active_path;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
g_variant_get(parameters, "(&o)", &active_path);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2012-08-22 17:11:31 -05:00
|
|
|
/* Find the connection by its object path */
|
2015-08-12 18:29:10 +02:00
|
|
|
ac = active_connection_get_by_path(self, active_path);
|
|
|
|
|
if (ac)
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
sett_conn = nm_active_connection_get_settings_connection(ac);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
if (!sett_conn) {
|
2010-06-04 00:42:10 -07:00
|
|
|
error = g_error_new_literal(NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_CONNECTION_NOT_ACTIVE,
|
|
|
|
|
"The connection was not active.");
|
2013-07-29 11:53:23 -05:00
|
|
|
goto done;
|
2010-06-04 00:42:10 -07:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-01-17 11:18:23 -06:00
|
|
|
/* Validate the caller */
|
2019-12-19 11:30:38 +01:00
|
|
|
subject = nm_dbus_manager_new_auth_subject_from_context(invocation);
|
2014-01-17 11:18:23 -06:00
|
|
|
if (!subject) {
|
|
|
|
|
error = g_error_new_literal(NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
2019-09-04 10:18:56 +02:00
|
|
|
NM_UTILS_ERROR_MSG_REQ_UID_UKNOWN);
|
2014-01-17 11:18:23 -06:00
|
|
|
goto done;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
if (!nm_auth_is_subject_in_acl_set_error(nm_settings_connection_get_connection(sett_conn),
|
2018-04-12 09:48:16 +02:00
|
|
|
subject,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
&error))
|
2014-01-17 11:18:23 -06:00
|
|
|
goto done;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
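/* Authorization is asynchronous: queue a check for
 * NM_AUTH_PERMISSION_NETWORK_CONTROL; deactivate_net_auth_done_cb()
 * completes the D-Bus invocation once the result arrives. */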
chain = nm_auth_chain_new_subject(subject, invocation, deactivate_net_auth_done_cb, self);
|
2019-05-26 18:49:55 +02:00
|
|
|
c_list_link_tail(&priv->auth_lst_head, nm_auth_chain_parent_lst_list(chain));
|
2013-07-29 11:53:23 -05:00
|
|
|
nm_auth_chain_set_data(chain, "path", g_strdup(active_path), g_free);
|
|
|
|
|
nm_auth_chain_add_call(chain, NM_AUTH_PERMISSION_NETWORK_CONTROL, TRUE);
|
|
|
|
|
|
|
|
|
|
done:
|
2015-07-14 10:26:54 +02:00
|
|
|
if (error) {
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
2018-08-11 11:08:17 +02:00
|
|
|
if (sett_conn) {
|
|
|
|
|
nm_audit_log_connection_op(NM_AUDIT_OP_CONN_DEACTIVATE,
|
|
|
|
|
sett_conn,
|
|
|
|
|
FALSE,
|
|
|
|
|
NULL,
|
2015-07-14 10:26:54 +02:00
|
|
|
subject,
|
|
|
|
|
error->message);
|
|
|
|
|
}
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
g_dbus_method_invocation_take_error(invocation, error);
|
2015-07-14 10:26:54 +02:00
|
|
|
}
|
|
|
|
|
g_clear_object(&subject);
|
2007-10-03 14:48:25 +00:00
|
|
|
}
|
|
|
|
|
|
2022-07-11 16:06:14 +02:00
|
|
|
static void
|
|
|
|
|
sleep_devices_check_empty(NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
GDBusMethodInvocation *invocation;
|
|
|
|
|
|
|
|
|
|
if (g_hash_table_size(priv->sleep_devices) > 0)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
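/* no devices are holding back sleep anymore: complete all
 * pending sleep-related D-Bus invocations */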
while (priv->sleep_invocations) {
|
|
|
|
|
invocation = priv->sleep_invocations->data;
|
|
|
|
|
g_dbus_method_invocation_return_value(invocation, NULL);
|
|
|
|
|
priv->sleep_invocations = g_slist_remove(priv->sleep_invocations, invocation);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-05-05 14:14:40 +02:00
|
|
|
static gboolean
|
|
|
|
|
sleep_devices_add(NMManager *self, NMDevice *device, gboolean suspending)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2016-05-05 14:14:40 +02:00
|
|
|
NMSleepMonitorInhibitorHandle *handle = NULL;
|
|
|
|
|
|
|
|
|
|
if (g_hash_table_lookup_extended(priv->sleep_devices, device, NULL, (gpointer *) &handle)) {
|
|
|
|
|
if (suspending) {
|
|
|
|
|
/* if we are suspending, always insert a new handle in sleep_devices.
|
|
|
|
|
* Even if we had an old handle, it might be stale by now. */
|
|
|
|
|
g_hash_table_insert(priv->sleep_devices,
|
|
|
|
|
device,
|
|
|
|
|
nm_sleep_monitor_inhibit_take(priv->sleep_monitor));
|
|
|
|
|
if (handle)
|
|
|
|
|
nm_sleep_monitor_inhibit_release(priv->sleep_monitor, handle);
|
|
|
|
|
}
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-05-05 14:14:40 +02:00
|
|
|
g_hash_table_insert(priv->sleep_devices,
|
|
|
|
|
g_object_ref(device),
|
|
|
|
|
suspending ? nm_sleep_monitor_inhibit_take(priv->sleep_monitor) : NULL);
|
2021-08-05 09:06:12 +02:00
|
|
|
g_signal_connect(device, "notify::" NM_DEVICE_STATE, G_CALLBACK(device_sleep_cb), self);
|
2016-05-05 14:14:40 +02:00
|
|
|
return TRUE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static gboolean
|
|
|
|
|
sleep_devices_remove(NMManager *self, NMDevice *device)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2016-05-05 14:14:40 +02:00
|
|
|
NMSleepMonitorInhibitorHandle *handle;
|
|
|
|
|
|
|
|
|
|
if (!g_hash_table_lookup_extended(priv->sleep_devices, device, NULL, (gpointer *) &handle))
|
|
|
|
|
return FALSE;
|
|
|
|
|
|
|
|
|
|
if (handle)
|
|
|
|
|
nm_sleep_monitor_inhibit_release(priv->sleep_monitor, handle);
|
|
|
|
|
|
|
|
|
|
/* Remove device from hash */
|
|
|
|
|
g_signal_handlers_disconnect_by_func(device, device_sleep_cb, self);
|
|
|
|
|
g_hash_table_remove(priv->sleep_devices, device);
|
|
|
|
|
g_object_unref(device);
|
2022-07-11 16:06:14 +02:00
|
|
|
|
|
|
|
|
sleep_devices_check_empty(self);
|
|
|
|
|
|
2016-05-05 14:14:40 +02:00
|
|
|
return TRUE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
sleep_devices_clear(NMManager *self)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
NMDevice *device;
|
2016-05-05 14:14:40 +02:00
|
|
|
NMSleepMonitorInhibitorHandle *handle;
|
|
|
|
|
GHashTableIter iter;
|
|
|
|
|
|
|
|
|
|
g_hash_table_iter_init(&iter, priv->sleep_devices);
|
|
|
|
|
while (g_hash_table_iter_next(&iter, (gpointer *) &device, (gpointer *) &handle)) {
|
|
|
|
|
g_signal_handlers_disconnect_by_func(device, device_sleep_cb, self);
|
|
|
|
|
if (handle)
|
|
|
|
|
nm_sleep_monitor_inhibit_release(priv->sleep_monitor, handle);
|
|
|
|
|
g_object_unref(device);
|
|
|
|
|
g_hash_table_iter_remove(&iter);
|
|
|
|
|
}
|
2022-07-11 16:06:14 +02:00
|
|
|
|
|
|
|
|
sleep_devices_check_empty(self);
|
2016-05-05 14:14:40 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
device_sleep_cb(NMDevice *device, GParamSpec *pspec, NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
switch (nm_device_get_state(device)) {
|
|
|
|
|
case NM_DEVICE_STATE_DISCONNECTED:
|
|
|
|
|
_LOGD(LOGD_SUSPEND, "sleep: unmanaging device %s", nm_device_get_ip_iface(device));
|
|
|
|
|
nm_device_set_unmanaged_by_flags_queue(device,
|
|
|
|
|
NM_UNMANAGED_SLEEPING,
|
|
|
|
|
TRUE,
|
|
|
|
|
NM_DEVICE_STATE_REASON_SLEEPING);
|
|
|
|
|
break;
|
|
|
|
|
case NM_DEVICE_STATE_UNMANAGED:
|
|
|
|
|
_LOGD(LOGD_SUSPEND, "sleep: device %s is ready", nm_device_get_ip_iface(device));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-05-05 14:14:40 +02:00
|
|
|
if (!sleep_devices_remove(self, device))
|
|
|
|
|
g_return_if_reached();
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-05-05 14:14:40 +02:00
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2010-05-22 08:55:30 -07:00
|
|
|
static void
|
2014-04-03 15:14:00 -04:00
|
|
|
do_sleep_wake(NMManager *self, gboolean sleeping_changed)
|
2007-02-08 15:34:26 +00:00
|
|
|
{
|
2010-05-22 08:55:30 -07:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2014-04-03 15:14:00 -04:00
|
|
|
gboolean suspending, waking_from_suspend;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device;
|
2007-02-08 15:34:26 +00:00
|
|
|
|
2014-04-03 15:14:00 -04:00
|
|
|
suspending = sleeping_changed && priv->sleeping;
|
|
|
|
|
waking_from_suspend = sleeping_changed && !priv->sleeping;
|
|
|
|
|
|
2010-05-22 08:55:30 -07:00
|
|
|
if (manager_sleeping(self)) {
|
2017-11-29 09:42:39 +01:00
|
|
|
_LOGD(LOGD_SUSPEND, "sleep: %s...", suspending ? "sleeping" : "disabling");
|
2007-02-08 15:34:26 +00:00
|
|
|
|
2014-04-03 15:14:00 -04:00
|
|
|
/* FIXME: are there still hardware devices that need to be disabled around
|
|
|
|
|
* suspend/resume?
|
2007-02-08 15:34:26 +00:00
|
|
|
*/
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases a more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
were perfect and consumed only 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
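A hedged illustration of the embedded-CList pattern argued for above,
using a hypothetical Item type (only the c-list and nm_assert calls
themselves are real API):

    typedef struct {
        char *name;
        /* embedded link node, like NMDevice's devices_lst */
        CList items_lst;
    } Item;

    static CList items_lst_head = C_LIST_INIT(items_lst_head);

    static void
    item_track(Item *item)
    {
        nm_assert(!c_list_is_linked(&item->items_lst));
        c_list_link_tail(&items_lst_head, &item->items_lst);
    }

    static void
    item_untrack(Item *item)
    {
        /* O(1) unlink, no access to the list head required;
         * c_list_unlink() also re-initializes the node, so
         * c_list_is_linked() answers correctly afterwards. */
        c_list_unlink(&item->items_lst);
    }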
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
2019-04-23 15:30:53 +02:00
|
|
|
if (nm_device_is_software(device)) {
|
|
|
|
|
/* If a user disables networking we consider that as an
|
|
|
|
|
* indication that also software devices must be disconnected.
|
|
|
|
|
* But we don't want to destroy them for external events such as
|
|
|
|
|
* a system suspend.
|
|
|
|
|
*/
|
|
|
|
|
if (suspending)
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2014-04-03 15:14:00 -04:00
|
|
|
/* Wake-on-LAN devices will be taken down post-suspend rather than pre- */
|
2017-09-29 15:11:33 +02:00
|
|
|
if (suspending && device_is_wake_on_lan(priv->platform, device)) {
|
2016-05-05 14:14:40 +02:00
|
|
|
_LOGD(LOGD_SUSPEND,
|
|
|
|
|
"sleep: device %s has wake-on-lan, skipping",
|
|
|
|
|
nm_device_get_ip_iface(device));
|
2014-04-03 15:14:00 -04:00
|
|
|
continue;
|
2016-05-05 14:14:40 +02:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-05-05 14:14:40 +02:00
|
|
|
if (nm_device_is_activating(device)
|
|
|
|
|
|| nm_device_get_state(device) == NM_DEVICE_STATE_ACTIVATED) {
|
|
|
|
|
_LOGD(LOGD_SUSPEND,
|
|
|
|
|
"sleep: wait disconnection of device %s",
|
|
|
|
|
nm_device_get_ip_iface(device));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-05-05 14:14:40 +02:00
|
|
|
if (sleep_devices_add(self, device, suspending))
|
|
|
|
|
nm_device_queue_state(device,
|
|
|
|
|
NM_DEVICE_STATE_DEACTIVATING,
|
|
|
|
|
NM_DEVICE_STATE_REASON_SLEEPING);
|
|
|
|
|
} else {
|
|
|
|
|
nm_device_set_unmanaged_by_flags(device,
|
|
|
|
|
NM_UNMANAGED_SLEEPING,
|
|
|
|
|
TRUE,
|
|
|
|
|
NM_DEVICE_STATE_REASON_SLEEPING);
|
2013-12-17 14:24:39 -05:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
2010-05-22 08:55:30 -07:00
|
|
|
} else {
|
2017-11-29 09:42:39 +01:00
|
|
|
_LOGD(LOGD_SUSPEND, "sleep: %s...", waking_from_suspend ? "waking up" : "re-enabling");
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-07-16 11:57:14 +02:00
|
|
|
sleep_devices_clear(self);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-04-03 15:14:00 -04:00
|
|
|
if (waking_from_suspend) {
|
core: track devices in manager via embedded CList
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
2014-04-03 15:14:00 -04:00
|
|
|
if (nm_device_is_software(device))
|
|
|
|
|
continue;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-11-16 11:46:29 +01:00
|
|
|
/* Belatedly take down Wake-on-LAN devices; ideally we wouldn't have to do this
|
|
|
|
|
* but for now it's the only way to make sure we re-check their connectivity.
|
|
|
|
|
*/
|
2017-09-29 15:11:33 +02:00
|
|
|
if (device_is_wake_on_lan(priv->platform, device))
|
device: remove default-unmanaged and refactor unmanaged flags
Get rid of NM_UNMANAGED_DEFAULT and refine the interaction between
unmanaged flags, device state and managed property.
Previously, the NM_UNMANAGED_DEFAULT was special in that a device was
still considered managed if it had solely the NM_UNMANAGED_DEFAULT flag
set and its state was managed. Thus, whether the device (state) was managed
depended on the device state too.
Now, a device is considered managed (or unmanaged) based on the unmanaged
flags and realization state alone. At the same time, the device state
directly corresponds to the managed property of the device. Of course,
while changing the unmanaged flags, that invariant is briefly violated
until the state transition is complete.
Introduce more unmanaged flags, some of which are non-authoritative.
For example, the EXTERNAL_DOWN flag only has an effect as long as the user
didn't explicitly manage the device (NM_UNMANAGED_USER_EXPLICIT). In other
words, certain flags can render other flags ineffective. Whether the device
is considered managed depends on the flags but also on the explicitly unset flags.
In a way, this is similar to before, where NM_UNMANAGED_DEFAULT was ignored
(if no other flags were present).
Also, previously a device that was NM_UNMANAGED_DEFAULT and in disconnected
state would transition back to unmanaged. No longer do that. Once a device is
managed, it stays managed as long as the flags indicate it should be managed.
However, the user can also modify the unmanaged flags via the D-Bus API.
Also get rid of nm_device_finish_init(). That was previously called
by NMManager after add_device(). As we now realize devices (possibly
multiple times) this should be handled during realization.
https://bugzilla.gnome.org/show_bug.cgi?id=746566
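The precedence rule described above ("certain flags can render other
flags ineffective") can be pictured with a purely hypothetical helper;
the flag names are the real enum values, but the evaluation logic is a
simplification, not the upstream implementation:

    /* Hypothetical simplification: the non-authoritative
     * NM_UNMANAGED_EXTERNAL_DOWN flag stops counting once the user
     * explicitly managed the device, i.e. once USER_EXPLICIT was
     * explicitly unset by the user. */
    static gboolean
    is_unmanaged_simplified(NMUnmanagedFlags set_flags, NMUnmanagedFlags unset_flags)
    {
        if (unset_flags & NM_UNMANAGED_USER_EXPLICIT)
            set_flags &= ~NM_UNMANAGED_EXTERNAL_DOWN;
        return set_flags != 0;
    }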
2015-09-15 15:35:16 +02:00
|
|
|
nm_device_set_unmanaged_by_flags(device,
|
|
|
|
|
NM_UNMANAGED_SLEEPING,
|
|
|
|
|
TRUE,
|
|
|
|
|
NM_DEVICE_STATE_REASON_SLEEPING);
|
2016-11-16 11:46:29 +01:00
|
|
|
|
|
|
|
|
/* Check if the device is unmanaged but the state transition is still pending.
|
|
|
|
|
* If so, change state now so that later we re-manage the device forcing a
|
|
|
|
|
* re-check of available connections.
|
|
|
|
|
*/
|
|
|
|
|
if (!nm_device_get_managed(device, FALSE)
|
|
|
|
|
&& nm_device_get_state(device) != NM_DEVICE_STATE_UNMANAGED) {
|
|
|
|
|
nm_device_state_changed(device,
|
|
|
|
|
NM_DEVICE_STATE_UNMANAGED,
|
|
|
|
|
NM_DEVICE_STATE_REASON_SLEEPING);
|
2014-04-03 15:14:00 -04:00
|
|
|
}
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
|
|
|
|
|
2009-11-24 10:43:43 -08:00
|
|
|
/* Ensure rfkill state is up-to-date since we don't respond to state
|
|
|
|
|
* changes during sleep.
|
|
|
|
|
*/
|
2022-02-01 22:19:36 +01:00
|
|
|
_rfkill_update(self, NM_RFKILL_TYPE_UNKNOWN);
|
2009-11-24 10:43:43 -08:00
|
|
|
|
2009-06-11 00:39:12 -04:00
|
|
|
/* Re-manage managed devices */
|
core: track devices in manager via embedded CList
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
2009-12-23 00:03:45 -08:00
|
|
|
guint i;
|
2009-11-20 09:11:46 -08:00
|
|
|
|
2019-04-23 16:39:17 +02:00
|
|
|
if (nm_device_is_software(device)
|
|
|
|
|
&& !nm_device_get_unmanaged_flags(device, NM_UNMANAGED_SLEEPING)) {
|
|
|
|
|
/* DHCP leases of software devices could have gone stale
|
|
|
|
|
* so we need to renew them. */
|
2022-08-24 16:50:14 +02:00
|
|
|
nm_device_update_dynamic_ip_setup(device, "wake up");
|
2013-12-17 14:24:39 -05:00
|
|
|
continue;
|
2016-03-26 01:26:36 -04:00
|
|
|
}
|
2013-12-17 14:24:39 -05:00
|
|
|
|
2009-11-20 09:11:46 -08:00
|
|
|
/* enable/disable wireless devices since we don't respond
|
|
|
|
|
* to killswitch changes during sleep.
|
|
|
|
|
*/
|
2022-02-01 20:21:34 +01:00
|
|
|
for (i = 0; i < NM_RFKILL_TYPE_MAX; i++) {
|
2022-02-01 22:19:36 +01:00
|
|
|
const NMRfkillType rtype = i;
|
|
|
|
|
const RfkillRadioState *rstate = &priv->radio_states[rtype];
|
|
|
|
|
gboolean enabled = _rfkill_radio_state_get_enabled(rstate, TRUE);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2022-02-01 22:08:52 +01:00
|
|
|
_LOGD(LOGD_RFKILL,
|
2022-02-16 15:54:44 +02:00
|
|
|
"rfkill: %s %s devices (hw_enabled %d, sw_enabled %d, user_enabled %d, "
|
|
|
|
|
"os_owner %d)",
|
2022-02-01 22:08:52 +01:00
|
|
|
enabled ? "enabling" : "disabling",
|
2022-02-01 22:32:46 +01:00
|
|
|
nm_rfkill_type_to_string(rtype),
|
2022-02-01 22:08:52 +01:00
|
|
|
rstate->hw_enabled,
|
|
|
|
|
rstate->sw_enabled,
|
2022-02-16 15:54:44 +02:00
|
|
|
rstate->user_enabled,
|
|
|
|
|
rstate->os_owner);
|
2022-02-01 22:08:52 +01:00
|
|
|
if (nm_device_get_rfkill_type(device) == rtype)
|
2011-11-17 23:38:08 -06:00
|
|
|
nm_device_set_enabled(device, enabled);
|
2009-12-23 00:03:45 -08:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
device: remove default-unmanaged and refactor unmanaged flags
Get rid of NM_UNMANAGED_DEFAULT and refine the interaction between
unmanaged flags, device state and managed property.
Previously, the NM_UNMANAGED_DEFAULT was special in that a device was
still considered managed if it had solely the NM_UNMANAGED_DEFAULT flag
set and its state was managed. Thus, whether the device (state) was managed,
depended on the device state too.
Now, a device is considered managed (or unmanaged) based on the unmanaged
flags and realization state alone. At the same time, the device state
directly corresponds to the managed property of the device. Of course,
while changing the unmanaged flags, that invariant is shortly violated
until the state transistion is complete.
Introduce more unmanaged flags whereas some of them are non-authorative.
For example, the EXTERNAL_DOWN flag has only effect as long as the user
didn't explicitly manage the device (NM_UNMANAGED_USER_EXPLICIT). In other
words, certain flags can render other flags ineffective. Whether the device
is considered managed depends on the flags but also at the explicitly unset flags.
In a way, this is similar to previous where NM_UNMANAGED_DEFAULT was ignored
(if no other flags were present).
Also, previously a device that was NM_UNMANAGED_DEFAULT and in disconnected
state would transition back to unmanaged. No longer do that. Once a device is
managed, it stays managed as long as the flags indicate it should be managed.
However, the user can also modify the unmanaged flags via the D-Bus API.
Also get rid of nm_device_finish_init(). That was previously called
by NMManager after add_device(). As we now realize devices (possibly
multiple times) this should be handled during realization.
https://bugzilla.gnome.org/show_bug.cgi?id=746566
2015-09-15 15:35:16 +02:00
|
|
|
nm_device_set_unmanaged_by_flags(device,
|
|
|
|
|
NM_UNMANAGED_SLEEPING,
|
|
|
|
|
FALSE,
|
|
|
|
|
NM_DEVICE_STATE_REASON_NOW_MANAGED);
|
2007-02-08 15:34:26 +00:00
|
|
|
}
|
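To make the flag semantics described in the commit message above concrete, a
small hypothetical sketch (the flag names are invented; NM's real
NMUnmanagedFlags set is larger):

    #include <glib.h>

    typedef enum {
        EX_UNMANAGED_SLEEPING      = (1 << 0),
        EX_UNMANAGED_EXTERNAL_DOWN = (1 << 1),   /* non-authoritative */
        EX_UNMANAGED_USER_EXPLICIT = (1 << 2),   /* user explicitly manages */
    } ExampleUnmanagedFlags;

    static gboolean
    example_is_unmanaged(ExampleUnmanagedFlags flags)
    {
        /* an explicit user decision renders EXTERNAL_DOWN ineffective */
        if (flags & EX_UNMANAGED_USER_EXPLICIT)
            flags &= ~EX_UNMANAGED_EXTERNAL_DOWN;
        return (flags & (EX_UNMANAGED_SLEEPING | EX_UNMANAGED_EXTERNAL_DOWN)) != 0;
    }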
2022-07-11 16:07:09 +02:00
|
|
|
|
|
|
|
|
/* Give the connections a chance to recreate the virtual devices.
|
2022-10-11 18:23:15 +02:00
|
|
|
* We've torn them down on sleep. */
|
2022-07-11 16:07:09 +02:00
|
|
|
connections_changed(self);
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
|
|
|
|
|
2009-06-11 00:39:12 -04:00
|
|
|
nm_manager_update_state(self);
|
2010-05-22 08:55:30 -07:00
|
|
|
}
|
|
|
|
|
|
2010-05-29 23:11:45 -07:00
|
|
|
static void
|
|
|
|
|
_internal_sleep(NMManager *self, gboolean do_sleep)
|
2010-05-22 08:55:30 -07:00
|
|
|
{
|
2016-04-27 18:26:39 +02:00
|
|
|
NMManagerPrivate *priv;
|
|
|
|
|
|
|
|
|
|
g_return_if_fail(NM_IS_MANAGER(self));
|
|
|
|
|
|
|
|
|
|
priv = NM_MANAGER_GET_PRIVATE(self);
|
2010-05-22 08:55:30 -07:00
|
|
|
|
2010-08-31 15:45:55 -05:00
|
|
|
if (priv->sleeping == do_sleep)
|
|
|
|
|
return;
|
|
|
|
|
|
2017-11-29 09:42:39 +01:00
|
|
|
_LOGI(LOGD_SUSPEND,
|
|
|
|
|
"sleep: %s requested (sleeping: %s enabled: %s)",
|
2016-03-02 11:38:26 +01:00
|
|
|
do_sleep ? "sleep" : "wake",
|
|
|
|
|
priv->sleeping ? "yes" : "no",
|
|
|
|
|
priv->net_enabled ? "yes" : "no");
|
2010-05-22 08:55:30 -07:00
|
|
|
|
2010-05-28 13:06:14 -07:00
|
|
|
priv->sleeping = do_sleep;
|
2010-05-22 08:55:30 -07:00
|
|
|
|
2014-04-03 15:14:00 -04:00
|
|
|
do_sleep_wake(self, TRUE);
|
2009-10-20 15:25:04 -07:00
|
|
|
|
2016-04-01 17:34:51 +02:00
|
|
|
_notify(self, PROP_SLEEPING);
|
2010-05-29 23:11:45 -07:00
|
|
|
}
|
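_notify() above emits the PropertiesChanged signal immediately under the
reworked D-Bus glue (see the commit message quoted below). When several
properties change together, they can still be grouped with standard GObject
freeze/thaw, as that message suggests; a minimal sketch with hypothetical
property names:

    #include <glib-object.h>

    static void
    set_two_props(GObject *obj)
    {
        g_object_freeze_notify(obj);
        /* both notifications are queued here ... */
        g_object_set(obj, "prop-a", 1, "prop-b", 2, NULL);
        /* ... and emitted together on thaw, one "notify" per property */
        g_object_thaw_notify(obj);
    }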
|
|
|
|
|
|
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
impl_manager_sleep(NMDBusObject *obj,
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, we now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
2021-11-09 13:28:54 +01:00
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
|
|
|
|
{
|
|
|
|
|
NMManager *self = NM_MANAGER(obj);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
GError *error = NULL;
|
2015-07-14 10:26:54 +02:00
|
|
|
gs_unref_object NMAuthSubject *subject = NULL;
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
gboolean do_sleep;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
g_variant_get(parameters, "(b)", &do_sleep);
|
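/* Aside (illustrative): a D-Bus caller builds the same one-boolean tuple
 * that g_variant_get() unpacks above, e.g.:
 *
 *   GVariant *p = g_variant_ref_sink(g_variant_new("(b)", TRUE));
 *   gboolean  v;
 *
 *   g_variant_get(p, "(b)", &v);   // v == TRUE
 *   g_variant_unref(p);
 */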
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-12-19 11:30:38 +01:00
|
|
|
subject = nm_dbus_manager_new_auth_subject_from_context(invocation);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2010-05-29 23:11:45 -07:00
|
|
|
if (priv->sleeping == do_sleep) {
|
|
|
|
|
error = g_error_new(NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_ALREADY_ASLEEP_OR_AWAKE,
|
|
|
|
|
"Already %s",
|
|
|
|
|
do_sleep ? "asleep" : "awake");
|
2015-07-14 10:26:54 +02:00
|
|
|
nm_audit_log_control_op(NM_AUDIT_OP_SLEEP_CONTROL,
|
|
|
|
|
do_sleep ? "on" : "off",
|
|
|
|
|
FALSE,
|
|
|
|
|
subject,
|
|
|
|
|
error->message);
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
g_dbus_method_invocation_take_error(invocation, error);
|
2010-05-29 23:11:45 -07:00
|
|
|
return;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2010-10-15 10:28:38 -05:00
|
|
|
/* Unconditionally allow the request. Previously it was polkit-protected
|
|
|
|
|
* but unfortunately that doesn't work for short-lived processes like
|
|
|
|
|
* pm-utils. It uses dbus-send without --print-reply, which quits
|
|
|
|
|
* immediately after sending the request, and NM is unable to obtain the
|
|
|
|
|
* sender's UID as dbus-send has already dropped off the bus. Thus NM
|
|
|
|
|
* fails the request. Instead, don't validate the request, but rely on
|
|
|
|
|
* D-Bus permissions to restrict the call to root.
|
|
|
|
|
*/
|
|
|
|
|
_internal_sleep(self, do_sleep);
|
2015-07-14 10:26:54 +02:00
|
|
|
nm_audit_log_control_op(NM_AUDIT_OP_SLEEP_CONTROL,
|
|
|
|
|
do_sleep ? "on" : "off",
|
|
|
|
|
TRUE,
|
|
|
|
|
subject,
|
|
|
|
|
NULL);
|
2022-07-11 16:06:14 +02:00
|
|
|
|
|
|
|
|
priv->sleep_invocations = g_slist_prepend(priv->sleep_invocations, invocation);
|
|
|
|
|
sleep_devices_check_empty(self);
|
|
|
|
|
|
2010-10-15 10:28:38 -05:00
|
|
|
return;
|
2007-02-12 09:23:43 +00:00
|
|
|
}
|
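impl_manager_sleep() above is dispatched through the hand-written
GDBusConnection glue that the quoted core/dbus commit message describes. A
minimal, self-contained sketch of that registration mechanism (the interface
name, object path and introspection XML are made up; this is not
NMDBusObject's actual code, which builds static GDBusInterfaceInfo
descriptors instead):

    #include <gio/gio.h>

    static void
    handle_method_call(GDBusConnection       *connection,
                       const char            *sender,
                       const char            *object_path,
                       const char            *interface_name,
                       const char            *method_name,
                       GVariant              *parameters,
                       GDBusMethodInvocation *invocation,
                       gpointer               user_data)
    {
        /* a real handler would dispatch on method_name here */
        g_dbus_method_invocation_return_value(invocation, NULL);
    }

    static const GDBusInterfaceVTable vtable = {
        .method_call = handle_method_call,
    };

    static guint
    register_example(GDBusConnection *connection, GError **error)
    {
        static GDBusNodeInfo *node;   /* static descriptor, parsed once */

        if (!node)
            node = g_dbus_node_info_new_for_xml(
                "<node><interface name='org.example.Manager'>"
                " <method name='Sleep'><arg type='b' name='sleep' direction='in'/></method>"
                "</interface></node>",
                NULL);

        return g_dbus_connection_register_object(connection,
                                                 "/org/example/Manager",
                                                 node->interfaces[0],
                                                 &vtable,
                                                 NULL,  /* user_data */
                                                 NULL,  /* user_data_free_func */
                                                 error);
    }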
|
|
|
|
|
2010-08-31 15:45:55 -05:00
|
|
|
static void
|
2016-04-27 18:21:28 +02:00
|
|
|
sleeping_cb(NMSleepMonitor *monitor, gboolean is_about_to_suspend, gpointer user_data)
|
2010-08-31 15:45:55 -05:00
|
|
|
{
|
2016-04-27 18:26:39 +02:00
|
|
|
NMManager *self = user_data;
|
|
|
|
|
|
2017-11-29 09:42:39 +01:00
|
|
|
_LOGT(LOGD_SUSPEND, "sleep: received %s signal", is_about_to_suspend ? "sleeping" : "resuming");
|
2016-04-27 18:21:28 +02:00
|
|
|
_internal_sleep(self, is_about_to_suspend);
|
2010-08-31 15:45:55 -05:00
|
|
|
}
|
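NMSleepMonitor presumably forwards logind's PrepareForSleep signal into
sleeping_cb() above; a standalone sketch of such a subscription (illustrative
GDBus usage, not NMSleepMonitor's actual implementation):

    #include <gio/gio.h>

    static void
    on_prepare_for_sleep(GDBusConnection *connection,
                         const char      *sender_name,
                         const char      *object_path,
                         const char      *interface_name,
                         const char      *signal_name,
                         GVariant        *parameters,
                         gpointer         user_data)
    {
        gboolean is_about_to_suspend;

        g_variant_get(parameters, "(b)", &is_about_to_suspend);
        /* forward to the equivalent of _internal_sleep() */
    }

    static guint
    subscribe_sleep_signal(GDBusConnection *system_bus, gpointer user_data)
    {
        return g_dbus_connection_signal_subscribe(system_bus,
                                                  "org.freedesktop.login1",
                                                  "org.freedesktop.login1.Manager",
                                                  "PrepareForSleep",
                                                  "/org/freedesktop/login1",
                                                  NULL,
                                                  G_DBUS_SIGNAL_FLAGS_NONE,
                                                  on_prepare_for_sleep,
                                                  user_data,
                                                  NULL);
    }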
|
|
|
|
|
2010-05-29 23:00:46 -07:00
|
|
|
static void
|
|
|
|
|
_internal_enable(NMManager *self, gboolean enable)
|
2010-05-22 08:55:30 -07:00
|
|
|
{
|
2010-05-29 23:00:46 -07:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2010-05-22 08:55:30 -07:00
|
|
|
|
2016-04-07 18:42:24 +02:00
|
|
|
nm_config_state_set(priv->config,
|
|
|
|
|
TRUE,
|
|
|
|
|
FALSE,
|
|
|
|
|
NM_CONFIG_STATE_PROPERTY_NETWORKING_ENABLED,
|
|
|
|
|
enable);
|
2010-05-22 08:55:30 -07:00
|
|
|
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGI(LOGD_SUSPEND,
|
|
|
|
|
"%s requested (sleeping: %s enabled: %s)",
|
|
|
|
|
enable ? "enable" : "disable",
|
|
|
|
|
priv->sleeping ? "yes" : "no",
|
|
|
|
|
priv->net_enabled ? "yes" : "no");
|
2010-05-22 08:55:30 -07:00
|
|
|
|
|
|
|
|
priv->net_enabled = enable;
|
|
|
|
|
|
2014-04-03 15:14:00 -04:00
|
|
|
do_sleep_wake(self, FALSE);
|
2010-05-22 08:55:30 -07:00
|
|
|
|
2016-04-01 17:34:51 +02:00
|
|
|
_notify(self, PROP_NETWORKING_ENABLED);
|
2010-05-29 23:00:46 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
enable_net_done_cb(NMAuthChain *chain, GDBusMethodInvocation *context, gpointer user_data)
|
|
|
|
|
{
|
2022-07-11 16:06:14 +02:00
|
|
|
NMManager *self = NM_MANAGER(user_data);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
NMAuthCallResult result;
|
|
|
|
|
gboolean enable;
|
|
|
|
|
NMAuthSubject *subject;
|
2010-05-29 23:00:46 -07:00
|
|
|
|
2019-05-02 10:08:09 +02:00
|
|
|
nm_assert(G_IS_DBUS_METHOD_INVOCATION(context));
|
2013-07-29 11:53:23 -05:00
|
|
|
|
2019-05-26 18:49:55 +02:00
|
|
|
c_list_unlink(nm_auth_chain_parent_lst_list(chain));
|
2015-07-14 10:26:54 +02:00
|
|
|
enable = GPOINTER_TO_UINT(nm_auth_chain_get_data(chain, "enable"));
|
|
|
|
|
subject = nm_auth_chain_get_subject(chain);
|
2010-06-02 02:16:14 -07:00
|
|
|
|
2012-10-08 12:52:15 -05:00
|
|
|
result = nm_auth_chain_get_result(chain, NM_AUTH_PERMISSION_ENABLE_DISABLE_NETWORK);
|
2019-05-04 09:37:54 +02:00
|
|
|
if (result != NM_AUTH_CALL_RESULT_YES) {
|
|
|
|
|
GError *ret_error;
|
|
|
|
|
|
2010-05-29 23:00:46 -07:00
|
|
|
ret_error = g_error_new_literal(NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"Not authorized to enable/disable networking");
|
2015-07-14 10:26:54 +02:00
|
|
|
nm_audit_log_control_op(NM_AUDIT_OP_NET_CONTROL,
|
|
|
|
|
enable ? "on" : "off",
|
|
|
|
|
FALSE,
|
|
|
|
|
subject,
|
|
|
|
|
ret_error->message);
|
2015-04-15 14:53:30 -04:00
|
|
|
g_dbus_method_invocation_take_error(context, ret_error);
|
2019-05-02 10:08:09 +02:00
|
|
|
return;
|
2012-10-08 12:52:15 -05:00
|
|
|
}
|
|
|
|
|
|
2019-05-02 10:08:09 +02:00
|
|
|
_internal_enable(self, enable);
|
|
|
|
|
nm_audit_log_control_op(NM_AUDIT_OP_NET_CONTROL, enable ? "on" : "off", TRUE, subject, NULL);
|
2022-07-11 16:06:14 +02:00
|
|
|
|
|
|
|
|
priv->sleep_invocations = g_slist_prepend(priv->sleep_invocations, context);
|
|
|
|
|
sleep_devices_check_empty(self);
|
2010-05-29 23:00:46 -07:00
|
|
|
}
|
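enable_net_done_cb() above completes a D-Bus invocation only after an
asynchronous permission check. A stripped-down sketch of that
deferred-completion pattern using GTask (the helper names are hypothetical;
the real code uses NMAuthChain and polkit):

    #include <gio/gio.h>

    static void
    check_done(GObject *source, GAsyncResult *result, gpointer user_data)
    {
        GDBusMethodInvocation *invocation = user_data;

        if (!g_task_propagate_boolean(G_TASK(result), NULL)) {
            g_dbus_method_invocation_return_error_literal(invocation,
                                                          G_DBUS_ERROR,
                                                          G_DBUS_ERROR_ACCESS_DENIED,
                                                          "Not authorized");
            return;
        }
        g_dbus_method_invocation_return_value(invocation, NULL);
    }

    static void
    begin_check(GDBusMethodInvocation *invocation)
    {
        GTask *task = g_task_new(NULL, NULL, check_done, invocation);

        /* a real implementation would consult an authorization service here */
        g_task_return_boolean(task, TRUE);
        g_object_unref(task);
    }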
|
|
|
|
|
|
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
impl_manager_enable(NMDBusObject *obj,
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
2021-11-09 13:28:54 +01:00
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = NM_MANAGER(obj);
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
NMAuthChain *chain;
|
|
|
|
|
GError *error = NULL;
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
gboolean enable;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
g_variant_get(parameters, "(b)", &enable);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2010-05-29 23:00:46 -07:00
|
|
|
if (priv->net_enabled == enable) {
|
|
|
|
|
error = g_error_new(NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_ALREADY_ENABLED_OR_DISABLED,
|
|
|
|
|
"Already %s",
|
|
|
|
|
enable ? "enabled" : "disabled");
|
2013-07-29 11:53:23 -05:00
|
|
|
goto done;
|
2010-05-29 23:00:46 -07:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
    chain = nm_auth_chain_new_context(invocation, enable_net_done_cb, self);
    if (!chain) {
        error = g_error_new_literal(NM_MANAGER_ERROR,
                                    NM_MANAGER_ERROR_PERMISSION_DENIED,
                                    NM_UTILS_ERROR_MSG_REQ_AUTH_FAILED);
        goto done;
    }

    c_list_link_tail(&priv->auth_lst_head, nm_auth_chain_parent_lst_list(chain));
    nm_auth_chain_set_data(chain, "enable", GUINT_TO_POINTER(enable), NULL);
    nm_auth_chain_add_call(chain, NM_AUTH_PERMISSION_ENABLE_DISABLE_NETWORK, TRUE);

done:
    if (error)
        g_dbus_method_invocation_take_error(invocation, error);
}
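
/* Note: NMAuthChain makes the authorization step above asynchronous: the
 * chain is linked into priv->auth_lst_head, carries the requested value via
 * nm_auth_chain_set_data(), and invokes enable_net_done_cb() once the
 * authorization result is available (compare c_list_unlink() in
 * get_permissions_done_cb() below for the unlink side of the pattern).
 *
 * Illustrative only, not part of this file: a minimal client-side sketch of
 * invoking the Enable() D-Bus method handled here. The helper name is
 * hypothetical; kept under "#if 0" so it cannot affect the build, and error
 * handling is omitted for brevity. */
#if 0
#include <gio/gio.h>

static void
nm_client_enable_networking(GDBusConnection *bus, gboolean enable)
{
    GVariant *ret;

    /* Enable() takes a single boolean argument. */
    ret = g_dbus_connection_call_sync(bus,
                                      "org.freedesktop.NetworkManager",
                                      "/org/freedesktop/NetworkManager",
                                      "org.freedesktop.NetworkManager",
                                      "Enable",
                                      g_variant_new("(b)", enable),
                                      NULL,
                                      G_DBUS_CALL_FLAGS_NONE,
                                      -1,
                                      NULL,
                                      NULL);
    g_clear_pointer(&ret, g_variant_unref);
}
#endif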

/* Permissions */

static void
get_permissions_done_cb(NMAuthChain *chain, GDBusMethodInvocation *context, gpointer user_data)
{
    GVariantBuilder results;
    int             i;

    nm_assert(G_IS_DBUS_METHOD_INVOCATION(context));

    c_list_unlink(nm_auth_chain_parent_lst_list(chain));

    g_variant_builder_init(&results, G_VARIANT_TYPE("a{ss}"));
    for (i = 0; i < (int) G_N_ELEMENTS(nm_auth_permission_sorted); i++) {
        const char      *permission = nm_auth_permission_names_by_idx[nm_auth_permission_sorted[i] - 1];
        NMAuthCallResult result;
        const char      *result_str;

        result     = nm_auth_chain_get_result(chain, permission);
        result_str = nm_client_permission_result_to_string(nm_auth_call_result_to_client(result));
        g_variant_builder_add(&results, "{ss}", permission, result_str);
    }

    g_dbus_method_invocation_return_value(context, g_variant_new("(a{ss})", &results));
}
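
/* get_permissions_done_cb() flattens the chain results into the "a{ss}" map
 * returned by GetPermissions: one entry per known permission, mapping the
 * permission name (e.g. "org.freedesktop.NetworkManager.enable-disable-network")
 * to a result string such as "yes", "no" or "auth". Note that
 * nm_auth_permission_sorted[] holds 1-based permission values, hence the
 * "- 1" when indexing nm_auth_permission_names_by_idx[]. */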

static void
impl_manager_get_permissions(NMDBusObject                      *obj,
                             const NMDBusInterfaceInfoExtended *interface_info,
                             const NMDBusMethodInfoExtended    *method_info,
                             GDBusConnection                   *connection,
                             const char                        *sender,
                             GDBusMethodInvocation             *invocation,
                             GVariant                          *parameters)
{
    NMManager        *self = NM_MANAGER(obj);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
    NMAuthChain      *chain;
    int               i;

    chain = nm_auth_chain_new_context(invocation, get_permissions_done_cb, self);
    if (!chain) {
        g_dbus_method_invocation_return_error_literal(invocation,
                                                      NM_MANAGER_ERROR,
                                                      NM_MANAGER_ERROR_PERMISSION_DENIED,
                                                      NM_UTILS_ERROR_MSG_REQ_AUTH_FAILED);
        return;
    }

    c_list_link_tail(&priv->auth_lst_head, nm_auth_chain_parent_lst_list(chain));

    for (i = 0; i < (int) G_N_ELEMENTS(nm_auth_permission_sorted); i++) {
        const char *permission = nm_auth_permission_names_by_idx[nm_auth_permission_sorted[i] - 1];

        nm_auth_chain_add_call_unsafe(chain, permission, FALSE);
    }
}
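
/* Illustrative only, not part of this file: a minimal client-side sketch of
 * consuming the GetPermissions reply built by get_permissions_done_cb()
 * above. The helper name is hypothetical; kept under "#if 0" so it cannot
 * affect the build, and error handling is omitted for brevity. */
#if 0
#include <gio/gio.h>

static void
nm_client_print_permissions(GDBusConnection *bus)
{
    GVariant     *ret;
    GVariantIter *iter;
    const char   *permission;
    const char   *result;

    ret = g_dbus_connection_call_sync(bus,
                                      "org.freedesktop.NetworkManager",
                                      "/org/freedesktop/NetworkManager",
                                      "org.freedesktop.NetworkManager",
                                      "GetPermissions",
                                      NULL,
                                      G_VARIANT_TYPE("(a{ss})"),
                                      G_DBUS_CALL_FLAGS_NONE,
                                      -1,
                                      NULL,
                                      NULL);
    /* Iterate the "a{ss}" map; "&s" borrows the strings from the reply. */
    g_variant_get(ret, "(a{ss})", &iter);
    while (g_variant_iter_loop(iter, "{&s&s}", &permission, &result))
        g_print("%s = %s\n", permission, result);
    g_variant_iter_free(iter);
    g_variant_unref(ret);
}
#endif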

static void
impl_manager_state(NMDBusObject                      *obj,
                   const NMDBusInterfaceInfoExtended *interface_info,
                   const NMDBusMethodInfoExtended    *method_info,
                   GDBusConnection                   *connection,
                   const char                        *sender,
                   GDBusMethodInvocation             *invocation,
                   GVariant                          *parameters)
{
    NMManager *self = NM_MANAGER(obj);

    nm_manager_update_state(self);
    g_dbus_method_invocation_return_value(
        invocation,
        g_variant_new("(u)", NM_MANAGER_GET_PRIVATE(self)->state));
}
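
/* Illustrative only, not part of this file: the value returned by state()
 * above is also exposed as the "State" property, which is what clients
 * typically read. A minimal sketch via org.freedesktop.DBus.Properties;
 * the helper name is hypothetical, kept under "#if 0", error handling
 * omitted. */
#if 0
#include <gio/gio.h>

static guint32
nm_client_get_state(GDBusConnection *bus)
{
    GVariant *ret;
    GVariant *value;
    guint32   state;

    ret = g_dbus_connection_call_sync(bus,
                                      "org.freedesktop.NetworkManager",
                                      "/org/freedesktop/NetworkManager",
                                      "org.freedesktop.DBus.Properties",
                                      "Get",
                                      g_variant_new("(ss)",
                                                    "org.freedesktop.NetworkManager",
                                                    "State"),
                                      G_VARIANT_TYPE("(v)"),
                                      G_DBUS_CALL_FLAGS_NONE,
                                      -1,
                                      NULL,
                                      NULL);
    /* The property arrives boxed in a variant; unbox the "u" inside. */
    g_variant_get(ret, "(v)", &value);
    state = g_variant_get_uint32(value);
    g_variant_unref(value);
    g_variant_unref(ret);
    return state;
}
#endif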

static void
impl_manager_set_logging(NMDBusObject                      *obj,
                         const NMDBusInterfaceInfoExtended *interface_info,
                         const NMDBusMethodInfoExtended    *method_info,
                         GDBusConnection                   *connection,
                         const char                        *sender,
                         GDBusMethodInvocation             *invocation,
                         GVariant                          *parameters)
{
    NMManager *self  = NM_MANAGER(obj);
    GError    *error = NULL;
    const char *level;
    const char *domains;

    /* The permission is already enforced by the D-Bus daemon, but we ensure
     * that the caller is still alive so that clients are forced to wait and
     * we'll be able to switch to polkit without breaking behavior.
     */
    if (!nm_dbus_manager_ensure_uid(nm_dbus_object_get_manager(NM_DBUS_OBJECT(self)),
2018-02-26 13:51:52 +01:00
|
|
|
invocation,
|
|
|
|
|
G_MAXULONG,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED))
|
2016-08-23 09:38:40 +02:00
|
|
|
return;
|
|
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
g_variant_get(parameters, "(&s&s)", &level, &domains);
|
|
|
|
|
|
2014-01-22 13:07:24 -06:00
|
|
|
if (nm_logging_setup(level, domains, NULL, &error)) {
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGI(LOGD_CORE,
|
|
|
|
|
"logging: level '%s' domains '%s'",
|
|
|
|
|
nm_logging_level_to_string(),
|
|
|
|
|
nm_logging_domains_to_string());
|
2010-04-08 08:56:17 -07:00
|
|
|
}
|
2014-01-22 13:07:24 -06:00
|
|
|
|
2015-04-15 14:53:30 -04:00
|
|
|
if (error)
|
2018-02-26 13:51:52 +01:00
|
|
|
g_dbus_method_invocation_take_error(invocation, error);
|
2015-04-15 14:53:30 -04:00
|
|
|
else
|
2018-02-26 13:51:52 +01:00
|
|
|
g_dbus_method_invocation_return_value(invocation, NULL);
|
2010-04-08 08:56:17 -07:00
|
|
|
}
|
2007-08-15 07:52:25 +00:00
|
|
|
|
2012-12-12 14:21:40 +01:00
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
impl_manager_get_logging(NMDBusObject *obj,
|
2018-02-26 13:51:52 +01:00
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
2021-11-09 13:28:54 +01:00
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
2018-02-26 13:51:52 +01:00
|
|
|
{
|
|
|
|
|
g_dbus_method_invocation_return_value(
|
|
|
|
|
invocation,
|
2015-04-15 14:53:30 -04:00
|
|
|
g_variant_new("(ss)", nm_logging_level_to_string(), nm_logging_domains_to_string()));
|
2012-12-12 14:21:40 +01:00
|
|
|
}
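A hedged client-side sketch of invoking these two handlers over the bus with
plain GIO (method and parameter names as suggested by the handlers above,
with SetLogging taking "(ss)" and GetLogging returning "(ss)"; error handling
elided):

static void
example_client_logging(void)
{
    GDBusConnection *bus = g_bus_get_sync(G_BUS_TYPE_SYSTEM, NULL, NULL);
    const char      *level;
    const char      *domains;
    GVariant        *ret;

    /* SetLogging takes the level and the domains as two strings. */
    g_dbus_connection_call_sync(bus,
                                "org.freedesktop.NetworkManager",
                                "/org/freedesktop/NetworkManager",
                                "org.freedesktop.NetworkManager",
                                "SetLogging",
                                g_variant_new("(ss)", "DEBUG", "CORE,DEVICE"),
                                NULL, G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL);

    /* GetLogging returns the current level and domains. */
    ret = g_dbus_connection_call_sync(bus,
                                      "org.freedesktop.NetworkManager",
                                      "/org/freedesktop/NetworkManager",
                                      "org.freedesktop.NetworkManager",
                                      "GetLogging",
                                      NULL,
                                      G_VARIANT_TYPE("(ss)"),
                                      G_DBUS_CALL_FLAGS_NONE, -1, NULL, NULL);
    g_variant_get(ret, "(&s&s)", &level, &domains);
}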
|
|
|
|
|
|
2017-03-27 15:22:22 +00:00
|
|
|
typedef struct {
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self;
|
2017-03-27 15:22:22 +00:00
|
|
|
GDBusMethodInvocation *context;
|
connectivity: rework async connectivity check requests
An asynchronous request should either be cancellable or not keep
the target object alive. Preferably both.
Otherwise, it is impossible to do a controlled shutdown when terminating
NetworkManager. Currently, when NetworkManager is about to terminate,
it just quits the mainloop and essentially leaks everything. That is a
bug. If we ever want to fix that, every asynchronous request must be
cancellable in a controlled way (or it must not prevent objects from
getting disposed, where disposing the object automatically cancels the
callback).
Rework the asynchronous request for connectivity check to
- return a handle that can be used to cancel the operation.
Cancelling is optional. The caller may choose to ignore the handle
because the asynchronous operation does not keep the target object
alive. That means, it is still possible to shutdown, by everybody
giving up their reference to the target object. In which case the
callback will be invoked during dispose() of the target object.
- also, the callback will always be invoked exactly once, and never
synchronously from within the asynchronous start call. But during
cancel(), the callback is invoked synchronously from within cancel().
Note that it's only allowed to cancel an action at most once, and
never after the callback is invoked (also not from within the callback
itself).
- also, NMConnectivity already supports a fake handler, in case
connectivity check is disabled via configuration. Hence, reuse
the same code paths also when compiling without --enable-concheck.
That means, instead of having #if WITH_CONCHECK at various callers,
move them into NMConnectivity. The downside is, that if you build
without concheck, there is a small overhead compared to before. The
upside is, we reuse the same code paths when compiling with or without
concheck.
- also, the patch synchronizes the connectivity states. For example,
previously `nmcli networking connectivity check` would schedule
requests in parallel, and return the accumulated result of the individual
requests.
However, the global connectivity state of the manager might not have
been the same as the answer to the explicit connectivity check,
because while the answer for the manual check is waiting for all
pending checks to complete, the global connectivity state could
already change. That is just wrong. There are not multiple global
connectivity states at the same time, there is just one. A manual
connectivity check should have the meaning of ensuring that the global
state is up to date, but it should still return the global
connectivity state -- not the answers for several connectivity checks
issued in parallel.
This is related to commit b799de281bc01073c31dd2c86171b29c8132441c
(libnm: update property in the manager after connectivity check),
which tries to address a similar problem client side.
Similarly, each device has a connectivity state. While there might
be several connectivity checks per device pending, whenever a check
completes, it can update the per-device state (and return that device
state as result), but the immediate answer of the individual check
might not matter. This is especially the case, when a later request
returns earlier and obsoletes all earlier requests. In that case,
earlier requests return with the result of the current device's
connectivity state.
This patch cleans up the internal API and gives a better-defined behavior
to the user (thus, a simpler API which simplifies things for the
caller). However, getting this API right and properly handling cancel
and destruction of the target object makes the implementation more
complicated and complex. But this is not just for the sake of a nicer
API. It fixes the actual issues explained above.
Also, get rid of GAsyncResult to track information about the pending request.
Instead, allocate our own handle structure, which ends up being nicer
because it's strongly typed and has exactly the properties that are
useful to track the request. Also, it gets rid of the awkward
_finish() API by passing the relevant arguments to the callback
directly.
2018-01-05 17:46:49 +01:00
|
|
|
guint remaining;
|
2017-03-27 15:22:22 +00:00
|
|
|
} ConnectivityCheckData;
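The commit message above specifies a handle-based contract: the start call
returns a handle, the callback fires exactly once and never synchronously
from the start call, and cancel() invokes the callback synchronously. A
minimal self-contained sketch of that contract, with invented names:

#include <glib.h>

typedef struct _CheckHandle CheckHandle;
typedef void (*CheckDoneFunc)(CheckHandle *handle, int result, gpointer user_data);

struct _CheckHandle {
    CheckDoneFunc callback;
    gpointer      user_data;
    guint         idle_id;
};

static gboolean
_check_complete_on_idle(gpointer data)
{
    CheckHandle *handle = data;

    /* Invoked from the main loop, never from within check_start(). */
    handle->idle_id = 0;
    handle->callback(handle, 0 /* success */, handle->user_data);
    g_free(handle);
    return G_SOURCE_REMOVE;
}

CheckHandle *
check_start(CheckDoneFunc callback, gpointer user_data)
{
    CheckHandle *handle = g_new0(CheckHandle, 1);

    handle->callback  = callback;
    handle->user_data = user_data;
    handle->idle_id   = g_idle_add(_check_complete_on_idle, handle);
    return handle;
}

void
check_cancel(CheckHandle *handle)
{
    /* Valid at most once, and only before the callback fired: the
     * callback runs synchronously with a "cancelled" result. */
    g_source_remove(handle->idle_id);
    handle->callback(handle, -1 /* cancelled */, handle->user_data);
    g_free(handle);
}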
|
|
|
|
|
|
2013-07-30 16:31:31 -04:00
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
device_connectivity_done(NMDevice *device,
|
2018-01-05 17:46:49 +01:00
|
|
|
NMDeviceConnectivityHandle *handle,
|
|
|
|
|
NMConnectivityState state,
|
2021-11-09 13:28:54 +01:00
|
|
|
GError *error,
|
2018-01-05 17:46:49 +01:00
|
|
|
gpointer user_data)
|
2013-07-30 16:31:31 -04:00
|
|
|
{
|
2017-03-27 15:22:22 +00:00
|
|
|
ConnectivityCheckData *data = user_data;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self;
|
|
|
|
|
NMManagerPrivate *priv;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-01-05 17:46:49 +01:00
|
|
|
nm_assert(data);
|
|
|
|
|
nm_assert(data->remaining > 0);
|
|
|
|
|
nm_assert(NM_IS_MANAGER(data->self));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-01-05 17:46:49 +01:00
|
|
|
data->remaining--;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-01-05 17:46:49 +01:00
|
|
|
self = data->self;
|
|
|
|
|
priv = NM_MANAGER_GET_PRIVATE(self);
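Purely for illustration -- a guess at the shape of the countdown, not the
actual continuation of this function -- such a fan-out typically replies once
the last per-device check has answered:

/* Hedged sketch only; the field name and reply format are assumptions. */
if (data->remaining == 0 && data->context) {
    /* Reply with the manager's global state, not this check's answer. */
    g_dbus_method_invocation_return_value(
        data->context,
        g_variant_new("(u)", (guint) priv->connectivity_state));
    data->context = NULL;
}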
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-01-05 17:46:49 +01:00
|
|
|
if (data->context
|
|
|
|
|
&& (data->remaining == 0
|
|
|
|
|
|| (state == NM_CONNECTIVITY_FULL
|
|
|
|
|
&& priv->connectivity_state == NM_CONNECTIVITY_FULL))) {
|
|
|
|
|
/* despite having a @handle and @state returned by the requests, we always
|
|
|
|
|
* return the current connectivity_state. That is because the connectivity_state
|
|
|
|
|
* and the answer to the connectivity check shall agree.
|
|
|
|
|
*
|
|
|
|
|
* However, if one of the requests (early) returns full connectivity and agrees with
|
|
|
|
|
* the accumulated connectivity state, we no longer have to wait. The result is set.
|
|
|
|
|
*
|
2018-04-18 14:13:28 +02:00
|
|
|
* This also works well, because NMDevice first emits change signals to its own
|
2018-01-05 17:46:49 +01:00
|
|
|
* connectivity state, which is then taken into account for the accumulated global
|
|
|
|
|
* state. All this happens before the callback is invoked. */
|
|
|
|
|
g_dbus_method_invocation_return_value(
|
|
|
|
|
g_steal_pointer(&data->context),
|
|
|
|
|
g_variant_new("(u)", (guint) priv->connectivity_state));
|
2017-03-27 15:22:22 +00:00
|
|
|
}
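In other words: the D-Bus reply is sent either when the last pending per-device check completes (data->remaining reaches 0), or early as soon as one check reports FULL while the accumulated manager state is already FULL. Because the invocation is consumed with g_steal_pointer(), any later-finishing checks find data->context set to NULL, skip the reply, and merely fall through to the cleanup below once the counter drops to zero.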
|
|
|
|
|
|
2018-01-05 17:46:49 +01:00
|
|
|
if (data->remaining == 0) {
|
|
|
|
|
g_object_unref(self);
|
2017-03-27 15:22:22 +00:00
|
|
|
g_slice_free(ConnectivityCheckData, data);
|
2018-01-05 17:46:49 +01:00
|
|
|
}
|
2013-07-30 16:31:31 -04:00
|
|
|
}
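From the usage visible in this hunk (data->self, data->context, data->remaining, and the g_slice_free() above), the per-request tracking structure presumably looks roughly like the reconstruction below; the real definition lives elsewhere in this file and may carry additional members:

/* Illustrative reconstruction from the usage above, not the actual
 * definition. */
typedef struct {
    NMManager             *self;      /* strong ref, dropped when remaining hits 0 */
    GDBusMethodInvocation *context;   /* stolen when the D-Bus reply is sent */
    guint                  remaining; /* per-device checks still pending */
} ConnectivityCheckData;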
|
|
|
|
|
|
|
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
check_connectivity_auth_done_cb(NMAuthChain *chain,
|
2015-04-15 14:53:30 -04:00
|
|
|
GDBusMethodInvocation *context,
|
2013-07-30 16:31:31 -04:00
|
|
|
gpointer user_data)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = NM_MANAGER(user_data);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
GError *error = NULL;
|
2013-07-30 16:31:31 -04:00
|
|
|
NMAuthCallResult result;
|
2017-03-27 15:22:22 +00:00
|
|
|
ConnectivityCheckData *data;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-05-26 18:49:55 +02:00
|
|
|
c_list_unlink(nm_auth_chain_parent_lst_list(chain));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-07-30 16:31:31 -04:00
|
|
|
result = nm_auth_chain_get_result(chain, NM_AUTH_PERMISSION_NETWORK_CONTROL);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-05-04 09:37:54 +02:00
|
|
|
if (result != NM_AUTH_CALL_RESULT_YES) {
|
2013-07-30 16:31:31 -04:00
|
|
|
error = g_error_new_literal(NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"Not authorized to recheck connectivity");
|
2018-01-05 17:46:49 +01:00
|
|
|
}
|
|
|
|
|
if (error) {
|
|
|
|
|
g_dbus_method_invocation_take_error(context, error);
|
2019-05-02 10:08:09 +02:00
|
|
|
return;
|
2018-01-05 17:46:49 +01:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-01-05 17:46:49 +01:00
|
|
|
data = g_slice_new(ConnectivityCheckData);
|
|
|
|
|
data->self = g_object_ref(self);
|
|
|
|
|
data->context = context;
|
|
|
|
|
data->remaining = 0;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-01-05 17:46:49 +01:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
2018-07-03 19:20:45 +02:00
|
|
|
if (nm_device_check_connectivity(device, AF_INET, device_connectivity_done, data))
|
|
|
|
|
data->remaining++;
|
connectivity: schedule connectivity timers per-device and probe for short outages
It might happen that connectivity is lost only for a moment and
returns soon after. Based on that assumption, when we lose connectivity
we want to have a probe interval where we check for returning
connectivity more frequently.
For that, we handle tracking of the timeouts per-device.
The interval starts at 1 second, and the interval time doubles until
the full interval is reached. Actually, due to the implementation, it's unlikely
that we already perform the second check 1 second later. That is because commonly
the first check returns before the one second timeout is reached and bumps the
interval to 2 seconds right away.
Also, we go to extra lengths so that manual connectivity checks
delay the periodic checks. By being smarter about that, we can reduce
the number of connectivity checks, while still keeping the promise to
check at least within the requested interval.
The complexity of bookkeeping the timeouts is remarkable. But I think
it is worth the effort and we should try hard to
- have a connectivity state as accurate as possible. Clearly,
connectivity checking means that we are probing, so being more intelligent
about timeout and backoff timers can result in a better connectivity
state. The connectivity state is important because we use it for
the default-route penalty and the GUI indicates bad connectivity.
- be intelligent about avoiding redundant connectivity checks. While
we want to check often to get an accurate connectivity state, we
also want to minimize the number of HTTP requests, in case the
connectivity is established and supposedly stable.
Also, perform connectivity checks in every state of the device.
Even if a device is disconnected, it still might have connectivity,
for example if the user externally adds an IP address on an unmanaged
device.
https://bugzilla.gnome.org/show_bug.cgi?id=792240
2018-02-20 21:41:14 +01:00
|
|
|
if (nm_device_check_connectivity(device, AF_INET6, device_connectivity_done, data))
|
|
|
|
|
data->remaining++;
|
2013-07-30 16:31:31 -04:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-01-05 17:46:49 +01:00
|
|
|
if (data->remaining == 0) {
|
|
|
|
|
/* call the handler at least once. */
|
|
|
|
|
data->remaining = 1;
|
|
|
|
|
device_connectivity_done(NULL, NULL, NM_CONNECTIVITY_UNKNOWN, NULL, data);
|
|
|
|
|
/* @data got destroyed. */
|
|
|
|
|
}
|
2013-07-30 16:31:31 -04:00
|
|
|
}
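As an aside, the probe-interval doubling described in the "schedule connectivity timers per-device" annotation above is easy to sketch. The helper below is a minimal illustration of that backoff scheme, not the actual per-device timer code:

#include <glib.h>

/* Illustrative backoff helper: after losing connectivity, probe after
 * 1 second and double the interval until the configured full interval
 * is reached. */
static guint
demo_next_probe_interval_sec(guint cur_sec, guint full_sec)
{
    if (cur_sec == 0)
        return 1;
    return MIN(cur_sec * 2u, full_sec);
}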
|
|
|
|
|
|
|
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
impl_manager_check_connectivity(NMDBusObject *obj,
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDBusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of the D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. The generated code would also hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
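The "static descriptor instances of type GDBusInterfaceInfo" mentioned in the annotation above work roughly like the plain-GIO sketch below. This is a generic illustration (a negative ref_count marks the structs as statically allocated), using a hypothetical org.example.Demo interface rather than NM's NMDBusInterfaceInfoExtended machinery:

#include <gio/gio.h>

static GDBusArgInfo arg_connectivity = {
    .ref_count = -1, .name = (char *) "connectivity", .signature = (char *) "u"};
static GDBusArgInfo *check_out_args[] = {&arg_connectivity, NULL};

static GDBusMethodInfo method_check = {
    .ref_count = -1, .name = (char *) "CheckConnectivity", .out_args = check_out_args};
static GDBusMethodInfo *iface_methods[] = {&method_check, NULL};

static GDBusInterfaceInfo iface_info = {
    .ref_count = -1,
    .name      = (char *) "org.example.Demo", /* hypothetical interface name */
    .methods   = iface_methods,
};

static void
demo_method_call(GDBusConnection       *connection,
                 const char            *sender,
                 const char            *object_path,
                 const char            *interface_name,
                 const char            *method_name,
                 GVariant              *parameters,
                 GDBusMethodInvocation *invocation,
                 gpointer               user_data)
{
    /* dispatch on method_name; reply (or take an error) on the invocation. */
    g_dbus_method_invocation_return_value(invocation, g_variant_new("(u)", 0u));
}

static const GDBusInterfaceVTable demo_vtable = {.method_call = demo_method_call};

/* elsewhere, during export (error handling omitted):
 *   g_dbus_connection_register_object(connection, "/org/example/Demo",
 *                                     &iface_info, &demo_vtable,
 *                                     NULL, NULL, NULL);
 */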
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
2021-11-09 13:28:54 +01:00
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
2018-02-26 13:51:52 +01:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = NM_MANAGER(obj);
|
2015-04-15 14:53:30 -04:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
NMAuthChain *chain;
|
2013-07-30 16:31:31 -04:00
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more then 1300 lines of hand written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue us more under our control, we can address issus and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straight forward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry, either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in and idle handle, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we emit now more PropertiesChanged signals then before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without a strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes under similar
conditions doesn't show a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
|
|
|
chain = nm_auth_chain_new_context(invocation, check_connectivity_auth_done_cb, self);
|
2013-07-29 11:53:23 -05:00
|
|
|
if (!chain) {
|
2018-02-26 13:51:52 +01:00
|
|
|
g_dbus_method_invocation_return_error_literal(invocation,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
2019-09-04 10:18:56 +02:00
|
|
|
NM_UTILS_ERROR_MSG_REQ_AUTH_FAILED);
|
2013-07-29 11:53:23 -05:00
|
|
|
return;
|
2013-07-30 16:31:31 -04:00
|
|
|
}
|
2013-07-29 11:53:23 -05:00
|
|
|
|
2019-05-26 18:49:55 +02:00
|
|
|
c_list_link_tail(&priv->auth_lst_head, nm_auth_chain_parent_lst_list(chain));
|
2013-07-29 11:53:23 -05:00
|
|
|
nm_auth_chain_add_call(chain, NM_AUTH_PERMISSION_NETWORK_CONTROL, TRUE);
|
2013-07-30 16:31:31 -04:00
|
|
|
}
|
|
|
|
|
|
2014-09-17 14:17:30 -05:00
|
|
|
static void
|
|
|
|
|
start_factory(NMDeviceFactory *factory, gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
nm_device_factory_start(factory);
|
|
|
|
|
}
|
|
|
|
|
|
2018-07-06 21:09:58 +02:00
|
|
|
gboolean
|
2020-03-04 13:38:49 +01:00
|
|
|
nm_manager_write_device_state(NMManager *self, NMDevice *device, int *out_ifindex)
|
2016-09-23 17:36:21 +02:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2018-07-06 21:09:58 +02:00
|
|
|
int ifindex;
|
|
|
|
|
gboolean managed;
|
|
|
|
|
NMConfigDeviceStateManagedType managed_type;
|
2021-11-09 13:28:54 +01:00
|
|
|
const char *uuid = NULL;
|
|
|
|
|
const char *perm_hw_addr_fake = NULL;
|
2018-07-06 21:09:58 +02:00
|
|
|
gboolean perm_hw_addr_is_fake;
|
|
|
|
|
guint32 route_metric_default_aspired;
|
|
|
|
|
guint32 route_metric_default_effective;
|
2020-05-06 23:03:10 +02:00
|
|
|
NMTernary nm_owned;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-03-04 13:38:49 +01:00
|
|
|
NM_SET_OUT(out_ifindex, 0);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-07-06 21:09:58 +02:00
|
|
|
ifindex = nm_device_get_ip_ifindex(device);
|
|
|
|
|
if (ifindex <= 0)
|
|
|
|
|
return FALSE;
|
2022-06-12 19:50:09 -04:00
|
|
|
if (ifindex == NM_LOOPBACK_IFINDEX) {
|
2018-07-06 21:09:58 +02:00
|
|
|
/* ignore loopback */
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-07-06 21:09:58 +02:00
|
|
|
if (!nm_platform_link_get(priv->platform, ifindex))
|
|
|
|
|
return FALSE;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-07-06 21:09:58 +02:00
|
|
|
managed = nm_device_get_managed(device, FALSE);
|
|
|
|
|
if (managed) {
|
2018-09-18 18:14:27 +02:00
|
|
|
NMSettingsConnection *sett_conn = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-07-04 19:19:38 +02:00
|
|
|
if (nm_device_get_state(device) <= NM_DEVICE_STATE_ACTIVATED)
|
|
|
|
|
sett_conn = nm_device_get_settings_connection(device);
|
settings: use delegation instead of inheritance for NMSettingsConnection and NMConnection
NMConnection is an interface, which is implemented by the types
NMSimpleConnection (libnm-core), NMSettingsConnection (src) and
NMRemoteConnection (libnm).
NMSettingsConnection does a lot of things already:
1) it "is-a" NMDBusObject and exports the API of a connection profile
on D-Bus
2) it interacts with NMSettings and contains functionality
for tracking the profiles.
3) it is the base-class of types like NMSKeyfileConnection and
NMIfcfgConnection. These handle how the profile is persisted
on disk.
4) it implements NMConnection interface, to itself track the
settings of the profile.
3) and 4) would be better implemented via delegation than inheritance.
Address 4) and don't let NMSettingsConnection implement the NMConnection
interface. Instead, a settings-connection now references an NMSimpleConnection
instance, to which it delegates keeping the actual profile.
Advantages:
- by delegating, there is a clearer separation of what
NMSettingsConnection does. For example, in C we often required
casts from NMSettingsConnection to NMConnection. NMConnection
is a very trivial object with very little logic. When we have
a NMConnection instance at hand, it's good to know that it is
*only* that simple instead of also being an entire
NMSettingsConnection instance.
The main purpose of this patch is to simplify the code by separating
the NMConnection from the NMSettingsConnection. We should generally
be aware whether we handle a NMSettingsConnection or a trivial
NMConnection instance. Now, because NMSettingsConnection no longer
"is-a" NMConnection, this distinction is apparent.
- NMConnection is implemented as an interface and we create
NMSimpleConnection instances whenever we need a real instance.
In GLib, interfaces have a performance overhead, that we needlessly
pay all the time. With this change, we no longer require
NMConnection to be an interface. Thus, in the future we could compile
a version of libnm-core for the daemon, where NMConnection is not an
interface but a GObject implementation akin to NMSimpleConnection.
- In the previous implementation, we could not treat NMConnection as immutable
and copy-on-write.
For example, when NMDevice needs a snapshot of the activated
profile as applied-connection, all it can do is clone the entire
NMSettingsConnection as a NMSimpleConnection.
Likewise, when we get a NMConnection instance and want to keep
a reference to it, we cannot do that, because we never know
who also references and modifies the instance.
By separating NMSettingsConnection we could in the future have
NMConnection immutable and copy-on-write, to avoid all unnecessary
clones.
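A minimal sketch of that delegation (the private struct layout and the
GET_PRIVATE macro are assumptions for illustration, not the actual code):

    typedef struct {
        /* the trivial NMConnection instance holding the actual profile */
        NMConnection *connection; /* an NMSimpleConnection */
    } NMSettingsConnectionPrivate;

    NMConnection *
    nm_settings_connection_get_connection(NMSettingsConnection *self)
    {
        /* delegate instead of inherit: hand out the contained instance */
        return NM_SETTINGS_CONNECTION_GET_PRIVATE(self)->connection;
    }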
2018-08-11 11:08:17 +02:00
|
|
|
if (sett_conn)
|
|
|
|
|
uuid = nm_settings_connection_get_uuid(sett_conn);
|
2018-07-06 21:09:58 +02:00
|
|
|
managed_type = NM_CONFIG_DEVICE_STATE_MANAGED_TYPE_MANAGED;
|
|
|
|
|
} else if (nm_device_get_unmanaged_flags(device, NM_UNMANAGED_USER_EXPLICIT))
|
|
|
|
|
managed_type = NM_CONFIG_DEVICE_STATE_MANAGED_TYPE_UNMANAGED;
|
|
|
|
|
else
|
|
|
|
|
managed_type = NM_CONFIG_DEVICE_STATE_MANAGED_TYPE_UNKNOWN;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-07-06 21:09:58 +02:00
|
|
|
perm_hw_addr_fake =
|
|
|
|
|
nm_device_get_permanent_hw_address_full(device, FALSE, &perm_hw_addr_is_fake);
|
|
|
|
|
if (perm_hw_addr_fake && !perm_hw_addr_is_fake)
|
|
|
|
|
perm_hw_addr_fake = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-05-06 23:03:10 +02:00
|
|
|
nm_owned = nm_device_is_software(device) ? nm_device_is_nm_owned(device) : NM_TERNARY_DEFAULT;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-07-06 21:09:58 +02:00
|
|
|
route_metric_default_effective = _device_route_metric_get(self,
|
|
|
|
|
ifindex,
|
|
|
|
|
NM_DEVICE_TYPE_UNKNOWN,
|
|
|
|
|
TRUE,
|
|
|
|
|
&route_metric_default_aspired);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-03-04 13:38:49 +01:00
|
|
|
if (!nm_config_device_state_write(ifindex,
|
|
|
|
|
managed_type,
|
|
|
|
|
perm_hw_addr_fake,
|
|
|
|
|
uuid,
|
|
|
|
|
nm_owned,
|
|
|
|
|
route_metric_default_aspired,
|
|
|
|
|
route_metric_default_effective,
|
2022-04-19 18:09:49 +02:00
|
|
|
nm_device_get_dhcp_config(device, AF_INET),
|
|
|
|
|
nm_device_get_dhcp_config(device, AF_INET6)))
|
2020-03-04 13:38:49 +01:00
|
|
|
return FALSE;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2020-03-04 13:38:49 +01:00
|
|
|
NM_SET_OUT(out_ifindex, ifindex);
|
|
|
|
|
return TRUE;
|
2018-07-06 21:09:58 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nm_manager_write_device_state_all(NMManager *self)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2020-03-04 13:21:48 +01:00
|
|
|
gs_unref_hashtable GHashTable *preserve_ifindexes = NULL;
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device;
|
2017-05-31 16:58:21 +02:00
|
|
|
|
2020-03-04 13:21:48 +01:00
|
|
|
preserve_ifindexes = g_hash_table_new(nm_direct_hash, NULL);
|
2017-12-06 15:51:18 +01:00
|
|
|
|
2018-07-06 21:09:58 +02:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
2020-03-04 13:38:49 +01:00
|
|
|
int ifindex;
|
|
|
|
|
|
|
|
|
|
if (nm_manager_write_device_state(self, device, &ifindex)) {
|
2020-03-04 13:21:48 +01:00
|
|
|
g_hash_table_add(preserve_ifindexes, GINT_TO_POINTER(ifindex));
|
2018-07-06 21:09:58 +02:00
|
|
|
}
|
2016-09-23 17:36:21 +02:00
|
|
|
}
|
|
|
|
|
|
2020-03-04 16:52:57 +01:00
|
|
|
nm_config_device_state_prune_stale(preserve_ifindexes, NULL);
|
2016-09-23 17:36:21 +02:00
|
|
|
}
|
|
|
|
|
|
2017-03-14 10:42:36 +01:00
|
|
|
static gboolean
|
|
|
|
|
devices_inited_cb(gpointer user_data)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = user_data;
|
2017-03-14 10:42:36 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
|
|
|
|
|
priv->devices_inited_id = 0;
|
2020-02-17 13:24:28 +01:00
|
|
|
priv->devices_inited = TRUE;
|
2017-03-14 10:42:36 +01:00
|
|
|
check_if_startup_complete(self);
|
|
|
|
|
return G_SOURCE_REMOVE;
|
|
|
|
|
}
|
|
|
|
|
|
2015-08-03 09:26:31 -04:00
|
|
|
gboolean
|
|
|
|
|
nm_manager_start(NMManager *self, GError **error)
|
2009-06-11 00:39:12 -04:00
|
|
|
{
|
2022-07-11 16:07:09 +02:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
guint i;
|
2009-06-11 00:39:12 -04:00
|
|
|
|
settings: rework tracking settings connections and settings plugins
Completely rework how settings plugin handle connections and how
NMSettings tracks the list of connections.
Previously, settings plugins would return objects of (a subtype of) type
NMSettingsConnection. The NMSettingsConnection was tightly coupled with
the settings plugin. That has a lot of downsides.
Change that. When changing this basic relation of how settings connections
are tracked, everything falls apart. That's why this is a huge change.
Also, since I have to largely rewrite the settings plugins, I also
added support for multiple keyfile directories, handle in-memory
connections only by keyfile plugin and (partly) use copy-on-write NMConnection
instances. I don't want to spend effort rewriting large parts while
preserving the old way, that anyway should change. E.g. while rewriting ifcfg-rh,
I don't want to let it handle in-memory connections because that's not right
long-term.
--
If the settings plugins themselves create subtypes of NMSettingsConnection
instances, then a lot of knowledge about tracking connections moves
to the plugins.
Just try to follow in the code what happens during nm_settings_add_connection().
Note how the logic is spread out:
- nm_settings_add_connection() calls plugin's add_connection()
- add_connection() creates a NMSettingsConnection subtype
- the plugin has to know that it's called during add-connection and
not emit NM_SETTINGS_PLUGIN_CONNECTION_ADDED signal
- NMSettings calls claim_connection(), which hooks up the new
NMSettingsConnection instance and configures the instance
(like calling nm_settings_connection_added()).
This summary does not sound like a lot, but try to follow that code. The logic
is all over the place.
Instead, settings plugins should have a very simple API for adding, modifying,
deleting, loading and reloading connections. All the plugin does is to return a
NMSettingsStorage handle. The storage instance is a handle to identify a profile
in storage (e.g. a particular file). The settings plugin is free to subtype
NMSettingsStorage, but it's not necessary.
There are no more events raised, and the settings plugin implements the small
API in a straightforward manner.
NMSettings now drives all of this. Even NMSettingsConnection has now
very little concern about how it's tracked and delegates only to NMSettings.
This should make settings plugins simpler. Currently settings plugins
are so cumbersome to implement, that we avoid having them. It should not be
like that and it should be easy, beneficial and lightweight to create a new
settings plugin.
Note also how the settings plugins no longer care about duplicate UUIDs.
Duplicated UUIDs are a fact of life and NMSettings must handle them. No
need to overly concern settings plugins with that.
--
NMSettingsConnection is exposed directly on D-Bus (being a subtype of
NMDBusObject) but it was also a GObject type provided by the settings
plugin. Hence, it was not possible to migrate a profile from one plugin to
another.
However that would be useful when one profile does not support a
connection type (like ifcfg-rh not supporting VPN). Currently such
migration is not implemented except for migrating them to/from keyfile's
run directory. The problem is that migrating profiles in general is
complicated but in some cases it is important to do.
For example checkpoint rollback should recreate the profile in the right
settings plugin, not just add it to persistent storage. This is not yet
properly implemented.
--
Previously, both keyfile and ifcfg-rh plugin implemented in-memory (unsaved)
profiles, while the ifupdown plugin could not handle them. That meant duplication of code,
and an ifupdown profile could not be modified or made unsaved.
This is now unified and only keyfile plugin handles in-memory profiles (bgo #744711).
Also, NMSettings is aware of such profiles and treats them specially.
In particular, NMSettings drives the migration between persistent and non-persistent
storage.
Note that a settings plugin may create truly generated, in-memory profiles.
The settings plugin is free to generate and persist the profiles in any way it
wishes. But the concept of "unsaved" profiles is now something explicitly handled
by keyfile plugin. Also, these "unsaved" keyfile profiles are persisted to file system
too, to the /run directory. This is great for two reasons: first of all, all
profiles from keyfile storage in fact have a backing file -- even the
unsaved ones. It also means you can create "unsaved" profiles in /run
and load them with `nmcli connection load`, meaning there is a file-based
API for creating unsaved profiles.
The other advantage is that these profiles now survive restarting
NetworkManager. It's paramount that restarting the daemon is as
non-disruptive as possible. Persisting unsaved files to /run improves
here significantly.
--
In the past, NMSettingsConnection also implemented NMConnection interface.
That was already changed a while ago and instead users call now
nm_settings_connection_get_connection() to delegate to a
NMSimpleConnection. What however still happened was that the NMConnection
instance gets never swapped but instead the instance was modified with
nm_connection_replace_settings_from_connection(), clear-secrets, etc.
Change that and treat the NMConnection instance immutable. Instead of modifying
it, reference/clone a new instance. Previously, when somebody wanted to keep
a reference to an NMConnection, the profile would be cloned; this changes that.
Now, it is supposed to be safe to reference the instance directly, and everybody
must ensure not to modify the instance. nmtst_connection_assert_unchanging()
should help with that.
The point is that the settings plugins may keep references to the
NMConnection instance, and so does the NMSettingsConnection. We want
to avoid cloning the instances as long as they are the same.
Likewise, the device's applied connection can now also be referenced
instead of cloning it. This is not yet done, and possibly there are
further improvements possible.
--
Also implement multiple keyfile directories /usr/lib, /etc, /run (rh #1674545,
bgo #772414).
It was always the case that multiple files could provide the same UUID
(both in case of keyfile and ifcfg-rh). For keyfile plugin, if a profile in
read-only storage in /usr/lib gets modified, then it gets actually stored in
/etc (or /run, if the profile is unsaved).
--
While at it, make /etc/network/interfaces profiles for ifupdown plugin reloadable.
--
https://bugzilla.gnome.org/show_bug.cgi?id=772414
https://bugzilla.gnome.org/show_bug.cgi?id=744711
https://bugzilla.redhat.com/show_bug.cgi?id=1674545
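As a rough sketch of the copy-on-write discipline described above
(variable names invented for illustration; both functions are named in
this message):

    /* Previously, keeping the profile around meant cloning it. Now the
     * instance is treated as immutable, so a plain reference suffices. */
    NMConnection *connection = nm_settings_connection_get_connection(sett_conn);

    g_object_ref(connection);

    /* helps catch accidental modification of the shared instance */
    nmtst_connection_assert_unchanging(connection);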
2019-06-13 17:12:20 +02:00
|
|
|
nm_device_factory_manager_load_factories(_register_device_factory, self);
|
|
|
|
|
|
|
|
|
|
nm_device_factory_manager_for_each_factory(start_factory, NULL);
|
2015-08-03 09:26:31 -04:00
|
|
|
|
2009-12-23 00:03:45 -08:00
|
|
|
/* Set initial radio enabled/disabled state */
|
2022-02-01 20:21:34 +01:00
|
|
|
for (i = 0; i < NM_RFKILL_TYPE_MAX; i++) {
|
2022-02-01 22:19:36 +01:00
|
|
|
const NMRfkillType rtype = i;
|
|
|
|
|
RfkillRadioState *rstate = &priv->radio_states[rtype];
|
|
|
|
|
gboolean enabled;
|
2009-12-23 00:03:45 -08:00
|
|
|
|
2014-04-15 16:25:39 -05:00
|
|
|
/* recheck kernel rfkill state */
|
2022-03-21 10:19:37 +01:00
|
|
|
_rfkill_radio_state_set_from_manager(self, rtype, rstate);
|
2022-02-01 22:08:52 +01:00
|
|
|
|
|
|
|
|
_LOGI(LOGD_RFKILL,
|
|
|
|
|
"rfkill: %s %s by radio killswitch; %s by state file",
|
2022-02-01 22:32:46 +01:00
|
|
|
nm_rfkill_type_to_string(rtype),
|
2022-02-01 22:08:52 +01:00
|
|
|
(rstate->hw_enabled && rstate->sw_enabled) ? "enabled" : "disabled",
|
|
|
|
|
rstate->user_enabled ? "enabled" : "disabled");
|
2022-02-01 22:19:36 +01:00
|
|
|
enabled = _rfkill_radio_state_get_enabled(rstate, TRUE);
|
|
|
|
|
_rfkill_update_devices(self, rtype, enabled);
|
2009-12-23 00:03:45 -08:00
|
|
|
}
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGI(LOGD_CORE, "Networking is %s by state file", priv->net_enabled ? "enabled" : "disabled");
|
2010-04-08 18:23:43 -07:00
|
|
|
|
2010-10-27 15:47:10 -05:00
|
|
|
system_unmanaged_devices_changed_cb(priv->settings, NULL, self);
|
2019-06-13 17:12:20 +02:00
|
|
|
|
2022-01-04 18:28:46 +01:00
|
|
|
_static_hostname_changed_cb(priv->hostname_manager, NULL, self);
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2019-06-13 17:12:20 +02:00
|
|
|
if (!nm_settings_start(priv->settings, error))
|
|
|
|
|
return FALSE;
|
2014-09-05 14:48:21 -05:00
|
|
|
|
2017-09-29 15:11:33 +02:00
|
|
|
nm_platform_process_events(priv->platform);
|
2017-03-14 12:14:21 +01:00
|
|
|
|
2017-09-29 15:11:33 +02:00
|
|
|
g_signal_connect(priv->platform,
|
2017-03-14 12:14:21 +01:00
|
|
|
NM_PLATFORM_SIGNAL_LINK_CHANGED,
|
|
|
|
|
G_CALLBACK(platform_link_cb),
|
|
|
|
|
self);
|
|
|
|
|
|
2015-05-04 16:54:51 +02:00
|
|
|
platform_query_devices(self);
|
2011-10-18 13:48:44 +02:00
|
|
|
|
core: better order the code at startup
NM was calling nm_bus_manager_start_service() to claim its bus name
before it exported any of its objects, but this didn't matter under
dbus-glib, because no client connections would be accepted until the
main loop was started later on, by which point we would have exported
everything.
But with gdbus, method calls are initially received in the gdbus
worker thread, which means that clients would be able to connect right
away and then be told that the expected interfaces don't exist.
So move the nm_bus_manager_start_service() call to occur after
creating NMSettings and NMManager (and, indirectly, NMAgentManager).
This requires splitting out the slow parts of nm_settings_new() into a
new nm_settings_start(), so that we can create and export it first,
and then read the connections, etc. afterward. (Likewise, there were
still a few potentially-slow bits in nm_manager_new() which are now
moved into nm_manager_start().)
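A sketch of the reordered startup described above (signatures are
simplified and partly assumed; only the ordering matters here):

    settings = nm_settings_new();               /* create and export the object only */
    manager  = nm_manager_new(settings, error); /* assumed signature */

    nm_bus_manager_start_service(bus_mgr);      /* claim the bus name only now */

    if (!nm_settings_start(settings, error))    /* the slow part: read connections */
        return FALSE;
    if (!nm_manager_start(manager, error))
        return FALSE;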
2015-07-31 13:00:22 -04:00
|
|
|
/* Load VPN plugins */
|
|
|
|
|
priv->vpn_manager = g_object_ref(nm_vpn_manager_get());
|
|
|
|
|
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD(LOGD_CORE, "creating virtual devices...");
|
2019-06-13 17:12:20 +02:00
|
|
|
g_signal_connect(priv->settings,
|
|
|
|
|
NM_SETTINGS_SIGNAL_CONNECTION_ADDED,
|
|
|
|
|
G_CALLBACK(connection_added_cb),
|
|
|
|
|
self);
|
|
|
|
|
g_signal_connect(priv->settings,
|
|
|
|
|
NM_SETTINGS_SIGNAL_CONNECTION_UPDATED,
|
|
|
|
|
G_CALLBACK(connection_updated_cb),
|
|
|
|
|
self);
|
2022-07-11 16:07:09 +02:00
|
|
|
|
|
|
|
|
/* Make sure virtual devices for all connections are created so
|
|
|
|
|
* that they could be autoconnected. */
|
|
|
|
|
connections_changed(self);
|
core: only manage those bridges created by NetworkManager (rh #905035)
Until we handle bridges non-destructively, only manage bridges
created by NM. When quitting, write out a file listing all
bridges created by NM and a timestamp; when starting, read
that file, and if the timestamp is within 30 minutes, manage
any bridge that was listed in that file. This scheme, while
not foolproof (e.g., if NM crashes), should ensure that NM can
recognize bridges it created if it's restarted. The file
is stored in /run or /var/run, which is cleaned each restart,
ensuring that the state does not persist across reboots.
If an automatic or user-initiated activation request for
a bridge NM does not manage is received, that request is
denied. Only if the bridge interface does not yet exist, or
was present in the managed bridges file, will an
NMDeviceBridge be created and activation be possible.
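A rough sketch of the 30-minute validity check described above (the
helper name and its use are hypothetical):

    #define MANAGED_BRIDGES_MAX_AGE_SEC (30 * 60)

    /* hypothetical helper: is the saved bridge list still trustworthy? */
    static gboolean
    managed_bridges_file_is_fresh(gint64 file_timestamp_sec)
    {
        gint64 now_sec = g_get_real_time() / G_USEC_PER_SEC;

        return now_sec - file_timestamp_sec <= MANAGED_BRIDGES_MAX_AGE_SEC;
    }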
2013-02-01 18:03:11 -06:00
|
|
|
|
2020-02-17 13:24:28 +01:00
|
|
|
nm_clear_g_source(&priv->devices_inited_id);
|
|
|
|
|
priv->devices_inited_id = g_idle_add_full(G_PRIORITY_LOW + 10, devices_inited_cb, self, NULL);
|
2015-08-03 09:26:31 -04:00
|
|
|
|
|
|
|
|
return TRUE;
|
2009-06-11 00:39:12 -04:00
|
|
|
}
|
|
|
|
|
|
2014-10-29 09:12:18 -05:00
|
|
|
void
|
|
|
|
|
nm_manager_stop(NMManager *self)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device;
|
2014-10-29 09:12:18 -05:00
|
|
|
|
2018-04-18 10:23:22 +02:00
|
|
|
/* FIXME(shutdown): we don't do a proper shutdown yet:
|
|
|
|
|
* - need to ensure that all pending async operations are cancelled
|
|
|
|
|
* - e.g. operations in priv->async_op_lst_head
|
|
|
|
|
* - need to ensure that no more asynchronous requests are started,
|
|
|
|
|
* or that they complete quickly, or that they fail quickly.
|
|
|
|
|
* - note that cancelling some operations is not possible synchronously.
|
|
|
|
|
* Hence, stop() only prepares shutdown and tells everybody to not
|
|
|
|
|
* accept new work, and to complete in a timely manner.
|
|
|
|
|
* We need to still iterate the mainloop for a bit, to give everybody
|
|
|
|
|
* the chance to complete.
|
|
|
|
|
* - e.g. see comment at nm_auth_manager_force_shutdown()
|
|
|
|
|
*/
|
|
|
|
|
|
core/dbus: stop NMDBusManager and reject future method calls
During shutdown, we will need to still iterate the main loop
to do a coordinated shutdown. Currently we do not, and we just
exit, leaving a lot of objects hanging.
If we are going to fix that, we need to tell NMDBusManager during
shutdown to reject all future operations.
Note that property getters and the "GetManagedObjects" call are not
blocked. They continue to work.
Certainly for some operations, we want to allow them to be called even
during shutdown. However, these have to opt-in.
This also fixes an ugliness, where nm_dbus_manager_start() would
get the set-property-handler and the @manager as user-data. However,
NMDBusManager will always outlive NMManager; hence, after NMManager
is destroyed, the user-data would be a dangling pointer. Currently
that is not an issue, because
- we always leak NMManager
- we don't run the mainloop during shutdown
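A minimal sketch of that opt-in rejection (the flag and field names are
assumptions; the error domain is chosen for illustration):

    /* in the method dispatch path: reject new calls once stopped, unless
     * the method explicitly opted in to run during shutdown. */
    if (priv->shutting_down && !method_info->allow_during_shutdown) {
        g_dbus_method_invocation_return_error_literal(invocation,
                                                      NM_MANAGER_ERROR,
                                                      NM_MANAGER_ERROR_FAILED,
                                                      "NetworkManager is shutting down");
        return;
    }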
2018-04-21 13:25:57 +02:00
|
|
|
nm_dbus_manager_stop(nm_dbus_object_get_manager(NM_DBUS_OBJECT(self)));
|
|
|
|
|
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than a GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1) and you can unlink the
element without having access to the link's head
- Contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
would be perfect to only consume 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we would ever introduce multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
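A minimal sketch of the embedded-link pattern (the Device struct is
simplified for illustration; the c_list_*() calls are the real API used
throughout this file):

    typedef struct {
        CList devices_lst; /* embedded link node: no separate allocation */
        char *iface;
    } Device;

    CList devices_lst_head = C_LIST_INIT(devices_lst_head);
    Device *dev = g_new0(Device, 1);

    c_list_link_tail(&devices_lst_head, &dev->devices_lst);

    /* unlink in O(1), without needing access to the list head: */
    c_list_unlink(&dev->devices_lst);

    /* find out in O(1) whether the object is linked: */
    g_assert(!c_list_is_linked(&dev->devices_lst));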
2018-03-23 21:51:07 +01:00
|
|
|
while ((device = c_list_first_entry(&priv->devices_lst_head, NMDevice, devices_lst)))
|
2019-05-17 19:22:19 +02:00
|
|
|
remove_device(self, device, TRUE);
|
2016-01-22 15:19:06 +01:00
|
|
|
|
|
|
|
|
_active_connection_cleanup(self);
|
2017-03-14 10:42:36 +01:00
|
|
|
|
|
|
|
|
nm_clear_g_source(&priv->devices_inited_id);
|
2014-10-29 09:12:18 -05:00
|
|
|
}
|
|
|
|
|
|
2010-07-01 10:32:11 -07:00
|
|
|
static gboolean
|
|
|
|
|
handle_firmware_changed(gpointer user_data)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = NM_MANAGER(user_data);
|
2010-07-01 10:32:11 -07:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
NMDevice *device;
|
2010-07-01 10:32:11 -07:00
|
|
|
|
|
|
|
|
priv->fw_changed_id = 0;
|
|
|
|
|
|
|
|
|
|
/* Try to re-enable devices with missing firmware */
|
2018-03-23 21:51:07 +01:00
|
|
|
c_list_for_each_entry (device, &priv->devices_lst_head, devices_lst) {
|
|
|
|
|
NMDeviceState state = nm_device_get_state(device);
|
2010-07-01 10:32:11 -07:00
|
|
|
|
|
|
|
|
if (nm_device_get_firmware_missing(device) && (state == NM_DEVICE_STATE_UNAVAILABLE)) {
|
2018-03-23 21:51:07 +01:00
|
|
|
_LOG2I(LOGD_CORE, device, "firmware may now be available");
|
2010-07-01 10:32:11 -07:00
|
|
|
|
|
|
|
|
/* Re-set unavailable state to try bringing the device up again */
|
2018-03-23 21:51:07 +01:00
|
|
|
nm_device_state_changed(device,
|
2010-07-01 10:32:11 -07:00
|
|
|
NM_DEVICE_STATE_UNAVAILABLE,
|
|
|
|
|
NM_DEVICE_STATE_REASON_NONE);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
firmware_dir_changed(GFileMonitor *monitor,
|
|
|
|
|
GFile *file,
|
|
|
|
|
GFile *other_file,
|
2010-07-01 10:32:11 -07:00
|
|
|
GFileMonitorEvent event_type,
|
|
|
|
|
gpointer user_data)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = NM_MANAGER(user_data);
|
2010-07-01 10:32:11 -07:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2010-07-01 10:32:11 -07:00
|
|
|
switch (event_type) {
|
|
|
|
|
case G_FILE_MONITOR_EVENT_CREATED:
|
|
|
|
|
case G_FILE_MONITOR_EVENT_CHANGED:
|
|
|
|
|
case G_FILE_MONITOR_EVENT_MOVED:
|
|
|
|
|
case G_FILE_MONITOR_EVENT_ATTRIBUTE_CHANGED:
|
|
|
|
|
case G_FILE_MONITOR_EVENT_CHANGES_DONE_HINT:
|
|
|
|
|
if (!priv->fw_changed_id) {
|
|
|
|
|
priv->fw_changed_id = g_timeout_add_seconds(4, handle_firmware_changed, self);
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGI(LOGD_CORE, "kernel firmware directory '%s' changed", KERNEL_FIRMWARE_DIR);
|
2010-07-01 10:32:11 -07:00
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
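The handler above coalesces bursts of file-monitor events: the first event arms a one-shot timeout and further events in the same burst are ignored until it fires. A self-contained sketch of the same debounce pattern in plain GIO (the directory path and names are illustrative, error handling elided):

#include <gio/gio.h>

static guint fw_changed_id;

static gboolean
handle_changed(gpointer user_data)
{
    fw_changed_id = 0;   /* let the next burst re-arm the timeout */
    g_message("firmware directory settled; re-checking devices");
    return G_SOURCE_REMOVE;
}

static void
dir_changed(GFileMonitor      *monitor,
            GFile             *file,
            GFile             *other_file,
            GFileMonitorEvent  event_type,
            gpointer           user_data)
{
    /* schedule the real work at most once per 4-second window */
    if (!fw_changed_id)
        fw_changed_id = g_timeout_add_seconds(4, handle_changed, NULL);
}

int
main(void)
{
    g_autoptr(GFile) dir = g_file_new_for_path("/lib/firmware");
    g_autoptr(GFileMonitor) mon =
        g_file_monitor_directory(dir, G_FILE_MONITOR_NONE, NULL, NULL);

    g_signal_connect(mon, "changed", G_CALLBACK(dir_changed), NULL);
    g_main_loop_run(g_main_loop_new(NULL, FALSE));
    return 0;
}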
|
|
|
|
|
|
2015-06-03 09:15:24 +02:00
|
|
|
static void
|
|
|
|
|
connection_metered_changed(GObject *object, NMMetered metered, gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
nm_manager_update_metered(NM_MANAGER(user_data));
|
|
|
|
|
}
|
|
|
|
|
|
2013-08-22 13:06:51 -04:00
|
|
|
static void
|
2018-06-28 18:05:05 +02:00
|
|
|
policy_default_ac_changed(GObject *object, GParamSpec *pspec, gpointer user_data)
|
2013-08-22 13:06:51 -04:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = NM_MANAGER(user_data);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2013-08-22 13:06:51 -04:00
|
|
|
NMActiveConnection *ac;
|
|
|
|
|
|
|
|
|
|
/* Note: this assumes that it's not possible for the IP4 default
|
|
|
|
|
* route to be going over the default-ip6-device. If that changes,
|
|
|
|
|
* we need something more complicated here.
|
|
|
|
|
*/
|
2018-06-28 18:05:05 +02:00
|
|
|
ac = nm_policy_get_default_ip4_ac(priv->policy);
|
|
|
|
|
if (!ac)
|
|
|
|
|
ac = nm_policy_get_default_ip6_ac(priv->policy);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-08-22 13:06:51 -04:00
|
|
|
if (ac != priv->primary_connection) {
|
2015-06-03 09:15:24 +02:00
|
|
|
if (priv->primary_connection) {
|
|
|
|
|
g_signal_handlers_disconnect_by_func(priv->primary_connection,
|
|
|
|
|
G_CALLBACK(connection_metered_changed),
|
|
|
|
|
self);
|
|
|
|
|
g_clear_object(&priv->primary_connection);
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2013-08-22 13:06:51 -04:00
|
|
|
priv->primary_connection = ac ? g_object_ref(ac) : NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2015-06-03 09:15:24 +02:00
|
|
|
if (priv->primary_connection) {
|
2018-06-28 18:05:05 +02:00
|
|
|
g_signal_connect(priv->primary_connection,
|
|
|
|
|
NM_ACTIVE_CONNECTION_DEVICE_METERED_CHANGED,
|
2015-06-03 09:15:24 +02:00
|
|
|
G_CALLBACK(connection_metered_changed),
|
|
|
|
|
self);
|
|
|
|
|
}
|
2018-06-28 18:05:05 +02:00
|
|
|
_LOGD(LOGD_CORE,
|
|
|
|
|
"PrimaryConnection now %s",
|
|
|
|
|
ac ? nm_active_connection_get_settings_connection_id(ac) : "(none)");
|
2016-04-01 17:34:51 +02:00
|
|
|
_notify(self, PROP_PRIMARY_CONNECTION);
|
|
|
|
|
_notify(self, PROP_PRIMARY_CONNECTION_TYPE);
|
2015-06-03 09:15:24 +02:00
|
|
|
nm_manager_update_metered(self);
|
2013-08-22 13:06:51 -04:00
|
|
|
}
|
|
|
|
|
}
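The function above follows a common GObject ownership pattern: disconnect the handler from the old object before unreffing it, then ref and connect to the new one, so the signal subscription never outlives the reference it depends on. A reduced sketch (names illustrative; "notify" stands in for the metered-changed signal used above):

#include <glib-object.h>

static void
track_object(GObject **tracked, GObject *new_obj,
             GCallback cb, gpointer user_data)
{
    if (*tracked == new_obj)
        return;

    if (*tracked) {
        /* disconnect first, then drop the reference */
        g_signal_handlers_disconnect_by_func(*tracked, cb, user_data);
        g_clear_object(tracked);
    }
    if (new_obj) {
        /* take the reference first, then subscribe */
        *tracked = g_object_ref(new_obj);
        g_signal_connect(*tracked, "notify", cb, user_data);
    }
}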
|
|
|
|
|
|
|
|
|
|
static void
|
2018-06-28 18:05:05 +02:00
|
|
|
policy_activating_ac_changed(GObject *object, GParamSpec *pspec, gpointer user_data)
|
2013-08-22 13:06:51 -04:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = NM_MANAGER(user_data);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2018-06-28 18:05:05 +02:00
|
|
|
NMActiveConnection *activating, *best;
|
2013-08-22 13:06:51 -04:00
|
|
|
|
2018-06-28 18:05:05 +02:00
|
|
|
/* We only look at activating-ip6-ac if activating-ip4-ac
|
|
|
|
|
* AND default-ip4-ac are NULL; if default-ip4-ac is
|
|
|
|
|
* non-NULL, then activating-ip6-ac is irrelevant, since while
|
|
|
|
|
* that AC might become the new default-ip6-ac, it can't
|
|
|
|
|
* become primary-connection while default-ip4-ac is set to
|
2013-08-22 13:06:51 -04:00
|
|
|
* something else.
|
|
|
|
|
*/
|
2018-06-28 18:05:05 +02:00
|
|
|
activating = nm_policy_get_activating_ip4_ac(priv->policy);
|
|
|
|
|
best = nm_policy_get_default_ip4_ac(priv->policy);
|
2013-08-22 13:06:51 -04:00
|
|
|
if (!activating && !best)
|
2018-06-28 18:05:05 +02:00
|
|
|
activating = nm_policy_get_activating_ip6_ac(priv->policy);
|
2013-08-22 13:06:51 -04:00
|
|
|
|
2018-06-28 18:05:05 +02:00
|
|
|
if (nm_g_object_ref_set(&priv->activating_connection, activating)) {
|
|
|
|
|
_LOGD(LOGD_CORE,
|
|
|
|
|
"ActivatingConnection now %s",
|
|
|
|
|
activating ? nm_active_connection_get_settings_connection_id(activating) : "(none)");
|
2016-04-01 17:34:51 +02:00
|
|
|
_notify(self, PROP_ACTIVATING_CONNECTION);
|
2013-08-22 13:06:51 -04:00
|
|
|
}
|
|
|
|
|
}
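nm_g_object_ref_set() above swaps a held reference and reports whether the pointer actually changed, which is what gates the log line and the property notification. GLib's g_set_object() (available since 2.44) has the same shape and is enough for a sketch:

#include <glib-object.h>

static GObject *activating;

static void
set_activating(GObject *ac)
{
    /* TRUE only when the stored pointer really changed */
    if (g_set_object(&activating, ac))
        g_message("ActivatingConnection now %p", (void *) ac);
}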
|
|
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions, doesn't give a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS
2018-02-26 13:51:52 +01:00
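What "static descriptor instances plus GDBusConnection" means in practice can be shown with plain GIO, without NM's NMDBusObject/NMDBusManager wrappers; the interface name, object path, and method here are made up for the sketch:

#include <gio/gio.h>

static const char introspection_xml[] =
    "<node>"
    "  <interface name='org.example.Demo'>"
    "    <method name='Ping'/>"
    "  </interface>"
    "</node>";

static void
method_call(GDBusConnection *conn, const char *sender, const char *path,
            const char *iface, const char *method, GVariant *params,
            GDBusMethodInvocation *invocation, gpointer user_data)
{
    g_dbus_method_invocation_return_value(invocation, NULL);
}

static const GDBusInterfaceVTable vtable = {
    .method_call = method_call,
};

static guint
export_demo(GDBusConnection *conn)
{
    /* parsed once at startup; in NM the descriptors are fully static */
    GDBusNodeInfo *node = g_dbus_node_info_new_for_xml(introspection_xml, NULL);

    return g_dbus_connection_register_object(conn, "/org/example/Demo",
                                             node->interfaces[0],
                                             &vtable, NULL, NULL, NULL);
}

No generated skeleton objects, no GDBusObjectManagerServer: the vtable hooks the object straight into the connection, which is the layer this commit moves to.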
|
|
|
/*****************************************************************************/
|
2015-04-15 14:53:30 -04:00
|
|
|
|
|
|
|
|
typedef struct {
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self;
|
|
|
|
|
NMDBusObject *obj;
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
const NMDBusInterfaceInfoExtended *interface_info;
|
2021-11-09 13:28:54 +01:00
|
|
|
const NMDBusPropertyInfoExtended *property_info;
|
|
|
|
|
GVariant *value;
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
guint64 export_version_id;
|
|
|
|
|
} DBusSetPropertyHandle;
|
2015-04-15 14:53:30 -04:00
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
#define NM_PERM_DENIED_ERROR "org.freedesktop.NetworkManager.PermissionDenied"
|
2010-08-25 15:12:32 -05:00
|
|
|
|
2021-04-12 10:04:19 +02:00
|
|
|
static const char *
|
|
|
|
|
_dbus_set_property_audit_log_get_args(NMDBusObject *obj,
|
2021-11-09 13:28:54 +01:00
|
|
|
const char *property_name,
|
|
|
|
|
GVariant *value,
|
|
|
|
|
char **str_to_free)
|
2021-04-12 10:04:19 +02:00
|
|
|
{
|
|
|
|
|
nm_assert(str_to_free && !*str_to_free);
|
|
|
|
|
|
|
|
|
|
/* We assert here that the property is one of the few expected ones.
|
|
|
|
|
*
|
|
|
|
|
* Future properties should not be made writable! Add a D-Bus method instead;
|
|
|
|
|
* they are more flexible (for example, you can set multiple properties at
|
|
|
|
|
* once). */
|
|
|
|
|
|
|
|
|
|
if (NM_IS_DEVICE(obj)) {
|
|
|
|
|
nm_assert(NM_IN_STRSET(property_name,
|
|
|
|
|
NM_DEVICE_MANAGED,
|
|
|
|
|
NM_DEVICE_AUTOCONNECT,
|
|
|
|
|
NM_DEVICE_STATISTICS_REFRESH_RATE_MS));
|
|
|
|
|
return (*str_to_free = g_variant_print(value, FALSE));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
nm_assert(NM_IS_MANAGER(obj));
|
|
|
|
|
if (NM_IN_STRSET(property_name,
|
|
|
|
|
NM_MANAGER_WIRELESS_ENABLED,
|
|
|
|
|
NM_MANAGER_WWAN_ENABLED,
|
|
|
|
|
NM_MANAGER_WIMAX_ENABLED,
|
|
|
|
|
NM_MANAGER_CONNECTIVITY_CHECK_ENABLED)) {
|
|
|
|
|
return (*str_to_free = g_strdup_printf("%s:%s",
|
|
|
|
|
property_name,
|
|
|
|
|
g_variant_get_boolean(value) ? "on" : "off"));
|
|
|
|
|
}
|
|
|
|
|
if (NM_IN_STRSET(property_name, NM_MANAGER_GLOBAL_DNS_CONFIGURATION)) {
|
|
|
|
|
return NM_MANAGER_GLOBAL_DNS_CONFIGURATION;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return nm_assert_unreachable_val("???");
|
|
|
|
|
}
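The return convention above is worth noting: the function may hand back either a static string or a freshly allocated one, and in the allocated case ownership travels through the str_to_free out-parameter, so a single gs_free (g_autofree in plain GLib) at the call site covers both. Reduced to a sketch:

static const char *
describe_value(GVariant *value, char **str_to_free)
{
    g_assert(str_to_free && !*str_to_free);

    if (!value)
        return "(none)";   /* static storage: nothing to free */

    /* allocated: ownership moves to the caller via the out-parameter */
    return (*str_to_free = g_variant_print(value, FALSE));
}

/* call site:
 *   g_autofree char *to_free = NULL;
 *   const char *s = describe_value(v, &to_free);
 */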
|
|
|
|
|
|
|
|
|
|
/* this is a macro to catch the caller's line number. */
|
|
|
|
|
#define _dbus_set_property_audit_log(obj, \
|
|
|
|
|
audit_op, \
|
|
|
|
|
auth_subject, \
|
|
|
|
|
property_name, \
|
|
|
|
|
value, \
|
|
|
|
|
error_message) \
|
|
|
|
|
G_STMT_START \
|
|
|
|
|
{ \
|
|
|
|
|
NMDBusObject *const _obj = (obj); \
|
|
|
|
|
const char *const _audit_op = (audit_op); \
|
|
|
|
|
NMAuthSubject *const _auth_subject = (auth_subject); \
|
|
|
|
|
const char *const _property_name = (property_name); \
|
|
|
|
|
GVariant *const _value = (value); \
|
|
|
|
|
const char *const _error_message = (error_message); \
|
2021-11-09 13:28:54 +01:00
|
|
|
gs_free char *_args_to_free = NULL; \
|
2021-04-12 10:04:19 +02:00
|
|
|
\
|
|
|
|
|
if (NM_IS_DEVICE(_obj)) { \
|
|
|
|
|
nm_audit_log_device_op(_audit_op, \
|
|
|
|
|
NM_DEVICE(_obj), \
|
|
|
|
|
!_error_message, \
|
|
|
|
|
_dbus_set_property_audit_log_get_args(_obj, \
|
|
|
|
|
_property_name, \
|
|
|
|
|
_value, \
|
|
|
|
|
&_args_to_free), \
|
|
|
|
|
_auth_subject, \
|
|
|
|
|
_error_message); \
|
|
|
|
|
} else { \
|
|
|
|
|
nm_audit_log_control_op(_audit_op, \
|
|
|
|
|
_dbus_set_property_audit_log_get_args(_obj, \
|
|
|
|
|
_property_name, \
|
|
|
|
|
_value, \
|
|
|
|
|
&_args_to_free), \
|
|
|
|
|
!_error_message, \
|
|
|
|
|
_auth_subject, \
|
|
|
|
|
_error_message); \
|
|
|
|
|
} \
|
|
|
|
|
} \
|
|
|
|
|
G_STMT_END
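G_STMT_START/G_STMT_END wrap the macro body in (typically) do { ... } while (0), so the multi-statement expansion behaves as a single statement even in an unbraced if/else. A tiny illustration of the idiom:

#define SWAP_INT(a, b)   \
    G_STMT_START         \
    {                    \
        int _tmp = (a);  \
                         \
        (a) = (b);       \
        (b) = _tmp;      \
    }                    \
    G_STMT_END

/* if (need_swap)
 *     SWAP_INT(x, y);   <-- parses correctly; a bare { } block would
 * else                      break on the ';' before 'else'.
 *     ...;
 */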
|
|
|
|
|
|
2010-08-25 15:12:32 -05:00
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
_dbus_set_property_auth_cb(NMAuthChain *chain,
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
gpointer user_data)
|
2010-08-25 15:12:32 -05:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
DBusSetPropertyHandle *handle_data = user_data;
|
|
|
|
|
gs_unref_object NMDBusObject *obj = handle_data->obj;
|
|
|
|
|
const NMDBusInterfaceInfoExtended *interface_info = handle_data->interface_info;
|
|
|
|
|
const NMDBusPropertyInfoExtended *property_info = handle_data->property_info;
|
|
|
|
|
gs_unref_variant GVariant *value = handle_data->value;
|
|
|
|
|
guint64 export_version_id = handle_data->export_version_id;
|
|
|
|
|
gs_unref_object NMManager *self = handle_data->self;
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
NMAuthCallResult result;
|
|
|
|
|
gs_free_error GError *local = NULL;
|
|
|
|
|
const char *error_name = NULL;
|
|
|
|
|
const char *error_message = NULL;
|
|
|
|
|
GValue gvalue;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
g_slice_free(DBusSetPropertyHandle, handle_data);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-05-26 18:49:55 +02:00
|
|
|
c_list_unlink(nm_auth_chain_parent_lst_list(chain));
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
result = nm_auth_chain_get_result(chain, property_info->writable.permission);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-05-04 09:37:54 +02:00
|
|
|
if (result != NM_AUTH_CALL_RESULT_YES) {
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
error_name = NM_PERM_DENIED_ERROR;
|
2019-05-04 09:37:54 +02:00
|
|
|
error_message = "Not authorized to perform this operation";
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
goto out;
|
2010-08-25 15:12:32 -05:00
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|

    if (export_version_id != nm_dbus_object_get_export_version_id(obj)) {
        error_name    = "org.freedesktop.DBus.Error.UnknownObject";
        error_message = "Object was deleted while authenticating";
        goto out;
    }

    /* Handle some properties specially *sigh* */
    if (interface_info == &interface_info_manager
        && nm_streq(property_info->property_name, NM_MANAGER_GLOBAL_DNS_CONFIGURATION)) {
        const NMGlobalDnsConfig *global_dns;

        global_dns = nm_config_data_get_global_dns_config(nm_config_get_data(priv->config));
        if (global_dns && !nm_global_dns_config_is_internal(global_dns)) {
            error_name    = NM_PERM_DENIED_ERROR;
            error_message = "Global DNS configuration already set via configuration file";
            goto out;
        }
    }
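
    /* Convert the D-Bus value to a GValue and apply it through the normal
     * GObject property machinery; on validation failure, `local` carries
     * the error message returned to the caller below. */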
    g_dbus_gvariant_to_gvalue(value, &gvalue);
    if (!nm_g_object_set_property(G_OBJECT(obj), property_info->property_name, &gvalue, &local)) {
        error_name    = "org.freedesktop.DBus.Error.InvalidArgs";
        error_message = local->message;
    }
    g_value_unset(&gvalue);

out:
    _dbus_set_property_audit_log(obj,
                                 property_info->writable.audit_op,
                                 nm_auth_chain_get_subject(chain),
                                 property_info->property_name,
                                 value,
                                 error_message);

    if (error_message)
        g_dbus_method_invocation_return_dbus_error(invocation, error_name, error_message);
    else
        g_dbus_method_invocation_return_value(invocation, NULL);
}
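
/* Illustrative sketch (not the actual NMDBusManager code, which routes this
 * through dispatch_properties_changed()): emitting
 * org.freedesktop.DBus.Properties.PropertiesChanged directly on the
 * GDBusConnection, with no skeleton layer in between. The object path,
 * interface name and property below are placeholders; assumes <gio/gio.h>. */
static void
example_emit_properties_changed(GDBusConnection *connection)
{
    GVariantBuilder changed;
    const char     *no_invalidated[] = {NULL};

    g_variant_builder_init(&changed, G_VARIANT_TYPE("a{sv}"));
    g_variant_builder_add(&changed, "{sv}", "ExampleProperty", g_variant_new_boolean(TRUE));

    g_dbus_connection_emit_signal(connection,
                                  NULL, /* NULL destination: broadcast */
                                  "/org/example/Object",
                                  "org.freedesktop.DBus.Properties",
                                  "PropertiesChanged",
                                  g_variant_new("(sa{sv}^as)",
                                                "org.example.Interface",
                                                &changed,
                                                no_invalidated),
                                  NULL);
}
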
void
nm_manager_dbus_set_property_handle(NMDBusObject                      *obj,
                                    const NMDBusInterfaceInfoExtended *interface_info,
                                    const NMDBusPropertyInfoExtended  *property_info,
                                    GDBusConnection                   *connection,
                                    const char                        *sender,
                                    GDBusMethodInvocation             *invocation,
                                    GVariant                          *value,
                                    gpointer                           user_data)
{
    NMManager                     *self = user_data;
    NMManagerPrivate              *priv = NM_MANAGER_GET_PRIVATE(self);
    NMAuthChain                   *chain;
    const char                    *error_message = NULL;
    gs_unref_object NMAuthSubject *subject       = NULL;
    DBusSetPropertyHandle         *handle_data;

    /* we only have writable properties on Device or Manager. In the future,
     * we probably should not add new API with writable properties. Add
     * methods instead. Systemd also avoids writable properties. */
    nm_assert(obj == (gpointer) self || NM_IS_DEVICE(obj));
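
    /* Determine who is asking: the D-Bus sender of this invocation becomes
     * the NMAuthSubject that the authorization chain checks below. */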
    subject = nm_dbus_manager_new_auth_subject_from_context(invocation);
    if (!subject) {
        error_message = NM_UTILS_ERROR_MSG_REQ_UID_UKNOWN;
        _dbus_set_property_audit_log(obj,
                                     property_info->writable.audit_op,
                                     NULL,
                                     property_info->property_name,
                                     value,
                                     error_message);
        g_dbus_method_invocation_return_error_literal(invocation,
                                                      G_DBUS_ERROR,
                                                      G_DBUS_ERROR_AUTH_FAILED,
                                                      error_message);
        return;
    }
handle_data = g_slice_new0(DBusSetPropertyHandle);
|
|
|
|
|
handle_data->self = g_object_ref(self);
|
|
|
|
|
handle_data->obj = g_object_ref(obj);
|
|
|
|
|
handle_data->interface_info = interface_info;
|
|
|
|
|
handle_data->property_info = property_info;
|
|
|
|
|
handle_data->value = g_variant_ref(value);
|
|
|
|
|
handle_data->export_version_id = nm_dbus_object_get_export_version_id(obj);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
chain = nm_auth_chain_new_subject(subject, invocation, _dbus_set_property_auth_cb, handle_data);
|
2019-05-26 18:49:55 +02:00
|
|
|
c_list_link_tail(&priv->auth_lst_head, nm_auth_chain_parent_lst_list(chain));
|
2019-05-04 10:31:18 +02:00
|
|
|
nm_auth_chain_add_call_unsafe(chain, property_info->writable.permission, TRUE);
|
2015-08-20 14:25:46 +02:00
|
|
|
}
|
|
|
|
|
|
2016-10-02 18:22:50 +02:00
|
|
|
/*****************************************************************************/
|
2015-08-20 14:25:46 +02:00
|
|
|
|
2016-07-01 12:11:01 +02:00
|
|
|
static NMCheckpointManager *
|
|
|
|
|
_checkpoint_mgr_get(NMManager *self, gboolean create_as_needed)
|
|
|
|
|
{
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
|
|
|
|
|
if (G_UNLIKELY(!priv->checkpoint_mgr) && create_as_needed)
|
2017-10-21 16:05:14 +02:00
|
|
|
priv->checkpoint_mgr = nm_checkpoint_manager_new(self, obj_properties[PROP_CHECKPOINTS]);
|
2016-07-01 12:11:01 +02:00
|
|
|
return priv->checkpoint_mgr;
|
|
|
|
|
}
|
|
|
|
|
|
2016-08-01 17:57:13 +02:00
|
|
|
static void
|
|
|
|
|
checkpoint_auth_done_cb(NMAuthChain *chain, GDBusMethodInvocation *context, gpointer user_data)
|
|
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = NM_MANAGER(user_data);
|
|
|
|
|
char *op;
|
|
|
|
|
char *checkpoint_path = NULL;
|
|
|
|
|
char **devices;
|
|
|
|
|
NMCheckpoint *checkpoint;
|
2016-08-01 17:57:13 +02:00
|
|
|
NMAuthCallResult result;
|
|
|
|
|
guint32 timeout, flags;
|
2021-11-09 13:28:54 +01:00
|
|
|
GVariant *variant = NULL;
|
|
|
|
|
GError *error = NULL;
|
|
|
|
|
const char *arg = NULL;
|
checkpoint: allow resetting the rollback timeout via D-Bus
This allows adjusting the timeout of an existing checkpoint.
The main use case of checkpoints is to have a fail-safe when
configuring the network remotely. By allowing the timeout to be reset,
the user can perform a series of actions and keep bumping the
timeout. That way, the entire series is still guarded by the same
checkpoint, but the user can start with a short timeout and
re-adjust it as they go along.
The libnm API only implements the async form (at least for now).
Sync methods are fundamentally wrong with D-Bus, and a sync form is
probably not needed. Also, follow the glib convention, where the async
form doesn't have the _async name suffix. Also, accept a D-Bus path
as argument, not an NMCheckpoint instance. The libnm API should
not be more restricted than the underlying D-Bus API. It would
be cumbersome to require the user to look up the NMCheckpoint
instance first, especially since libnm doesn't provide an efficient
or convenient lookup-by-path method. On the other hand, retrieving
the path from an NMCheckpoint instance is always possible.
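As a rough sketch of the async libnm form described above (the client
setup, checkpoint path, and callback wiring are assumptions for
illustration; check the libnm headers for the exact signatures):

#include <NetworkManager.h>

static void
bump_timeout_cb(GObject *source, GAsyncResult *result, gpointer user_data)
{
    GError *error = NULL;

    if (!nm_client_checkpoint_adjust_rollback_timeout_finish(NM_CLIENT(source),
                                                             result,
                                                             &error)) {
        g_warning("resetting rollback timeout failed: %s", error->message);
        g_error_free(error);
    }
}

/* ... later, with an initialized NMClient *client and the checkpoint's
 * D-Bus path (not an NMCheckpoint instance) at hand: */
nm_client_checkpoint_adjust_rollback_timeout(client,
                                             checkpoint_path,
                                             60,   /* new rollback timeout, seconds */
                                             NULL, /* GCancellable */
                                             bump_timeout_cb,
                                             NULL);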
2018-03-28 08:09:56 +02:00
|
|
|
guint32 add_timeout;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-08-01 17:19:14 +02:00
|
|
|
op = nm_auth_chain_get_data(chain, "audit-op");
|
2019-05-26 18:49:55 +02:00
|
|
|
c_list_unlink(nm_auth_chain_parent_lst_list(chain));
|
2016-08-01 17:57:13 +02:00
|
|
|
result = nm_auth_chain_get_result(chain, NM_AUTH_PERMISSION_CHECKPOINT_ROLLBACK);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-03-28 08:09:56 +02:00
|
|
|
if (NM_IN_STRSET(op,
|
|
|
|
|
NM_AUDIT_OP_CHECKPOINT_DESTROY,
|
|
|
|
|
NM_AUDIT_OP_CHECKPOINT_ROLLBACK,
|
|
|
|
|
NM_AUDIT_OP_CHECKPOINT_ADJUST_ROLLBACK_TIMEOUT))
|
2016-08-01 17:19:14 +02:00
|
|
|
arg = checkpoint_path = nm_auth_chain_get_data(chain, "checkpoint_path");
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-05-04 09:37:54 +02:00
|
|
|
if (result != NM_AUTH_CALL_RESULT_YES) {
|
2016-08-01 17:57:13 +02:00
|
|
|
error = g_error_new_literal(NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
|
|
|
|
"Not authorized to checkpoint/rollback");
|
|
|
|
|
} else {
|
2016-08-01 17:19:14 +02:00
|
|
|
if (nm_streq0(op, NM_AUDIT_OP_CHECKPOINT_CREATE)) {
|
2016-08-01 17:57:13 +02:00
|
|
|
timeout = GPOINTER_TO_UINT(nm_auth_chain_get_data(chain, "timeout"));
|
|
|
|
|
flags = GPOINTER_TO_UINT(nm_auth_chain_get_data(chain, "flags"));
|
|
|
|
|
devices = nm_auth_chain_get_data(chain, "devices");
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-08-01 17:57:13 +02:00
|
|
|
checkpoint = nm_checkpoint_manager_create(_checkpoint_mgr_get(self, TRUE),
|
|
|
|
|
(const char *const *) devices,
|
|
|
|
|
timeout,
|
|
|
|
|
(NMCheckpointCreateFlags) flags,
|
|
|
|
|
&error);
|
|
|
|
|
if (checkpoint) {
|
2018-02-26 13:51:52 +01:00
|
|
|
arg = nm_dbus_object_get_path(NM_DBUS_OBJECT(checkpoint));
|
2016-08-01 17:19:14 +02:00
|
|
|
variant = g_variant_new("(o)", arg);
|
2016-08-01 17:57:13 +02:00
|
|
|
}
|
2016-08-01 17:19:14 +02:00
|
|
|
} else if (nm_streq0(op, NM_AUDIT_OP_CHECKPOINT_DESTROY)) {
|
2016-08-01 17:57:13 +02:00
|
|
|
nm_checkpoint_manager_destroy(_checkpoint_mgr_get(self, TRUE), checkpoint_path, &error);
|
2016-08-01 17:19:14 +02:00
|
|
|
} else if (nm_streq0(op, NM_AUDIT_OP_CHECKPOINT_ROLLBACK)) {
|
2016-08-01 17:57:13 +02:00
|
|
|
nm_checkpoint_manager_rollback(_checkpoint_mgr_get(self, TRUE),
|
|
|
|
|
checkpoint_path,
|
|
|
|
|
&variant,
|
|
|
|
|
&error);
|
2018-03-28 08:09:56 +02:00
|
|
|
} else if (nm_streq0(op, NM_AUDIT_OP_CHECKPOINT_ADJUST_ROLLBACK_TIMEOUT)) {
|
|
|
|
|
add_timeout = GPOINTER_TO_UINT(nm_auth_chain_get_data(chain, "add_timeout"));
|
|
|
|
|
nm_checkpoint_manager_adjust_rollback_timeout(_checkpoint_mgr_get(self, TRUE),
|
|
|
|
|
checkpoint_path,
|
|
|
|
|
add_timeout,
|
|
|
|
|
&error);
|
2016-08-01 17:57:13 +02:00
|
|
|
} else
|
|
|
|
|
g_return_if_reached();
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-08-01 17:19:14 +02:00
|
|
|
nm_audit_log_checkpoint_op(op,
|
|
|
|
|
arg ?: "",
|
|
|
|
|
!error,
|
|
|
|
|
nm_auth_chain_get_subject(chain),
|
|
|
|
|
error ? error->message : NULL);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-08-01 17:57:13 +02:00
|
|
|
if (error)
|
|
|
|
|
g_dbus_method_invocation_take_error(context, error);
|
|
|
|
|
else
|
|
|
|
|
g_dbus_method_invocation_return_value(context, variant);
|
|
|
|
|
}
|
|
|
|
|
|
2016-07-01 12:11:01 +02:00
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
impl_manager_checkpoint_create(NMDBusObject *obj,
|
2018-02-26 13:51:52 +01:00
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
2021-11-09 13:28:54 +01:00
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
2018-02-26 13:51:52 +01:00
|
|
|
{
|
2022-02-22 22:08:18 +01:00
|
|
|
NMManager *self = NM_MANAGER(obj);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
|
|
|
|
NMAuthChain *chain;
|
|
|
|
|
gs_strfreev char **devices = NULL;
|
|
|
|
|
guint32 rollback_timeout;
|
|
|
|
|
guint32 flags;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2016-07-01 12:11:01 +02:00
|
|
|
G_STATIC_ASSERT_EXPR(sizeof(flags) <= sizeof(NMCheckpointCreateFlags));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2022-02-22 22:08:18 +01:00
|
|
|
g_variant_get(parameters, "(^aouu)", &devices, &rollback_timeout, &flags);
|
|
|
|
|
|
|
|
|
|
if ((NMCheckpointCreateFlags) flags != flags
|
|
|
|
|
|| NM_FLAGS_ANY(flags,
|
|
|
|
|
~((guint32) (NM_CHECKPOINT_CREATE_FLAG_DESTROY_ALL
|
|
|
|
|
| NM_CHECKPOINT_CREATE_FLAG_DELETE_NEW_CONNECTIONS
|
|
|
|
|
| NM_CHECKPOINT_CREATE_FLAG_DISCONNECT_NEW_DEVICES
|
core: preserve external ports during checkpoint rollback
When we have a bridge interface with ports attached externally (that is,
not by NetworkManager itself), then it can make sense that during
checkpoint rollback we want to keep those ports attached.
During rollback, we may need to deactivate the bridge device and
re-activate it. Implement this by setting a flag before deactivating,
which prevents external ports from being detached. The flag gets cleared
when the device state changes to activated (the following activation)
or unmanaged.
This is an ugly solution, for several reasons.
For one, NMDevice tracks its ports in the "slaves" list. But what
it does is ugly. There is no clear concept to understand what it
actually tracks. For example, it tracks externally added interfaces
(nm_device_sys_iface_state_is_external()) that are attached while
not being connected. But it also tracks interfaces that we want to attach
during activation (but which are not yet actually enslaved). It also tracks
slaves that have no actual netdev device (OVS). So it's not clear what this
list contains and what it should contain at any point in time. When we skip
the change of the slaves' states during nm_device_master_release_slaves_all(),
it's not really clear what the effects are. It's ugly, but probably correct
enough. It would be better if we had a clear purpose for the
list (or several lists). E.g. a list of all ports that are
currently, physically attached vs. a list of ports we want to attach vs.
a list of OVS slaves that have no actual netdev device.
Another problem is that we attach state on the device
("activation_state_preserve_external_ports"), which should linger there
during the deactivation and reactivation. How can we be sure that we don't
leave that flag dangling there, and that the desired following activation
is the one we cared about? If the follow-up activation falls short (e.g. an
unmanaged command comes first), will we properly disconnect the slaves?
Should we even? In practice, it might be correct enough.
Also, we only implement this for bridges. I think this is where it makes
the most sense. And after all, it's an odd thing to preserve unknown,
external things during a rollback -- unknown, because we have no knowledge
about why these ports are attached and what to do with them.
Also, the change doesn't remember the ports that were attached when the
checkpoint was created. Instead, we preserve all ports that are attached
during rollback. That seems more useful and easier to implement. So we
don't actually roll back to the configuration at the time the checkpoint
was created. Instead, we roll back, but keep external devices.
Also, we do this now by default and introduce a flag to get the previous
behavior.
https://bugzilla.redhat.com/show_bug.cgi?id=2035519
https://gitlab.freedesktop.org/NetworkManager/NetworkManager/-/issues/909
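To make the new flag concrete, a hedged sketch of a libnm caller that
opts back into the previous behavior (client setup and the callback are
assumptions for illustration; verify the exact signatures against the
libnm documentation):

#include <NetworkManager.h>

static void
created_cb(GObject *source, GAsyncResult *result, gpointer user_data)
{
    GError *error = NULL;
    NMCheckpoint *checkpoint;

    checkpoint = nm_client_checkpoint_create_finish(NM_CLIENT(source), result, &error);
    if (!checkpoint) {
        g_warning("checkpoint creation failed: %s", error->message);
        g_error_free(error);
        return;
    }
    /* ... remember the checkpoint's D-Bus path for a later rollback ... */
    g_object_unref(checkpoint);
}

/* An empty device list means "all devices"; the flag requests the
 * pre-change behavior where external ports are not preserved. */
GPtrArray *devices = g_ptr_array_new();

nm_client_checkpoint_create(client,
                            devices,
                            120, /* rollback timeout, seconds */
                            NM_CHECKPOINT_CREATE_FLAG_NO_PRESERVE_EXTERNAL_PORTS,
                            NULL, /* GCancellable */
                            created_cb,
                            NULL);
g_ptr_array_unref(devices);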
2022-02-22 21:55:57 +01:00
|
|
|
| NM_CHECKPOINT_CREATE_FLAG_ALLOW_OVERLAPPING
|
|
|
|
|
| NM_CHECKPOINT_CREATE_FLAG_NO_PRESERVE_EXTERNAL_PORTS)))) {
|
2022-02-22 22:08:18 +01:00
|
|
|
g_dbus_method_invocation_return_error_literal(invocation,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_INVALID_ARGUMENTS,
|
|
|
|
|
"Invalid flags");
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2018-02-26 13:51:52 +01:00
|
|
|
chain = nm_auth_chain_new_context(invocation, checkpoint_auth_done_cb, self);
|
2016-08-01 17:57:13 +02:00
|
|
|
if (!chain) {
|
2018-02-26 13:51:52 +01:00
|
|
|
g_dbus_method_invocation_return_error_literal(invocation,
|
|
|
|
|
NM_MANAGER_ERROR,
|
|
|
|
|
NM_MANAGER_ERROR_PERMISSION_DENIED,
|
2019-09-04 10:18:56 +02:00
|
|
|
NM_UTILS_ERROR_MSG_REQ_AUTH_FAILED);
|
2016-07-01 12:11:01 +02:00
|
|
|
return;
|
|
|
|
|
}
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2019-05-26 18:49:55 +02:00
|
|
|
c_list_link_tail(&priv->auth_lst_head, nm_auth_chain_parent_lst_list(chain));
|
2016-08-01 17:19:14 +02:00
|
|
|
nm_auth_chain_set_data(chain, "audit-op", NM_AUDIT_OP_CHECKPOINT_CREATE, NULL);
|
2022-02-22 22:08:18 +01:00
|
|
|
nm_auth_chain_set_data(chain,
|
|
|
|
|
"devices",
|
|
|
|
|
g_steal_pointer(&devices),
|
|
|
|
|
(GDestroyNotify) g_strfreev);
|
2016-08-01 17:57:13 +02:00
|
|
|
nm_auth_chain_set_data(chain, "flags", GUINT_TO_POINTER(flags), NULL);
|
|
|
|
|
nm_auth_chain_set_data(chain, "timeout", GUINT_TO_POINTER(rollback_timeout), NULL);
|
|
|
|
|
nm_auth_chain_add_call(chain, NM_AUTH_PERMISSION_CHECKPOINT_ROLLBACK, TRUE);
|
2016-07-01 12:11:01 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
2021-11-09 13:28:54 +01:00
|
|
|
impl_manager_checkpoint_destroy(NMDBusObject *obj,
|
2018-02-26 13:51:52 +01:00
|
|
|
const NMDBusInterfaceInfoExtended *interface_info,
|
2021-11-09 13:28:54 +01:00
|
|
|
const NMDBusMethodInfoExtended *method_info,
|
|
|
|
|
GDBusConnection *connection,
|
|
|
|
|
const char *sender,
|
|
|
|
|
GDBusMethodInvocation *invocation,
|
|
|
|
|
GVariant *parameters)
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because?) using generated classes. It was hard to understand, complex, had
ordering-issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more then 1300 lines of hand written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue us more under our control, we can address issus and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straight forward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry, either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example with polling whether the
system bus appears (like was done previously). Also, restart of D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server, by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering. Also the generated code would hook into
notify() and process change events in and idle handle, exhibiting the
same ordering issue too.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means, changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we emit now more PropertiesChanged signals then before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
- 2809360 bytes
+ 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
during testing. In other words, the numbers are not reproducible.
Currently, the implementation performs no caching of GVariants at all,
but it would be rather simple to add it, if that turns out to be
useful.
Anyway, without strong claim, it seems that the new form tends to
perform slightly better. That would be no surprise.
$ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
- real 1m39.355s
+ real 1m37.432s
$ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
- real 0m26.843s
+ real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
conditions doesn't show a large difference. On my system they
consume about 19MB RSS. It seems that the new version has a
slightly smaller RSS size.
- 19356 RSS
+ 18660 RSS

{
    NMManager        *self = NM_MANAGER(obj);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
    NMAuthChain      *chain;
    const char       *checkpoint_path;

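    /* Authorization is asynchronous: the request is attached to an NMAuthChain
     * and the D-Bus invocation is completed later from checkpoint_auth_done_cb(). */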
    chain = nm_auth_chain_new_context(invocation, checkpoint_auth_done_cb, self);
    if (!chain) {
        g_dbus_method_invocation_return_error_literal(invocation,
                                                      NM_MANAGER_ERROR,
                                                      NM_MANAGER_ERROR_PERMISSION_DENIED,
                                                      NM_UTILS_ERROR_MSG_REQ_AUTH_FAILED);
        return;
    }
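
    /* "(&o)" borrows the object-path string from @parameters without copying;
     * it stays valid only as long as the GVariant does, hence the g_strdup()
     * when it is stored on the auth-chain below. */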
    g_variant_get(parameters, "(&o)", &checkpoint_path);

    c_list_link_tail(&priv->auth_lst_head, nm_auth_chain_parent_lst_list(chain));
    nm_auth_chain_set_data(chain, "audit-op", NM_AUDIT_OP_CHECKPOINT_DESTROY, NULL);
    nm_auth_chain_set_data(chain, "checkpoint_path", g_strdup(checkpoint_path), g_free);
    nm_auth_chain_add_call(chain, NM_AUTH_PERMISSION_CHECKPOINT_ROLLBACK, TRUE);
}
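
For reference, a hypothetical client-side invocation of the destroy method
wired up above (the checkpoint path is just an example):
$ busctl call org.freedesktop.NetworkManager /org/freedesktop/NetworkManager \
      org.freedesktop.NetworkManager CheckpointDestroy o \
      /org/freedesktop/NetworkManager/Checkpoint/1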

static void
impl_manager_checkpoint_rollback(NMDBusObject                      *obj,
                                 const NMDBusInterfaceInfoExtended *interface_info,
                                 const NMDBusMethodInfoExtended    *method_info,
                                 GDBusConnection                   *connection,
                                 const char                        *sender,
                                 GDBusMethodInvocation             *invocation,
                                 GVariant                          *parameters)
{
    NMManager        *self = NM_MANAGER(obj);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
    NMAuthChain      *chain;
    const char       *checkpoint_path;

    chain = nm_auth_chain_new_context(invocation, checkpoint_auth_done_cb, self);
    if (!chain) {
        g_dbus_method_invocation_return_error_literal(invocation,
                                                      NM_MANAGER_ERROR,
                                                      NM_MANAGER_ERROR_PERMISSION_DENIED,
                                                      NM_UTILS_ERROR_MSG_REQ_AUTH_FAILED);
        return;
    }

    g_variant_get(parameters, "(&o)", &checkpoint_path);

    c_list_link_tail(&priv->auth_lst_head, nm_auth_chain_parent_lst_list(chain));
    nm_auth_chain_set_data(chain, "audit-op", NM_AUDIT_OP_CHECKPOINT_ROLLBACK, NULL);
    nm_auth_chain_set_data(chain, "checkpoint_path", g_strdup(checkpoint_path), g_free);
    nm_auth_chain_add_call(chain, NM_AUTH_PERMISSION_CHECKPOINT_ROLLBACK, TRUE);
}
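
Likewise, a hypothetical client-side call of the rollback method above (the
checkpoint path is an example; the reply is expected to map each device path
to a per-device rollback result):
$ busctl call org.freedesktop.NetworkManager /org/freedesktop/NetworkManager \
      org.freedesktop.NetworkManager CheckpointRollback o \
      /org/freedesktop/NetworkManager/Checkpoint/1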
checkpoint: allow resetting the rollback timeout via D-Bus
This allows adjusting the timeout of an existing checkpoint.
The main use case of checkpoints is to have a fail-safe when
configuring the network remotely. By allowing the timeout to be reset,
the user can perform a series of actions and keep bumping the
timeout. That way, the entire series is still guarded by the same
checkpoint, but the user can start with a short timeout and
re-adjust it as they go along.
The libnm API only implements the async form (at least for now).
Sync methods are fundamentally wrong with D-Bus, and a sync variant
is probably not needed. Also, follow the glib convention, where the
async form doesn't have an _async name suffix. Also, accept a D-Bus
path as argument, not an NMCheckpoint instance. The libnm API should
not be more restricted than the underlying D-Bus API. It would
be cumbersome to require the user to look up the NMCheckpoint
instance first, especially since libnm doesn't provide an efficient
or convenient lookup-by-path method. On the other hand, retrieving
the path from an NMCheckpoint instance is always possible.
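A usage sketch of that async libnm form ("client" is an existing NMClient;
the callback name, checkpoint path, and timeout are examples):

static void
adjust_done_cb(GObject *source, GAsyncResult *result, gpointer user_data)
{
    GError *error = NULL;

    if (!nm_client_checkpoint_adjust_rollback_timeout_finish(NM_CLIENT(source), result, &error)) {
        g_printerr("adjust failed: %s\n", error->message);
        g_clear_error(&error);
    }
}

/* at the call site: bump the rollback timeout to 600 seconds from now */
nm_client_checkpoint_adjust_rollback_timeout(client,
                                             "/org/freedesktop/NetworkManager/Checkpoint/1",
                                             600,
                                             NULL, /* cancellable */
                                             adjust_done_cb,
                                             NULL);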

static void
impl_manager_checkpoint_adjust_rollback_timeout(NMDBusObject                      *obj,
                                                const NMDBusInterfaceInfoExtended *interface_info,
                                                const NMDBusMethodInfoExtended    *method_info,
                                                GDBusConnection                   *connection,
                                                const char                        *sender,
                                                GDBusMethodInvocation             *invocation,
                                                GVariant                          *parameters)
{
    NMManager        *self = NM_MANAGER(obj);
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
    NMAuthChain      *chain;
    const char       *checkpoint_path;
    guint32           add_timeout;

    chain = nm_auth_chain_new_context(invocation, checkpoint_auth_done_cb, self);
    if (!chain) {
        g_dbus_method_invocation_return_error_literal(invocation,
                                                      NM_MANAGER_ERROR,
                                                      NM_MANAGER_ERROR_PERMISSION_DENIED,
                                                      NM_UTILS_ERROR_MSG_REQ_AUTH_FAILED);
        return;
    }

    g_variant_get(parameters, "(&ou)", &checkpoint_path, &add_timeout);

    c_list_link_tail(&priv->auth_lst_head, nm_auth_chain_parent_lst_list(chain));
nm_auth_chain_set_data(chain, "audit-op", NM_AUDIT_OP_CHECKPOINT_ADJUST_ROLLBACK_TIMEOUT, NULL);
|
|
|
|
|
nm_auth_chain_set_data(chain, "checkpoint_path", g_strdup(checkpoint_path), g_free);
|
|
|
|
|
nm_auth_chain_set_data(chain, "add_timeout", GUINT_TO_POINTER(add_timeout), NULL);
|
|
|
|
|
nm_auth_chain_add_call(chain, NM_AUTH_PERMISSION_CHECKPOINT_ROLLBACK, TRUE);
|
|
|
|
|
}
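
For illustration, a client-side sketch of calling the handler above. The
method name "CheckpointAdjustRollbackTimeout" and the "(ou)" argument
signature follow the impl_ function name and the g_variant_get() call
above; the example function name is hypothetical and error handling is
elided.

static void
example_adjust_rollback_timeout(GDBusConnection *bus,
                                const char      *checkpoint_path,
                                guint32          add_timeout)
{
    g_dbus_connection_call(bus,
                           "org.freedesktop.NetworkManager",
                           "/org/freedesktop/NetworkManager",
                           "org.freedesktop.NetworkManager",
                           "CheckpointAdjustRollbackTimeout",
                           g_variant_new("(ou)", checkpoint_path, add_timeout),
                           NULL,
                           G_DBUS_CALL_FLAGS_NONE,
                           -1,
                           NULL,
                           NULL,
                           NULL);
}
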
/*****************************************************************************/

NMDnsManager *
nm_manager_get_dns_manager(NMManager *self)
{
    NMManagerPrivate *priv;

    g_return_val_if_fail(NM_IS_MANAGER(self), NULL);

    priv = NM_MANAGER_GET_PRIVATE(self);

    if (G_UNLIKELY(!priv->dns_mgr)) {
        /* Initialize lazily on first use.
         *
         * But keep a reference. This is to ensure proper lifetimes between
         * singleton instances (i.e. nm_dns_manager_get() outlives NMManager). */
        priv->dns_mgr = g_object_ref(nm_dns_manager_get());
    }

    return priv->dns_mgr;
}
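
A usage sketch for the getter above: because the manager keeps the
reference, the returned instance outlives any subscriber that itself
holds a reference to the manager. The "config-changed" signal name and
the callback signature are assumptions here, and the example names are
hypothetical.

static void
example_dns_config_changed_cb(NMDnsManager *dns_mgr, gpointer user_data)
{
    /* react to a changed DNS configuration */
}

static void
example_subscribe_dns_config(NMManager *manager, gpointer user_data)
{
    g_signal_connect(nm_manager_get_dns_manager(manager),
                     "config-changed", /* assumed signal name */
                     G_CALLBACK(example_dns_config_changed_cb),
                     user_data);
}
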
/*****************************************************************************/

static void
auth_mgr_changed(NMAuthManager *auth_manager, gpointer user_data)
{
    /* Let clients know they should re-check their authorization */
    nm_dbus_object_emit_signal(user_data,
                               &interface_info_manager,
                               &signal_info_check_permissions,
                               "()");
}
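
On the client side, the signal emitted above can be watched with a plain
GDBusConnection subscription. A minimal sketch; the signal name
"CheckPermissions" follows the signal_info_check_permissions descriptor
used above, and the example function name is hypothetical.

static guint
example_watch_check_permissions(GDBusConnection     *bus,
                                GDBusSignalCallback  callback,
                                gpointer             user_data)
{
    return g_dbus_connection_signal_subscribe(bus,
                                              "org.freedesktop.NetworkManager",
                                              "org.freedesktop.NetworkManager",
                                              "CheckPermissions",
                                              "/org/freedesktop/NetworkManager",
                                              NULL,
                                              G_DBUS_SIGNAL_FLAGS_NONE,
                                              callback,
                                              user_data,
                                              NULL);
}
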
/*****************************************************************************/

void
nm_manager_unblock_failed_ovs_interfaces(NMManager *self)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);

    nm_policy_unblock_failed_ovs_interfaces(priv->policy);
}
/*****************************************************************************/

void
nm_manager_set_capability(NMManager *self, NMCapability cap)
{
    NMManagerPrivate *priv;
    guint32           cap_i;
    gssize            idx;

    g_return_if_fail(NM_IS_MANAGER(self));
    if (cap < 1 || cap > _NM_CAPABILITY_MAX)
        g_return_if_reached();

    cap_i = (guint32) cap;

    priv = NM_MANAGER_GET_PRIVATE(self);

    idx = nm_array_find_bsearch(nm_g_array_index_p(priv->capabilities, guint32, 0),
                                priv->capabilities->len,
                                sizeof(guint32),
                                &cap_i,
                                nm_cmp_uint32_p_with_data,
                                NULL);
    if (idx >= 0)
        return;

    nm_assert((~idx) <= (gssize) priv->capabilities->len);

    g_array_insert_val(priv->capabilities, ~idx, cap_i);
    _notify(self, PROP_CAPABILITIES);
}
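
The insertion at ~idx relies on a common binary-search convention, assumed
here for nm_array_find_bsearch: on a miss, the helper returns the bitwise
complement of the position at which the key would have to be inserted to
keep the array sorted. A self-contained sketch of that convention:

static gssize
example_find_bsearch(const guint32 *arr, guint len, guint32 needle)
{
    gssize lo = 0;
    gssize hi = (gssize) len - 1;

    while (lo <= hi) {
        gssize mid = lo + (hi - lo) / 2;

        if (arr[mid] == needle)
            return mid; /* found: non-negative index */
        if (arr[mid] < needle)
            lo = mid + 1;
        else
            hi = mid - 1;
    }
    return ~lo; /* miss: ~(insertion position), always negative */
}
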

void
nm_manager_emit_device_ifindex_changed(NMManager *self, NMDevice *device)
{
    g_signal_emit(self, signals[DEVICE_IFINDEX_CHANGED], 0, device);
}
/*****************************************************************************/

NM_DEFINE_SINGLETON_REGISTER(NMManager);

NMManager *
nm_manager_get(void)
{
    g_return_val_if_fail(singleton_instance, NULL);
    return singleton_instance;
}

NMSettings *
nm_settings_get(void)
{
    g_return_val_if_fail(singleton_instance, NULL);

    return NM_MANAGER_GET_PRIVATE(singleton_instance)->settings;
}

NMManager *
nm_manager_setup(void)
{
    NMManager *self;

    g_return_val_if_fail(!singleton_instance, singleton_instance);

    self = g_object_new(NM_TYPE_MANAGER, NULL);
    nm_assert(NM_IS_MANAGER(self));
    singleton_instance = self;

    nm_singleton_instance_register();
    nm_log_dbg(LOGD_CORE,
               "setup %s singleton (" NM_HASH_OBFUSCATE_PTR_FMT ")",
               "NMManager",
               NM_HASH_OBFUSCATE_PTR(singleton_instance));

    nm_dbus_object_export(NM_DBUS_OBJECT(self));
    return self;
}

static void
constructed(GObject *object)
{
    NMManager           *self = NM_MANAGER(object);
    NMManagerPrivate    *priv = NM_MANAGER_GET_PRIVATE(self);
    const NMConfigState *state;

    G_OBJECT_CLASS(nm_manager_parent_class)->constructed(object);

    priv->settings = nm_settings_new(self);
    nm_dbus_object_export(NM_DBUS_OBJECT(priv->settings));

    g_signal_connect(priv->settings,
                     "notify::" NM_SETTINGS_STARTUP_COMPLETE,
                     G_CALLBACK(settings_startup_complete_changed),
                     self);
    g_signal_connect(priv->settings,
                     "notify::" NM_SETTINGS_UNMANAGED_SPECS,
                     G_CALLBACK(system_unmanaged_devices_changed_cb),
                     self);
    g_signal_connect(priv->settings,
                     NM_SETTINGS_SIGNAL_CONNECTION_FLAGS_CHANGED,
                     G_CALLBACK(connection_flags_changed),
                     self);

    priv->hostname_manager = g_object_ref(nm_hostname_manager_get());
    g_signal_connect(priv->hostname_manager,
                     "notify::" NM_HOSTNAME_MANAGER_STATIC_HOSTNAME,
                     G_CALLBACK(_static_hostname_changed_cb),
                     self);

    /*
     * Do not delete existing virtual devices to keep connectivity up.
     * Virtual devices are reused when NetworkManager is restarted.
     * Hence, don't react on NM_SETTINGS_SIGNAL_CONNECTION_REMOVED.
     */

    priv->policy = nm_policy_new(self, priv->settings);
    g_signal_connect(priv->policy,
                     "notify::" NM_POLICY_DEFAULT_IP4_AC,
                     G_CALLBACK(policy_default_ac_changed),
                     self);
    g_signal_connect(priv->policy,
                     "notify::" NM_POLICY_DEFAULT_IP6_AC,
                     G_CALLBACK(policy_default_ac_changed),
                     self);
    g_signal_connect(priv->policy,
                     "notify::" NM_POLICY_ACTIVATING_IP4_AC,
                     G_CALLBACK(policy_activating_ac_changed),
                     self);
    g_signal_connect(priv->policy,
                     "notify::" NM_POLICY_ACTIVATING_IP6_AC,
                     G_CALLBACK(policy_activating_ac_changed),
                     self);

    priv->config = g_object_ref(nm_config_get());
    g_signal_connect(G_OBJECT(priv->config),
                     NM_CONFIG_SIGNAL_CONFIG_CHANGED,
                     G_CALLBACK(_config_changed_cb),
                     self);

    state = nm_config_state_get(priv->config);

    priv->net_enabled = state->net_enabled;

    priv->radio_states[NM_RFKILL_TYPE_WLAN].user_enabled = state->wifi_enabled;
    priv->radio_states[NM_RFKILL_TYPE_WWAN].user_enabled = state->wwan_enabled;

    priv->rfkill_mgr = nm_rfkill_manager_new();
    g_signal_connect(priv->rfkill_mgr,
                     NM_RFKILL_MANAGER_SIGNAL_RFKILL_CHANGED,
                     G_CALLBACK(rfkill_manager_rfkill_changed_cb),
                     self);

    /* Force kernel Wi-Fi/WWAN rfkill state to follow NM saved Wi-Fi/WWAN state
     * in case the BIOS doesn't save rfkill state, and to be consistent with user
     * changes to the WirelessEnabled/WWANEnabled properties which toggle kernel
     * rfkill.
     */
    _rfkill_update_system(self,
                          NM_RFKILL_TYPE_WLAN,
                          priv->radio_states[NM_RFKILL_TYPE_WLAN].user_enabled);
    _rfkill_update_system(self,
                          NM_RFKILL_TYPE_WWAN,
                          priv->radio_states[NM_RFKILL_TYPE_WWAN].user_enabled);
}

static void
nm_manager_init(NMManager *self)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
    GFile            *file;

    c_list_init(&priv->auth_lst_head);
    c_list_init(&priv->link_cb_lst);

core: track devices in manager via embedded CList

Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases the more suitable data structure
than GSList:

- you can find out in O(1) whether the object is linked. That is nice,
  for example, to assert in NMDevice's destructor that the object was
  unlinked, and we will use that later in nm_manager_get_device_by_path().
- you can unlink the element in O(1), and you can unlink the element
  without having access to the link's head.
- contrary to GSList, this does not require an extra slice allocation
  for the link node. It quite possibly consumes slightly less memory,
  because the CList structure is embedded in a struct that we already
  allocate. Even if slice allocation were perfect and consumed only
  2*sizeof(gpointer) for the link node, it would at most be as good as
  CList. Quite possibly, there is an overhead though.
- CList possibly has better memory locality, because the link structure
  and the data are close to each other.

Something which could be seen as a disadvantage is that with CList one
device can only be tracked in one NMManager instance at a time. But that
is fine. There exists only one NMManager instance for now, and even if
we ever introduced multiple managers, we probably would not associate
one NMDevice instance with multiple managers.

The advantages are arguably not huge, but CList is IMHO clearly the more
suited data structure for the job. No need to stick to a suboptimal data
structure. Refactor it.

    c_list_init(&priv->devices_lst_head);
    c_list_init(&priv->active_connections_lst_head);
    c_list_init(&priv->async_op_lst_head);
    c_list_init(&priv->delete_volatile_connection_lst_head);

    priv->platform = g_object_ref(NM_PLATFORM_GET);

    priv->capabilities = g_array_new(FALSE, FALSE, sizeof(guint32));

    priv->radio_states[NM_RFKILL_TYPE_WLAN] = (RfkillRadioState){
        .user_enabled = TRUE,
        .sw_enabled   = FALSE,
        .hw_enabled   = TRUE,
        .os_owner     = TRUE,
    };
    priv->radio_states[NM_RFKILL_TYPE_WWAN] = (RfkillRadioState){
        .user_enabled = TRUE,
        .sw_enabled   = FALSE,
        .hw_enabled   = TRUE,
        .os_owner     = TRUE,
    };

    priv->sleeping = FALSE;
    priv->state    = NM_STATE_DISCONNECTED;
    priv->startup  = TRUE;

    /* sleep/wake handling */
    priv->sleep_monitor = nm_sleep_monitor_new();
    g_signal_connect(priv->sleep_monitor, NM_SLEEP_MONITOR_SLEEPING, G_CALLBACK(sleeping_cb), self);

    /* Listen for authorization changes */
    priv->auth_mgr = g_object_ref(nm_auth_manager_get());
    g_signal_connect(priv->auth_mgr,
                     NM_AUTH_MANAGER_SIGNAL_CHANGED,
                     G_CALLBACK(auth_mgr_changed),
                     self);

    /* Monitor the firmware directory */
    if (strlen(KERNEL_FIRMWARE_DIR)) {
        file             = g_file_new_for_path(KERNEL_FIRMWARE_DIR "/");
        priv->fw_monitor = g_file_monitor_directory(file, G_FILE_MONITOR_NONE, NULL, NULL);
        g_object_unref(file);
    }

    if (priv->fw_monitor) {
        g_signal_connect(priv->fw_monitor, "changed", G_CALLBACK(firmware_dir_changed), self);
        _LOGI(LOGD_CORE, "monitoring kernel firmware directory '%s'.", KERNEL_FIRMWARE_DIR);
    } else {
        _LOGW(LOGD_CORE, "failed to monitor kernel firmware directory '%s'.", KERNEL_FIRMWARE_DIR);
    }

    priv->metered       = NM_METERED_UNKNOWN;
    priv->sleep_devices = g_hash_table_new(nm_direct_hash, NULL);
}
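
A minimal sketch of the embedded-list idea from the commit note above
(struct and field names are illustrative only): the link node lives
inside the tracked object, so linking and unlinking are O(1) and need no
separate allocation.

typedef struct {
    CList devices_lst; /* embedded link node, no extra allocation */
    int   ifindex;
} ExampleDevice;

static void
example_track(CList *head, ExampleDevice *dev)
{
    c_list_link_tail(head, &dev->devices_lst); /* O(1) link */
}

static void
example_untrack(ExampleDevice *dev)
{
    c_list_unlink(&dev->devices_lst); /* O(1), no list head needed */
}
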

static void
get_property(GObject *object, guint prop_id, GValue *value, GParamSpec *pspec)
{
    NMManager               *self = NM_MANAGER(object);
    NMManagerPrivate        *priv = NM_MANAGER_GET_PRIVATE(self);
    NMConfigData            *config_data;
    const NMGlobalDnsConfig *dns_config;
    const char              *type;
    const char              *path;
    NMActiveConnection      *ac;
    GPtrArray               *ptrarr;

    switch (prop_id) {
    case PROP_VERSION:
        g_value_set_string(value, VERSION);
        break;
    case PROP_CAPABILITIES:
        g_value_set_variant(value,
                            nm_g_variant_new_au((const guint32 *) priv->capabilities->data,
                                                priv->capabilities->len));
        break;
    case PROP_STATE:
        g_value_set_uint(value, priv->state);
        break;
    case PROP_STARTUP:
        g_value_set_boolean(value, priv->startup);
        break;
    case PROP_NETWORKING_ENABLED:
        g_value_set_boolean(value, priv->net_enabled);
        break;
    case PROP_WIRELESS_ENABLED:
        g_value_set_boolean(value, _rfkill_radio_state_get(self, NM_RFKILL_TYPE_WLAN));
        break;
    case PROP_WIRELESS_HARDWARE_ENABLED:
        g_value_set_boolean(value, priv->radio_states[NM_RFKILL_TYPE_WLAN].hw_enabled);
        break;
    case PROP_WWAN_ENABLED:
        g_value_set_boolean(value, _rfkill_radio_state_get(self, NM_RFKILL_TYPE_WWAN));
        break;
    case PROP_WWAN_HARDWARE_ENABLED:
        g_value_set_boolean(value, priv->radio_states[NM_RFKILL_TYPE_WWAN].hw_enabled);
        break;
    case PROP_WIMAX_ENABLED:
        g_value_set_boolean(value, FALSE);
        break;
    case PROP_WIMAX_HARDWARE_ENABLED:
        g_value_set_boolean(value, FALSE);
        break;
    case PROP_RADIO_FLAGS:
        g_value_set_uint(value, priv->radio_flags);
        break;
    case PROP_ACTIVE_CONNECTIONS:
        ptrarr = g_ptr_array_new();
        c_list_for_each_entry_prev (ac,
                                    &priv->active_connections_lst_head,
                                    active_connections_lst) {
            path = nm_dbus_object_get_path(NM_DBUS_OBJECT(ac));
            if (path)
                g_ptr_array_add(ptrarr, g_strdup(path));
        }
        g_ptr_array_add(ptrarr, NULL);
        g_value_take_boxed(value, g_ptr_array_free(ptrarr, FALSE));
        break;
    case PROP_CONNECTIVITY:
        g_value_set_uint(value, priv->connectivity_state);
        break;
    case PROP_CONNECTIVITY_CHECK_AVAILABLE:
        config_data = nm_config_get_data(priv->config);
        g_value_set_boolean(value, nm_config_data_get_connectivity_uri(config_data) != NULL);
        break;
    case PROP_CONNECTIVITY_CHECK_ENABLED:
        g_value_set_boolean(value, concheck_enabled(self, NULL));
        break;
    case PROP_CONNECTIVITY_CHECK_URI:
        config_data = nm_config_get_data(priv->config);
        g_value_set_string(value, nm_config_data_get_connectivity_uri(config_data));
        break;
    case PROP_PRIMARY_CONNECTION:
        nm_dbus_utils_g_value_set_object_path(value, priv->primary_connection);
        break;
    case PROP_PRIMARY_CONNECTION_TYPE:
        type = NULL;
        if (priv->primary_connection) {
            NMConnection *con;

            con = nm_active_connection_get_applied_connection(priv->primary_connection);
            if (con)
                type = nm_connection_get_connection_type(con);
        }
        g_value_set_string(value, type ?: "");
        break;
    case PROP_ACTIVATING_CONNECTION:
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
Previously, we used the generated GDBusInterfaceSkeleton types and glued
them via the NMExportedObject base class to our NM types. We also used
GDBusObjectManagerServer.
Don't do that anymore. The resulting code was more complicated despite (or
because of?) using generated classes. It was hard to understand, complex, had
ordering issues, and had a runtime and memory overhead.
This patch refactors this entirely and uses the lower layer API GDBusConnection
directly. It replaces the generated code, GDBusInterfaceSkeleton, and
GDBusObjectManagerServer. All this is now done by NMDbusObject and NMDBusManager
and static descriptor instances of type GDBusInterfaceInfo.
This adds a net plus of more than 1300 lines of hand-written code. I claim
that this implementation is easier to understand. Note that previously we
also required extensive and complex glue code to bind our objects to the
generated skeleton objects. Instead, we now glue our objects directly to
GDBusConnection. The result is more immediate and gets rid of layers of
code in between.
Now that the D-Bus glue is more under our control, we can address issues and
bottlenecks better, instead of adding code to bend the generated skeletons
to our needs.
Note that the current implementation now only supports one D-Bus connection.
That was effectively the case already, although there were places (and still are)
where the code pretends it could also support connections from a private socket.
We dropped private socket support mainly because it was unused, untested and
buggy, but also because GDBusObjectManagerServer could not export the same
objects on multiple connections. Now, it would be rather straightforward to
fix that and re-introduce ObjectManager on each private connection. But this
commit doesn't do that yet, and the new code intentionally supports only one
D-Bus connection.
Also, the D-Bus startup was simplified. There is no retry: either nm_dbus_manager_start()
succeeds, or it detects the initrd case. In the initrd case, the bus manager never tries to
connect to D-Bus. Since the initrd scenario is not yet used/tested, this is good enough
for the moment. It could be easily extended later, for example by polling whether the
system bus appears (as was done previously). Also, restart of the D-Bus daemon isn't
supported either -- just like before.
Note how NMDBusManager now implements the ObjectManager D-Bus interface
directly.
Also, this fixes race issues in the server by no longer delaying
PropertiesChanged signals. NMExportedObject would collect changed
properties and send the signal out in idle_emit_properties_changed()
on idle. This messes up the ordering of change events w.r.t. other
signals and events on the bus. Note that not only NMExportedObject
messed up the ordering: the generated code would also hook into
notify() and process change events in an idle handler, exhibiting the
same ordering issue.
No longer do that. PropertiesChanged signals will be sent right away
by hooking into dispatch_properties_changed(). This means that changing
a property in quick succession will no longer be combined and is
guaranteed to emit signals for each individual state. Quite possibly
we now emit more PropertiesChanged signals than before.
However, we are now able to group a set of changes by using standard
g_object_freeze_notify()/g_object_thaw_notify(). We probably should
make more use of that.
Also, now that our signals are all handled in the right order, we
might find places where we still emit them in the wrong order. But that
is then due to the order in which our GObjects emit signals, not due
to an ill behavior of the D-Bus glue. Possibly we need to identify
such ordering issues and fix them.
Numbers (for contrib/rpm --without debug on x86_64):
- the patch changes the code size of NetworkManager by
  - 2809360 bytes
  + 2537528 bytes (-9.7%)
- Runtime measurements are harder because there is a large variance
  during testing. In other words, the numbers are not reproducible.
  Currently, the implementation performs no caching of GVariants at all,
  but it would be rather simple to add it, if that turns out to be
  useful.
  Anyway, without a strong claim, it seems that the new form tends to
  perform slightly better. That would be no surprise.
  $ time (for i in {1..1000}; do nmcli >/dev/null || break; echo -n .; done)
  - real 1m39.355s
  + real 1m37.432s
  $ time (for i in {1..2000}; do busctl call org.freedesktop.NetworkManager /org/freedesktop org.freedesktop.DBus.ObjectManager GetManagedObjects > /dev/null || break; echo -n .; done)
  - real 0m26.843s
  + real 0m25.281s
- Regarding RSS size, just looking at the processes in similar
  conditions doesn't give a large difference. On my system they
  consume about 19MB RSS. It seems that the new version has a
  slightly smaller RSS size.
  - 19356 RSS
  + 18660 RSS
2018-02-26 13:51:52 +01:00
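As a minimal sketch of the grouping the annotation above suggests: while a GObject is frozen, g_object_notify() calls are queued and deduplicated, and thawing dispatches them together, in order. The helper and the property names "state" and "connectivity" are hypothetical, not code from this file.

#include <glib-object.h>

/* Sketch: batch two property changes so listeners see both notifications
 * dispatched together, in order, once the object is thawed. */
static void
manager_update_two_props(GObject *manager)
{
    g_object_freeze_notify(manager); /* queue notifications from here on */

    g_object_notify(manager, "state");        /* queued, deduplicated */
    g_object_notify(manager, "connectivity"); /* queued, deduplicated */

    g_object_thaw_notify(manager); /* dispatch the queued notifications now */
}

With the reworked glue, each dispatched notification maps directly to a PropertiesChanged emission, so freezing is the way to present a batch of changes as one coherent burst.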
|
|
|
nm_dbus_utils_g_value_set_object_path(value, priv->activating_connection);
|
2013-08-22 13:06:51 -04:00
|
|
|
break;
|
2009-10-20 15:25:04 -07:00
|
|
|
case PROP_SLEEPING:
|
|
|
|
|
g_value_set_boolean(value, priv->sleeping);
|
|
|
|
|
break;
|
2014-01-08 12:18:33 -06:00
|
|
|
case PROP_DEVICES:
|
2021-07-29 10:02:11 +02:00
|
|
|
g_value_take_boxed(value, nm_strv_make_deep_copied(_get_devices_paths(self, FALSE)));
|
2014-01-08 12:18:33 -06:00
|
|
|
break;
|
2015-06-03 09:15:24 +02:00
|
|
|
case PROP_METERED:
|
|
|
|
|
g_value_set_uint(value, priv->metered);
|
|
|
|
|
break;
|
2015-07-03 11:06:39 +02:00
|
|
|
case PROP_GLOBAL_DNS_CONFIGURATION:
|
|
|
|
|
config_data = nm_config_get_data(priv->config);
|
|
|
|
|
dns_config = nm_config_data_get_global_dns_config(config_data);
|
|
|
|
|
nm_global_dns_config_to_dbus(dns_config, value);
|
|
|
|
|
break;
|
2014-10-06 11:21:54 -05:00
|
|
|
case PROP_ALL_DEVICES:
|
2021-07-29 10:02:11 +02:00
|
|
|
g_value_take_boxed(value, nm_strv_make_deep_copied(_get_devices_paths(self, TRUE)));
|
2014-10-06 11:21:54 -05:00
|
|
|
break;
|
2017-10-21 16:05:14 +02:00
|
|
|
case PROP_CHECKPOINTS:
|
2018-03-27 16:19:20 +02:00
|
|
|
g_value_take_boxed(
|
|
|
|
|
value,
|
2021-07-29 10:02:11 +02:00
|
|
|
priv->checkpoint_mgr ? nm_strv_make_deep_copied(
|
2018-03-27 16:19:20 +02:00
|
|
|
nm_checkpoint_manager_get_checkpoint_paths(priv->checkpoint_mgr, NULL))
|
|
|
|
|
: NULL);
|
2017-10-21 16:05:14 +02:00
|
|
|
break;
|
2009-06-11 00:39:12 -04:00
|
|
|
default:
|
|
|
|
|
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-03-20 08:51:08 -05:00
|
|
|
static void
|
2014-01-24 13:24:01 -06:00
|
|
|
set_property(GObject *object, guint prop_id, const GValue *value, GParamSpec *pspec)
|
2013-03-20 08:51:08 -05:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = NM_MANAGER(object);
|
|
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2015-07-03 11:06:39 +02:00
|
|
|
NMGlobalDnsConfig *dns_config;
|
2021-11-09 13:28:54 +01:00
|
|
|
GError *error = NULL;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2014-01-24 13:24:01 -06:00
|
|
|
switch (prop_id) {
|
|
|
|
|
case PROP_WIRELESS_ENABLED:
|
2022-02-01 22:19:36 +01:00
|
|
|
_rfkill_update_from_user(NM_MANAGER(object),
|
|
|
|
|
NM_RFKILL_TYPE_WLAN,
|
|
|
|
|
g_value_get_boolean(value));
|
2014-01-24 13:24:01 -06:00
|
|
|
break;
|
|
|
|
|
case PROP_WWAN_ENABLED:
|
2022-02-01 22:19:36 +01:00
|
|
|
_rfkill_update_from_user(NM_MANAGER(object),
|
|
|
|
|
NM_RFKILL_TYPE_WWAN,
|
|
|
|
|
g_value_get_boolean(value));
|
2014-01-24 13:24:01 -06:00
|
|
|
break;
|
|
|
|
|
case PROP_WIMAX_ENABLED:
|
2018-09-15 07:20:54 -04:00
|
|
|
/* WIMAX is deprecated. This does nothing. */
|
2014-01-24 13:24:01 -06:00
|
|
|
break;
|
2017-08-09 15:20:04 +08:00
|
|
|
case PROP_CONNECTIVITY_CHECK_ENABLED:
|
|
|
|
|
nm_config_set_connectivity_check_enabled(priv->config, g_value_get_boolean(value));
|
|
|
|
|
break;
|
2015-07-03 11:06:39 +02:00
|
|
|
case PROP_GLOBAL_DNS_CONFIGURATION:
|
|
|
|
|
dns_config = nm_global_dns_config_from_dbus(value, &error);
|
|
|
|
|
if (!error)
|
|
|
|
|
nm_config_set_global_dns(priv->config, dns_config, &error);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2015-07-03 11:06:39 +02:00
|
|
|
nm_global_dns_config_free(dns_config);
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2015-07-03 11:06:39 +02:00
|
|
|
if (error) {
|
2016-03-02 11:38:26 +01:00
|
|
|
_LOGD(LOGD_CORE, "set global DNS failed with error: %s", error->message);
|
2015-07-03 11:06:39 +02:00
|
|
|
g_error_free(error);
|
|
|
|
|
}
|
|
|
|
|
break;
|
2014-01-24 13:24:01 -06:00
|
|
|
default:
|
|
|
|
|
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
|
|
|
|
|
break;
|
2013-03-20 08:51:08 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-09-17 14:17:30 -05:00
|
|
|
static void
|
|
|
|
|
_deinit_device_factory(NMDeviceFactory *factory, gpointer user_data)
|
|
|
|
|
{
|
|
|
|
|
g_signal_handlers_disconnect_matched(factory,
|
|
|
|
|
G_SIGNAL_MATCH_DATA,
|
|
|
|
|
0,
|
|
|
|
|
0,
|
|
|
|
|
NULL,
|
|
|
|
|
NULL,
|
|
|
|
|
NM_MANAGER(user_data));
|
|
|
|
|
}
|
|
|
|
|
|
2009-06-11 00:39:12 -04:00
|
|
|
static void
|
2014-01-24 13:24:01 -06:00
|
|
|
dispose(GObject *object)
|
2009-06-11 00:39:12 -04:00
|
|
|
{
|
2021-11-09 13:28:54 +01:00
|
|
|
NMManager *self = NM_MANAGER(object);
|
2017-09-29 15:18:48 +02:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);
|
2021-11-09 13:28:54 +01:00
|
|
|
CList *iter;
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-04-18 09:06:54 +02:00
|
|
|
nm_assert(c_list_is_empty(&priv->async_op_lst_head));
|
2020-09-28 16:03:33 +02:00
|
|
|
|
2018-06-28 18:05:05 +02:00
|
|
|
g_signal_handlers_disconnect_by_func(priv->platform, G_CALLBACK(platform_link_cb), self);
|
2019-05-26 18:58:12 +02:00
|
|
|
while ((iter = c_list_first(&priv->link_cb_lst))) {
|
2017-09-29 15:04:53 +02:00
|
|
|
PlatformLinkCbData *data = c_list_entry(iter, PlatformLinkCbData, lst);
|
2009-12-23 00:03:45 -08:00
|
|
|
|
2018-04-18 09:06:54 +02:00
|
|
|
g_source_remove(data->idle_id);
|
|
|
|
|
c_list_unlink_stale(&data->lst);
|
2017-09-29 15:04:53 +02:00
|
|
|
g_slice_free(PlatformLinkCbData, data);
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
2018-04-18 09:06:54 +02:00
|
|
|
|
2019-05-26 18:58:12 +02:00
|
|
|
while ((iter = c_list_first(&priv->auth_lst_head)))
|
2017-09-29 15:04:53 +02:00
|
|
|
nm_auth_chain_destroy(nm_auth_chain_parent_lst_entry(iter));
|
|
|
|
|
|
|
|
|
|
nm_clear_g_source(&priv->devices_inited_id);
|
2017-09-29 15:19:19 +02:00
|
|
|
|
2019-05-26 18:49:55 +02:00
|
|
|
nm_clear_pointer(&priv->checkpoint_mgr, nm_checkpoint_manager_free);
|
2009-12-23 00:03:45 -08:00
|
|
|
|
2017-03-14 10:42:36 +01:00
|
|
|
if (priv->concheck_mgr) {
|
|
|
|
|
g_signal_handlers_disconnect_by_func(priv->concheck_mgr,
|
|
|
|
|
G_CALLBACK(concheck_config_changed_cb),
|
2020-09-28 16:03:33 +02:00
|
|
|
self);
|
2017-03-14 10:42:36 +01:00
|
|
|
g_clear_object(&priv->concheck_mgr);
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
2017-03-14 10:42:36 +01:00
|
|
|
|
2022-04-08 12:01:51 +02:00
|
|
|
nm_clear_g_signal_handler(priv->dns_mgr, &priv->dns_mgr_update_pending_signal_id);
|
core: add nm_manager_get_dns_manager() getter
2022-04-08 11:40:42 +02:00
|
|
|
g_clear_object(&priv->dns_mgr);
|
|
|
|
|
|
all: use nm_clear_pointer() instead of g_clear_pointer()
g_clear_pointer() would always cast the destroy notify function
pointer to GDestroyNotify. That means it lost some type safety; the
following compiles despite the mismatched destroy function:
GPtrArray *ptr_arr = ...
g_clear_pointer (&ptr_arr, g_array_unref);
Since glib 2.58 ([1]), g_clear_pointer() is also more type safe. But
this is not used by NetworkManager, because we don't set
GLIB_VERSION_MIN_REQUIRED to 2.58.
[1] https://gitlab.gnome.org/GNOME/glib/-/commit/f9a9902aac826ab4aecc25f6eb533a418a4fa559
We have had nm_clear_pointer() to avoid this issue for a long time (pre
1.12.0). Possibly we should redefine g_clear_pointer() in our source tree
as nm_clear_pointer(). However, I don't like to patch glib functions
with our own variant. Arguably, we do patch g_clear_error() in
such a manner, but there the point is to make the function inlinable.
Also, nm_clear_pointer() returns a boolean that indicates whether
anything was cleared. That is sometimes useful. I think we should
just consistently use nm_clear_pointer() instead, which always does
the preferable thing.
Replace:
sed 's/\<g_clear_pointer *(\([^;]*\), *\([a-z_A-Z0-9]\+\) *)/nm_clear_pointer (\1, \2)/g' $(git grep -l g_clear_pointer) -i
2020-03-23 11:09:24 +01:00
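A small sketch of the boolean return mentioned above. This assumes only glib plus NetworkManager's own nm_clear_pointer() macro (from libnm-glib-aux); the helper below is hypothetical.

#include <glib.h>

/* Sketch: nm_clear_pointer() clears *p_table and reports whether there
 * was anything to clear, enabling one-line "free and act if it existed"
 * patterns that g_clear_pointer() does not support. */
static void
drop_table(GHashTable **p_table)
{
    if (nm_clear_pointer(p_table, g_hash_table_destroy))
        g_print("hash table destroyed\n");
    else
        g_print("nothing to clear\n");
}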
|
|
|
if (priv->auth_mgr) {
|
|
|
|
|
g_signal_handlers_disconnect_by_func(priv->auth_mgr, G_CALLBACK(auth_mgr_changed), self);
|
|
|
|
|
g_clear_object(&priv->auth_mgr);
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
2016-07-01 12:11:01 +02:00
|
|
|
|
connectivity: schedule connectivity timers per-device and probe for short outages
It might happen that connectivity is lost only for a moment and
returns soon after. Based on that assumption, when we lose connectivity
we want to have a probe interval where we check for returning
connectivity more frequently.
For that, we handle tracking of the timeouts per-device.
The interval shall start at 1 second and double until
the full interval is reached. Actually, due to the implementation, it's unlikely
that we already perform the second check 1 second later. That is because commonly
the first check returns before the one second timeout is reached and bumps the
interval to 2 seconds right away.
Also, we go to extra lengths so that manual connectivity checks
delay the periodic checks. By being smarter about that, we can reduce
the number of connectivity checks while still keeping the promise to
check at least within the requested interval.
The complexity of bookkeeping the timeouts is remarkable. But I think
it is worth the effort and we should try hard to
- have a connectivity state that is as accurate as possible. Clearly,
connectivity checking means that we are probing, so being more intelligent
about timeout and backoff timers can result in a better connectivity
state. The connectivity state is important because we use it for
the default-route penalty and the GUI indicates bad connectivity.
- be intelligent about avoiding redundant connectivity checks. While
we want to check often to get an accurate connectivity state, we
also want to minimize the number of HTTP requests, in case the
connectivity is established and supposedly stable.
Also, perform connectivity checks in every state of the device.
Even if a device is disconnected, it still might have connectivity,
for example if the user externally adds an IP address on an unmanaged
device.
https://bugzilla.gnome.org/show_bug.cgi?id=792240
2018-02-20 21:41:14 +01:00
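The doubling schedule described above can be sketched as follows. The helper and parameter names are hypothetical; the real per-device bookkeeping in the source is considerably more involved.

#include <glib.h>

/* Sketch: after an outage, probe after 1 second, then 2, 4, 8, ...
 * until the configured full interval is reached again. */
static guint
next_probe_interval_sec(guint cur_interval_sec, guint full_interval_sec)
{
    if (cur_interval_sec == 0)
        return 1; /* first probe right after losing connectivity */
    if (cur_interval_sec >= full_interval_sec / 2)
        return full_interval_sec; /* back at the regular cadence */
    return cur_interval_sec * 2; /* double until the full interval */
}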
|
|
|
nm_assert(c_list_is_empty(&priv->devices_lst_head));
|
|
|
|
|
|
2016-07-01 14:25:34 +02:00
|
|
|
nm_clear_g_source(&priv->ac_cleanup_id);
|
2009-12-23 00:18:18 -08:00
|
|
|
|
core: track devices in manager via embedded CList
Instead of using a GSList for tracking the devices, use a CList.
I think a CList is in most cases a more suitable data structure
than GSList:
- you can find out in O(1) whether the object is linked. That
is nice, for example to assert in NMDevice's destructor that
the object was unlinked, and we will use that later in
nm_manager_get_device_by_path().
- you can unlink the element in O(1), and you can unlink the
element without having access to the link's head
- contrary to GSList, this does not require an extra slice
allocation for the link node. It quite possibly consumes
slightly less memory because the CList structure is embedded
in a struct that we already allocate. Even if slice allocation
were perfect and consumed only 2*sizeof(gpointer) for the link
node, it would at most be as good as CList. Quite possibly,
there is an overhead though.
- CList possibly has better memory locality, because the link
structure and the data are close to each other.
Something which could be seen as a disadvantage is that with CList
one device can only be tracked in one NMManager instance at a time.
But that is fine. There exists only one NMManager instance for now,
and even if we ever introduced multiple managers, we probably
would not associate one NMDevice instance with multiple managers.
The advantages are arguably not huge, but CList is IMHO clearly the
more suited data structure. No need to stick to a suboptimal data
structure for the job. Refactor it.
2018-03-23 21:51:07 +01:00
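A minimal sketch of the embedded-link pattern the message describes, using a simplified hypothetical type (the header path for c-list may differ by build setup):

#include <c-list.h>

/* Sketch: the link node lives inside the tracked object itself, so
 * "is it linked?" is answerable in O(1) and unlinking needs neither
 * a list traversal nor access to the list head. */
typedef struct {
    CList devices_lst; /* embedded link node */
    char *iface;
} ExampleDevice;

static void
example_device_untrack(ExampleDevice *device)
{
    if (c_list_is_linked(&device->devices_lst))
        c_list_unlink(&device->devices_lst); /* O(1); no head required */
}

Contrast this with GSList, where removal requires the list head and a traversal, plus a separately allocated link node per element.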
|
|
|
while ((iter = c_list_first(&priv->active_connections_lst_head)))
|
|
|
|
|
active_connection_remove(self,
|
|
|
|
|
c_list_entry(iter, NMActiveConnection, active_connections_lst));
|
2010-04-08 18:23:43 -07:00
|
|
|
|
2016-01-04 09:46:22 +01:00
|
|
|
nm_assert(c_list_is_empty(&priv->active_connections_lst_head));
|
|
|
|
|
g_clear_object(&priv->primary_connection);
|
|
|
|
|
g_clear_object(&priv->activating_connection);
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2019-05-26 18:58:12 +02:00
|
|
|
if (priv->config) {
|
|
|
|
|
g_signal_handlers_disconnect_by_func(priv->config, _config_changed_cb, self);
|
|
|
|
|
g_clear_object(&priv->config);
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
2019-05-26 18:58:12 +02:00
|
|
|
|
2017-11-23 21:30:09 +01:00
|
|
|
if (priv->policy) {
|
|
|
|
|
g_signal_handlers_disconnect_by_func(priv->policy, policy_default_ac_changed, self);
|
2014-01-24 13:24:01 -06:00
|
|
|
g_signal_handlers_disconnect_by_func(priv->policy, policy_activating_ac_changed, self);
|
|
|
|
|
g_clear_object(&priv->policy);
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2015-01-30 19:52:53 +01:00
|
|
|
if (priv->settings) {
|
|
|
|
|
g_signal_handlers_disconnect_by_func(priv->settings,
|
2017-09-29 15:18:48 +02:00
|
|
|
settings_startup_complete_changed,
|
2020-09-28 16:03:33 +02:00
|
|
|
self);
|
2017-09-29 15:18:48 +02:00
|
|
|
g_signal_handlers_disconnect_by_func(priv->settings,
|
|
|
|
|
system_unmanaged_devices_changed_cb,
|
2020-09-28 16:03:33 +02:00
|
|
|
self);
|
2017-09-29 15:18:48 +02:00
|
|
|
g_signal_handlers_disconnect_by_func(priv->settings, connection_added_cb, self);
|
|
|
|
|
g_signal_handlers_disconnect_by_func(priv->settings, connection_updated_cb, self);
|
|
|
|
|
g_signal_handlers_disconnect_by_func(priv->settings, connection_flags_changed, self);
|
2015-01-30 19:52:53 +01:00
|
|
|
g_clear_object(&priv->settings);
|
|
|
|
|
}
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2014-01-24 13:24:01 -06:00
|
|
|
if (priv->hostname_manager) {
|
2022-01-04 18:28:46 +01:00
|
|
|
g_signal_handlers_disconnect_by_func(priv->hostname_manager,
|
|
|
|
|
_static_hostname_changed_cb,
|
|
|
|
|
self);
|
2014-01-24 13:24:01 -06:00
|
|
|
g_clear_object(&priv->hostname_manager);
|
|
|
|
|
}
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2015-09-21 14:30:02 +02:00
|
|
|
g_clear_object(&priv->vpn_manager);
|
|
|
|
|
|
2022-07-11 16:06:14 +02:00
|
|
|
if (priv->sleep_devices) {
|
|
|
|
|
sleep_devices_clear(self);
|
|
|
|
|
nm_clear_pointer(&priv->sleep_devices, g_hash_table_unref);
|
|
|
|
|
}
|
2017-04-23 14:20:37 +02:00
|
|
|
|
2014-01-24 13:24:01 -06:00
|
|
|
if (priv->sleep_monitor) {
|
|
|
|
|
g_signal_handlers_disconnect_by_func(priv->sleep_monitor, sleeping_cb, self);
|
|
|
|
|
g_clear_object(&priv->sleep_monitor);
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
2009-06-11 00:39:12 -04:00
|
|
|
|
2017-09-29 15:18:48 +02:00
|
|
|
if (priv->fw_monitor) {
|
all: use nm_clear_pointer() instead of g_clear_pointer()
2020-03-23 11:09:24 +01:00
|
|
|
g_signal_handlers_disconnect_by_func(priv->fw_monitor, firmware_dir_changed, self);
|
2016-05-05 14:14:40 +02:00
|
|
|
|
2015-01-05 16:37:45 +01:00
|
|
|
nm_clear_g_source(&priv->fw_changed_id);
|
2010-05-28 18:23:00 -07:00
|
|
|
|
2017-09-29 15:18:48 +02:00
|
|
|
g_file_monitor_cancel(priv->fw_monitor);
|
|
|
|
|
g_clear_object(&priv->fw_monitor);
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
2010-08-31 15:45:55 -05:00
|
|
|
|
2016-01-04 09:46:22 +01:00
|
|
|
if (priv->rfkill_mgr) {
|
|
|
|
|
g_signal_handlers_disconnect_by_func(priv->rfkill_mgr,
|
|
|
|
|
rfkill_manager_rfkill_changed_cb,
|
2020-09-28 16:03:33 +02:00
|
|
|
self);
|
2016-01-04 09:46:22 +01:00
|
|
|
g_clear_object(&priv->rfkill_mgr);
|
2020-09-28 16:03:33 +02:00
|
|
|
}
|
2010-07-01 10:32:11 -07:00
|
|
|
|
2014-01-24 13:24:01 -06:00
|
|
|
nm_clear_g_source(&priv->delete_volatile_connection_idle_id);
|
2018-03-10 16:18:16 +01:00
|
|
|
_delete_volatile_connection_all(self, FALSE);
|
|
|
|
|
nm_assert(!priv->delete_volatile_connection_idle_id);
|
|
|
|
|
nm_assert(c_list_is_empty(&priv->delete_volatile_connection_lst_head));
|
2010-07-01 10:32:11 -07:00
|
|
|
|
2017-09-29 15:18:48 +02:00
|
|
|
nm_device_factory_manager_for_each_factory(_deinit_device_factory, self);
|
2015-09-21 14:38:25 +02:00
|
|
|
|
2018-03-10 16:18:16 +01:00
|
|
|
nm_clear_g_source(&priv->timestamp_update_id);
|
|
|
|
|
|
2017-09-29 15:18:48 +02:00
|
|
|
nm_clear_pointer(&priv->device_route_metrics, g_hash_table_destroy);
|
2016-01-18 14:18:50 +01:00
|
|
|
|
2016-09-23 13:24:10 +02:00
|
|
|
G_OBJECT_CLASS(nm_manager_parent_class)->dispose(object);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
|
finalize(GObject *object)
|
|
|
|
|
{
|
2020-02-14 10:50:25 +01:00
|
|
|
NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(object);
|
2016-09-23 13:24:10 +02:00
|
|
|
|
2016-09-15 23:34:24 +03:00
|
|
|
g_array_free(priv->capabilities, TRUE);
|
|
|
|
|
|
2016-09-23 13:24:10 +02:00
|
|
|
G_OBJECT_CLASS(nm_manager_parent_class)->finalize(object);
|
2017-09-29 15:11:33 +02:00
|
|
|
|
|
|
|
|
g_object_unref(priv->platform);
|
2009-06-11 00:39:12 -04:00
|
|
|
}
|
|
|
|
|
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
static const GDBusSignalInfo signal_info_check_permissions =
|
|
|
|
|
NM_DEFINE_GDBUS_SIGNAL_INFO_INIT("CheckPermissions", );
|
|
|
|
|
|
|
|
|
|
static const GDBusSignalInfo signal_info_state_changed = NM_DEFINE_GDBUS_SIGNAL_INFO_INIT(
|
|
|
|
|
"StateChanged",
|
|
|
|
|
.args = NM_DEFINE_GDBUS_ARG_INFOS(NM_DEFINE_GDBUS_ARG_INFO("state", "u"), ), );
|
|
|
|
|
|
|
|
|
|
static const GDBusSignalInfo signal_info_device_added = NM_DEFINE_GDBUS_SIGNAL_INFO_INIT(
|
|
|
|
|
"DeviceAdded",
|
|
|
|
|
.args = NM_DEFINE_GDBUS_ARG_INFOS(NM_DEFINE_GDBUS_ARG_INFO("device_path", "o"), ), );
|
|
|
|
|
|
|
|
|
|
static const GDBusSignalInfo signal_info_device_removed = NM_DEFINE_GDBUS_SIGNAL_INFO_INIT(
|
|
|
|
|
"DeviceRemoved",
|
|
|
|
|
.args = NM_DEFINE_GDBUS_ARG_INFOS(NM_DEFINE_GDBUS_ARG_INFO("device_path", "o"), ), );
|
|
|
|
|
|
|
|
|
|
static const NMDBusInterfaceInfoExtended interface_info_manager = {
|
|
|
|
|
.parent = NM_DEFINE_GDBUS_INTERFACE_INFO_INIT(
|
|
|
|
|
NM_DBUS_INTERFACE,
|
|
|
|
|
.methods = NM_DEFINE_GDBUS_METHOD_INFOS(
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT("Reload",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS(
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("flags", "u"), ), ),
|
|
|
|
|
.handle = impl_manager_reload, ),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT(
|
|
|
|
|
"GetDevices",
|
|
|
|
|
.out_args =
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFOS(NM_DEFINE_GDBUS_ARG_INFO("devices", "ao"), ), ),
|
|
|
|
|
.handle = impl_manager_get_devices, ),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT(
|
|
|
|
|
"GetAllDevices",
|
|
|
|
|
.out_args =
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFOS(NM_DEFINE_GDBUS_ARG_INFO("devices", "ao"), ), ),
|
|
|
|
|
.handle = impl_manager_get_all_devices, ),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT(
|
|
|
|
|
"GetDeviceByIpIface",
|
2018-06-22 15:53:46 +02:00
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS(NM_DEFINE_GDBUS_ARG_INFO("iface", "s"), ),
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
.out_args =
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFOS(NM_DEFINE_GDBUS_ARG_INFO("device", "o"), ), ),
|
|
|
|
|
.handle = impl_manager_get_device_by_ip_iface, ),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT(
|
|
|
|
|
"ActivateConnection",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS(
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("connection", "o"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("device", "o"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("specific_object", "o"), ),
|
|
|
|
|
.out_args = NM_DEFINE_GDBUS_ARG_INFOS(
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("active_connection", "o"), ), ),
|
|
|
|
|
.handle = impl_manager_activate_connection, ),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT(
|
|
|
|
|
"AddAndActivateConnection",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS(
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("connection", "a{sa{sv}}"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("device", "o"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("specific_object", "o"), ),
|
|
|
|
|
.out_args = NM_DEFINE_GDBUS_ARG_INFOS(
|
2018-10-30 16:40:40 +01:00
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("path", "o"),
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("active_connection", "o"), ), ),
|
|
|
|
|
.handle = impl_manager_add_and_activate_connection, ),
|
|
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT(
|
|
|
|
|
"AddAndActivateConnection2",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS(
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("connection", "a{sa{sv}}"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("device", "o"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("specific_object", "o"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("options", "a{sv}"), ),
|
|
|
|
|
.out_args = NM_DEFINE_GDBUS_ARG_INFOS(
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("path", "o"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("active_connection", "o"),
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("result", "a{sv}"), ), ),
|
|
|
|
|
.handle = impl_manager_add_and_activate_connection, ),
|
2018-10-30 16:40:40 +01:00
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
|
|
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT(
|
|
|
|
|
"DeactivateConnection",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS(
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("active_connection", "o"), ), ),
|
|
|
|
|
.handle = impl_manager_deactivate_connection, ),
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
|
2018-10-30 16:40:40 +01:00
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT("Sleep",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS(
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("sleep", "b"), ), ),
|
|
|
|
|
.handle = impl_manager_sleep, ),
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
|
2018-10-30 16:40:40 +01:00
|
|
|
NM_DEFINE_GDBUS_METHOD_INFO_INIT("Enable",
|
|
|
|
|
.in_args = NM_DEFINE_GDBUS_ARG_INFOS(
|
|
|
|
|
NM_DEFINE_GDBUS_ARG_INFO("enable", "b"), ), ),
|
|
|
|
|
.handle = impl_manager_enable, ),
|
core/dbus: rework D-Bus implementation to use lower layer GDBusConnection API
2018-02-26 13:51:52 +01:00
|
|
|
        NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
            NM_DEFINE_GDBUS_METHOD_INFO_INIT(
                "GetPermissions",
                .out_args = NM_DEFINE_GDBUS_ARG_INFOS(
                    NM_DEFINE_GDBUS_ARG_INFO("permissions", "a{ss}"), ), ),
            .handle = impl_manager_get_permissions, ),
        NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
            NM_DEFINE_GDBUS_METHOD_INFO_INIT("SetLogging",
                                             .in_args = NM_DEFINE_GDBUS_ARG_INFOS(
                                                 NM_DEFINE_GDBUS_ARG_INFO("level", "s"),
                                                 NM_DEFINE_GDBUS_ARG_INFO("domains", "s"), ), ),
            .handle = impl_manager_set_logging, ),
        NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
            NM_DEFINE_GDBUS_METHOD_INFO_INIT("GetLogging",
                                             .out_args = NM_DEFINE_GDBUS_ARG_INFOS(
                                                 NM_DEFINE_GDBUS_ARG_INFO("level", "s"),
                                                 NM_DEFINE_GDBUS_ARG_INFO("domains", "s"), ), ),
            .handle = impl_manager_get_logging, ),
        NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
            NM_DEFINE_GDBUS_METHOD_INFO_INIT(
                "CheckConnectivity",
                .out_args = NM_DEFINE_GDBUS_ARG_INFOS(
                    NM_DEFINE_GDBUS_ARG_INFO("connectivity", "u"), ), ),
            .handle = impl_manager_check_connectivity, ),
        NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
            NM_DEFINE_GDBUS_METHOD_INFO_INIT("state",
                                             .out_args = NM_DEFINE_GDBUS_ARG_INFOS(
                                                 NM_DEFINE_GDBUS_ARG_INFO("state", "u"), ), ),
            .handle = impl_manager_state, ),
        NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
            NM_DEFINE_GDBUS_METHOD_INFO_INIT(
                "CheckpointCreate",
                .in_args =
                    NM_DEFINE_GDBUS_ARG_INFOS(NM_DEFINE_GDBUS_ARG_INFO("devices", "ao"),
                                              NM_DEFINE_GDBUS_ARG_INFO("rollback_timeout", "u"),
                                              NM_DEFINE_GDBUS_ARG_INFO("flags", "u"), ),
                .out_args =
                    NM_DEFINE_GDBUS_ARG_INFOS(NM_DEFINE_GDBUS_ARG_INFO("checkpoint", "o"), ), ),
            .handle = impl_manager_checkpoint_create, ),
        NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
            NM_DEFINE_GDBUS_METHOD_INFO_INIT(
                "CheckpointDestroy",
                .in_args =
                    NM_DEFINE_GDBUS_ARG_INFOS(NM_DEFINE_GDBUS_ARG_INFO("checkpoint", "o"), ), ),
            .handle = impl_manager_checkpoint_destroy, ),
        NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
            NM_DEFINE_GDBUS_METHOD_INFO_INIT(
                "CheckpointRollback",
                .in_args =
                    NM_DEFINE_GDBUS_ARG_INFOS(NM_DEFINE_GDBUS_ARG_INFO("checkpoint", "o"), ),
                .out_args =
                    NM_DEFINE_GDBUS_ARG_INFOS(NM_DEFINE_GDBUS_ARG_INFO("result", "a{su}"), ), ),
            .handle = impl_manager_checkpoint_rollback, ),
        NM_DEFINE_DBUS_METHOD_INFO_EXTENDED(
            NM_DEFINE_GDBUS_METHOD_INFO_INIT(
                "CheckpointAdjustRollbackTimeout",
                .in_args = NM_DEFINE_GDBUS_ARG_INFOS(
                    NM_DEFINE_GDBUS_ARG_INFO("checkpoint", "o"),
                    NM_DEFINE_GDBUS_ARG_INFO("add_timeout", "u"), ), ),
            .handle = impl_manager_checkpoint_adjust_rollback_timeout, ), ),
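        /* "CheckpointAdjustRollbackTimeout" above takes "(ou)": the checkpoint
         * object path plus a new rollback timeout in seconds. Per the
         * checkpoint annotation further below, it is meant to be called
         * repeatedly to keep bumping the timeout while a remote
         * reconfiguration is in progress; a client-side call sketch follows
         * this table. */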
    .signals = NM_DEFINE_GDBUS_SIGNAL_INFOS(&signal_info_check_permissions,
                                            &signal_info_state_changed,
                                            &signal_info_device_added,
                                            &signal_info_device_removed, ),
    .properties = NM_DEFINE_GDBUS_PROPERTY_INFOS(
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("Devices", "ao", NM_MANAGER_DEVICES),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("AllDevices",
                                                       "ao",
                                                       NM_MANAGER_ALL_DEVICES),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("Checkpoints",
                                                       "ao",
                                                       NM_MANAGER_CHECKPOINTS),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("NetworkingEnabled",
                                                       "b",
                                                       NM_MANAGER_NETWORKING_ENABLED),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READWRITABLE(
"WirelessEnabled",
|
2020-09-28 16:03:33 +02:00
|
|
|
"b",
|
            NM_MANAGER_WIRELESS_ENABLED,
            NM_AUTH_PERMISSION_ENABLE_DISABLE_WIFI,
            NM_AUDIT_OP_RADIO_CONTROL),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("WirelessHardwareEnabled",
                                                       "b",
                                                       NM_MANAGER_WIRELESS_HARDWARE_ENABLED),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READWRITABLE(
"WwanEnabled",
|
2020-09-28 16:03:33 +02:00
|
|
|
"b",
|
            NM_MANAGER_WWAN_ENABLED,
            NM_AUTH_PERMISSION_ENABLE_DISABLE_WWAN,
            NM_AUDIT_OP_RADIO_CONTROL),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("WwanHardwareEnabled",
                                                       "b",
                                                       NM_MANAGER_WWAN_HARDWARE_ENABLED),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READWRITABLE(
"WimaxEnabled",
|
2020-09-28 16:03:33 +02:00
|
|
|
"b",
|
checkpoint: allow resetting the rollback timeout via D-Bus
This allows to adjust the timeout of an existing checkpoint.
The main usecase of checkpoints, is to have a fail-safe when
configuring the network remotely. By allowing to reset the timeout,
the user can perform a series of actions, and keep bumping the
timeout. That way, the entire series is still guarded by the same
checkpoint, but the user can start with short timeout, and
re-adjust the timeout as he goes along.
The libnm API only implements the async form (at least for now).
Sync methods are fundamentally wrong with D-Bus, and it's probably
not needed. Also, follow glib convenction, where the async form
doesn't have the _async name suffix. Also, accept a D-Bus path
as argument, not a NMCheckpoint instance. The libnm API should
not be more restricted than the underlying D-Bus API. It would
be cumbersome to require the user to lookup the NMCheckpoint
instance first, especially since libnm doesn't provide an efficient
or convenient lookup-by-path method. On the other hand, retrieving
the path from a NMCheckpoint instance is always possible.
2018-03-28 08:09:56 +02:00
|
|
|
NM_MANAGER_WIMAX_ENABLED,
|
|
|
|
|
NM_AUTH_PERMISSION_ENABLE_DISABLE_WIMAX,
|
|
|
|
|
NM_AUDIT_OP_RADIO_CONTROL),
|
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("WimaxHardwareEnabled",
                                                       "b",
                                                       NM_MANAGER_WIMAX_HARDWARE_ENABLED),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("RadioFlags",
                                                       "u",
                                                       NM_MANAGER_RADIO_FLAGS),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("ActiveConnections",
                                                       "ao",
                                                       NM_MANAGER_ACTIVE_CONNECTIONS),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("PrimaryConnection",
                                                       "o",
                                                       NM_MANAGER_PRIMARY_CONNECTION),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("PrimaryConnectionType",
                                                       "s",
                                                       NM_MANAGER_PRIMARY_CONNECTION_TYPE),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("Metered", "u", NM_MANAGER_METERED),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("ActivatingConnection",
                                                       "o",
                                                       NM_MANAGER_ACTIVATING_CONNECTION),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("Startup", "b", NM_MANAGER_STARTUP),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("Version", "s", NM_MANAGER_VERSION),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("Capabilities",
                                                       "au",
                                                       NM_MANAGER_CAPABILITIES),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("State", "u", NM_MANAGER_STATE),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("Connectivity",
                                                       "u",
                                                       NM_MANAGER_CONNECTIVITY),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("ConnectivityCheckAvailable",
                                                       "b",
                                                       NM_MANAGER_CONNECTIVITY_CHECK_AVAILABLE),
        NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READWRITABLE(
"ConnectivityCheckEnabled",
|
|
|
|
|
"b",
|
|
|
|
|
NM_MANAGER_CONNECTIVITY_CHECK_ENABLED,
|
|
|
|
|
NM_AUTH_PERMISSION_ENABLE_DISABLE_CONNECTIVITY_CHECK,
|
|
|
|
|
NM_AUDIT_OP_NET_CONTROL),
|
2019-07-22 15:55:15 +01:00
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READABLE("ConnectivityCheckUri",
|
|
|
|
|
"s",
|
|
|
|
|
NM_MANAGER_CONNECTIVITY_CHECK_URI),
|
2021-05-12 18:18:57 +02:00
|
|
|
NM_DEFINE_DBUS_PROPERTY_INFO_EXTENDED_READWRITABLE(
|
"GlobalDnsConfiguration",
|
|
|
|
|
"a{sv}",
|
|
|
|
|
NM_MANAGER_GLOBAL_DNS_CONFIGURATION,
|
|
|
|
|
NM_AUTH_PERMISSION_SETTINGS_MODIFY_GLOBAL_DNS,
|
|
|
|
|
NM_AUDIT_OP_NET_CONTROL), ), ),
|
|
|
|
|
};
|
|
|
|
|
|
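For illustration only (this sketch is not part of nm-manager.c): the
interface declared above can be exercised with a few lines of GIO client
code. The well-known name, object path, and interface name are
NetworkManager's public D-Bus identifiers; error handling is mostly
omitted for brevity.

    #include <gio/gio.h>

    int
    main(void)
    {
        GDBusConnection *bus;
        GVariant        *ret;
        char            *s;

        bus = g_bus_get_sync(G_BUS_TYPE_SYSTEM, NULL, NULL);
        if (!bus)
            return 1;

        /* "GetPermissions" has no in-args and returns "(a{ss})",
         * matching the .out_args declared in the table above. */
        ret = g_dbus_connection_call_sync(bus,
                                          "org.freedesktop.NetworkManager",
                                          "/org/freedesktop/NetworkManager",
                                          "org.freedesktop.NetworkManager",
                                          "GetPermissions",
                                          NULL,
                                          G_VARIANT_TYPE("(a{ss})"),
                                          G_DBUS_CALL_FLAGS_NONE,
                                          -1,
                                          NULL,
                                          NULL);
        if (ret) {
            s = g_variant_print(ret, TRUE);
            g_print("%s\n", s);
            g_free(s);
            g_variant_unref(ret);
        }

        g_object_unref(bus);
        return 0;
    }

A "CheckpointAdjustRollbackTimeout" call is analogous, passing
g_variant_new("(ou)", checkpoint_path, add_timeout) as the parameters
argument.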
static void
nm_manager_class_init(NMManagerClass *manager_class)
{
    GObjectClass      *object_class      = G_OBJECT_CLASS(manager_class);
    NMDBusObjectClass *dbus_object_class = NM_DBUS_OBJECT_CLASS(manager_class);

#if WITH_OPENVSWITCH
    /* Use the symbols. These symbols are in the NetworkManager binary but will
     * be used by the OVS device plugin. If we don't use the symbols here, they
     * would be wrongly dropped. */
    manager_class->_use_symbol_nm_priv_helper_call_get_fd =
        (void (*)(void)) nm_priv_helper_call_get_fd;
    manager_class->_use_symbol_nm_priv_helper_utils_open_fd =
        (void (*)(void)) nm_priv_helper_utils_open_fd;
#endif

    dbus_object_class->export_path = NM_DBUS_EXPORT_PATH_STATIC(NM_DBUS_PATH);
    dbus_object_class->interface_infos = NM_DBUS_INTERFACE_INFOS(&interface_info_manager);

    object_class->constructed  = constructed;
    object_class->set_property = set_property;
    object_class->get_property = get_property;
    object_class->dispose      = dispose;
    object_class->finalize     = finalize;

    obj_properties[PROP_VERSION] = g_param_spec_string(NM_MANAGER_VERSION,
                                                       "",
                                                       "",
                                                       NULL,
                                                       G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

    obj_properties[PROP_CAPABILITIES] =
    g_param_spec_variant(NM_MANAGER_CAPABILITIES,
                         "",
                         "",
                         G_VARIANT_TYPE("au"),
                         NULL,
                         G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_STATE] = g_param_spec_uint(NM_MANAGER_STATE,
                                               "",
                                               "",
                                               0,
                                               NM_STATE_DISCONNECTED,
                                               0,
                                               G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_STARTUP] = g_param_spec_boolean(NM_MANAGER_STARTUP,
                                                    "",
                                                    "",
                                                    TRUE,
                                                    G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_NETWORKING_ENABLED] =
    g_param_spec_boolean(NM_MANAGER_NETWORKING_ENABLED,
                         "",
                         "",
                         TRUE,
                         G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_WIRELESS_ENABLED] =
    g_param_spec_boolean(NM_MANAGER_WIRELESS_ENABLED,
                         "",
                         "",
                         TRUE,
                         G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_WIRELESS_HARDWARE_ENABLED] =
    g_param_spec_boolean(NM_MANAGER_WIRELESS_HARDWARE_ENABLED,
                         "",
                         "",
                         TRUE,
                         G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_WWAN_ENABLED] =
    g_param_spec_boolean(NM_MANAGER_WWAN_ENABLED,
                         "",
                         "",
                         TRUE,
                         G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_WWAN_HARDWARE_ENABLED] =
    g_param_spec_boolean(NM_MANAGER_WWAN_HARDWARE_ENABLED,
                         "",
                         "",
                         TRUE,
                         G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_WIMAX_ENABLED] =
    g_param_spec_boolean(NM_MANAGER_WIMAX_ENABLED,
                         "",
                         "",
                         TRUE,
                         G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_WIMAX_HARDWARE_ENABLED] =
    g_param_spec_boolean(NM_MANAGER_WIMAX_HARDWARE_ENABLED,
                         "",
                         "",
                         TRUE,
                         G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_RADIO_FLAGS] = g_param_spec_uint(NM_MANAGER_RADIO_FLAGS,
                                                     "",
                                                     "",
                                                     0,
                                                     G_MAXUINT32,
                                                     NM_RADIO_FLAG_NONE,
                                                     G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_ACTIVE_CONNECTIONS] =
    g_param_spec_boxed(NM_MANAGER_ACTIVE_CONNECTIONS,
                       "",
                       "",
                       G_TYPE_STRV,
                       G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_CONNECTIVITY] =
    g_param_spec_uint(NM_MANAGER_CONNECTIVITY,
                      "",
                      "",
                      NM_CONNECTIVITY_UNKNOWN,
                      NM_CONNECTIVITY_FULL,
                      NM_CONNECTIVITY_UNKNOWN,
                      G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_CONNECTIVITY_CHECK_AVAILABLE] =
    g_param_spec_boolean(NM_MANAGER_CONNECTIVITY_CHECK_AVAILABLE,
                         "",
                         "",
                         TRUE,
                         G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_CONNECTIVITY_CHECK_ENABLED] =
    g_param_spec_boolean(NM_MANAGER_CONNECTIVITY_CHECK_ENABLED,
                         "",
                         "",
                         TRUE,
                         G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_CONNECTIVITY_CHECK_URI] =
    g_param_spec_string(NM_MANAGER_CONNECTIVITY_CHECK_URI,
                        "",
                        "",
                        NULL,
                        G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_PRIMARY_CONNECTION] =
    g_param_spec_string(NM_MANAGER_PRIMARY_CONNECTION,
                        "",
                        "",
                        NULL,
                        G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_PRIMARY_CONNECTION_TYPE] =
    g_param_spec_string(NM_MANAGER_PRIMARY_CONNECTION_TYPE,
                        "",
                        "",
                        NULL,
                        G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_ACTIVATING_CONNECTION] =
    g_param_spec_string(NM_MANAGER_ACTIVATING_CONNECTION,
                        "",
                        "",
                        NULL,
                        G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

/* Sleeping is not exported over D-Bus */
obj_properties[PROP_SLEEPING] = g_param_spec_boolean(NM_MANAGER_SLEEPING,
                                                     "",
                                                     "",
                                                     FALSE,
                                                     G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_DEVICES] = g_param_spec_boxed(NM_MANAGER_DEVICES,
                                                  "",
                                                  "",
                                                  G_TYPE_STRV,
                                                  G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

/**
 * NMManager:metered:
 *
 * Whether the connectivity is metered.
 *
 * Since: 1.2
 **/
obj_properties[PROP_METERED] = g_param_spec_uint(NM_MANAGER_METERED,
                                                 "",
                                                 "",
                                                 0,
                                                 G_MAXUINT32,
                                                 NM_METERED_UNKNOWN,
                                                 G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

/**
 * NMManager:global-dns-configuration:
 *
 * The global DNS configuration.
 *
 * Since: 1.2
 **/
obj_properties[PROP_GLOBAL_DNS_CONFIGURATION] =
    g_param_spec_variant(NM_MANAGER_GLOBAL_DNS_CONFIGURATION,
                         "",
                         "",
                         G_VARIANT_TYPE("a{sv}"),
                         NULL,
                         G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS);

/**
 * NMManager:all-devices:
 *
 * All devices, including those that are not realized.
 *
 * Since: 1.2
 **/
obj_properties[PROP_ALL_DEVICES] =
    g_param_spec_boxed(NM_MANAGER_ALL_DEVICES,
                       "",
                       "",
                       G_TYPE_STRV,
                       G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

obj_properties[PROP_CHECKPOINTS] =
    g_param_spec_boxed(NM_MANAGER_CHECKPOINTS,
                       "",
                       "",
                       G_TYPE_STRV,
                       G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);

g_object_class_install_properties(object_class, _PROPERTY_ENUMS_LAST, obj_properties);
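One payoff of keeping every GParamSpec in the obj_properties[] array installed above is that notification can be done by pspec pointer rather than by property-name string lookup. A hedged sketch of that idiom follows; the setter and the private-struct access are illustrative, not NMManager's exact code.

/* Illustrative setter, assuming a priv->state field as in typical
 * GObject code. */
static void
set_state(NMManager *self, NMState state)
{
    NMManagerPrivate *priv = NM_MANAGER_GET_PRIVATE(self);

    if (priv->state == state)
        return;
    priv->state = state;
    /* No string lookup: reuses the GParamSpec installed above. With the
     * immediate dispatch described earlier, this yields an in-order
     * PropertiesChanged emission right away. */
    g_object_notify_by_pspec(G_OBJECT(self), obj_properties[PROP_STATE]);
}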
/* signals */
/* emitted only for realized devices */
signals[DEVICE_ADDED] = g_signal_new(NM_MANAGER_DEVICE_ADDED,
                                     G_OBJECT_CLASS_TYPE(object_class),
                                     G_SIGNAL_RUN_FIRST,
                                     0,
                                     NULL,
                                     NULL,
                                     NULL,
                                     G_TYPE_NONE,
                                     1,
                                     NM_TYPE_DEVICE);

/* Emitted for both realized devices and placeholder devices */
signals[INTERNAL_DEVICE_ADDED] = g_signal_new(NM_MANAGER_INTERNAL_DEVICE_ADDED,
                                              G_OBJECT_CLASS_TYPE(object_class),
                                              G_SIGNAL_RUN_FIRST,
                                              0,
                                              NULL,
                                              NULL,
                                              NULL,
                                              G_TYPE_NONE,
                                              1,
                                              G_TYPE_OBJECT);
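For reference, emitting these class signals from inside the manager uses the cached ids in the signals[] array. The sketch below is an inference from the comments above (internal signal for every device, public signal only for realized ones), not the actual NMManager emitter:

/* Illustrative only: the function name and the realized/placeholder
 * gating are assumptions drawn from the signal comments above. */
static void
emit_device_added(NMManager *self, NMDevice *device, gboolean realized)
{
    g_signal_emit(self, signals[INTERNAL_DEVICE_ADDED], 0, device);
    if (realized)
        g_signal_emit(self, signals[DEVICE_ADDED], 0, device);
}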
/* emitted only for realized devices when a device
 * becomes unrealized or removed */
signals[DEVICE_REMOVED] = g_signal_new(NM_MANAGER_DEVICE_REMOVED,
                                       G_OBJECT_CLASS_TYPE(object_class),
                                       G_SIGNAL_RUN_FIRST,
                                       0,
                                       NULL,
                                       NULL,
                                       NULL,
                                       G_TYPE_NONE,
                                       1,
                                       NM_TYPE_DEVICE);

/* Emitted for both realized devices and placeholder devices */
signals[INTERNAL_DEVICE_REMOVED] = g_signal_new(NM_MANAGER_INTERNAL_DEVICE_REMOVED,
                                                G_OBJECT_CLASS_TYPE(object_class),
                                                G_SIGNAL_RUN_FIRST,
                                                0,
                                                NULL,
                                                NULL,
                                                NULL,
                                                G_TYPE_NONE,
                                                1,
                                                G_TYPE_OBJECT);

signals[ACTIVE_CONNECTION_ADDED] = g_signal_new(NM_MANAGER_ACTIVE_CONNECTION_ADDED,
                                                G_OBJECT_CLASS_TYPE(object_class),
                                                G_SIGNAL_RUN_FIRST,
                                                0,
                                                NULL,
                                                NULL,
                                                NULL,
                                                G_TYPE_NONE,
                                                1,
                                                NM_TYPE_ACTIVE_CONNECTION);

signals[ACTIVE_CONNECTION_REMOVED] = g_signal_new(NM_MANAGER_ACTIVE_CONNECTION_REMOVED,
                                                  G_OBJECT_CLASS_TYPE(object_class),
                                                  G_SIGNAL_RUN_FIRST,
                                                  0,
                                                  NULL,
                                                  NULL,
                                                  NULL,
                                                  G_TYPE_NONE,
                                                  1,
                                                  NM_TYPE_ACTIVE_CONNECTION);

signals[CONFIGURE_QUIT] = g_signal_new(NM_MANAGER_CONFIGURE_QUIT,
                                       G_OBJECT_CLASS_TYPE(object_class),
                                       G_SIGNAL_RUN_FIRST,
                                       0,
                                       NULL,
                                       NULL,
                                       NULL,
                                       G_TYPE_NONE,
                                       0);

signals[DEVICE_IFINDEX_CHANGED] = g_signal_new(NM_MANAGER_DEVICE_IFINDEX_CHANGED,
                                               G_OBJECT_CLASS_TYPE(object_class),
                                               G_SIGNAL_RUN_FIRST,
                                               0,
                                               NULL,
                                               NULL,
                                               NULL,
                                               G_TYPE_NONE,
                                               1,
                                               NM_TYPE_DEVICE);
}
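Finally, a short consumer-side sketch of the signals defined above. The callback and the watch function are illustrative and not part of nm-manager.c:

/* Connect to DEVICE_ADDED; per the comment above, this fires for
 * realized devices only. */
static void
on_device_added(NMManager *manager, NMDevice *device, gpointer user_data)
{
    /* react to the newly realized device here */
}

static void
watch_manager(NMManager *manager)
{
    g_signal_connect(manager, NM_MANAGER_DEVICE_ADDED, G_CALLBACK(on_device_added), NULL);
}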